Commit 019b3fd9 authored by Linus Torvalds

Merge tag 'powerpc-5.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

 - A big series refactoring parts of our KVM code, and converting some
   to C.

 - Support for ARCH_HAS_SET_MEMORY, and ARCH_HAS_STRICT_MODULE_RWX on
   some CPUs.

 - Support for the Microwatt soft-core.

 - Optimisations to our interrupt return path on 64-bit.

 - Support for userspace access to the NX GZIP accelerator on PowerVM on
   Power10.

 - Enable KUAP and KUEP by default on 32-bit Book3S CPUs.

 - Other smaller features, fixes & cleanups.

Thanks to: Andy Shevchenko, Aneesh Kumar K.V, Arnd Bergmann, Athira
Rajeev, Baokun Li, Benjamin Herrenschmidt, Bharata B Rao, Christophe
Leroy, Daniel Axtens, Daniel Henrique Barboza, Finn Thain, Geoff Levand,
Haren Myneni, Jason Wang, Jiapeng Chong, Joel Stanley, Jordan Niethe,
Kajol Jain, Nathan Chancellor, Nathan Lynch, Naveen N. Rao, Nicholas
Piggin, Nick Desaulniers, Paul Mackerras, Russell Currey, Sathvika
Vasireddy, Shaokun Zhang, Stephen Rothwell, Sudeep Holla, Suraj Jitindar
Singh, Tom Rix, Vaibhav Jain, YueHaibing, Zhang Jianhua, and Zhen Lei.

* tag 'powerpc-5.14-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (218 commits)
  powerpc: Only build restart_table.c for 64s
  powerpc/64s: move ret_from_fork etc above __end_soft_masked
  powerpc/64s/interrupt: clean up interrupt return labels
  powerpc/64/interrupt: add missing kprobe annotations on interrupt exit symbols
  powerpc/64: enable MSR[EE] in irq replay pt_regs
  powerpc/64s/interrupt: preserve regs->softe for NMI interrupts
  powerpc/64s: add a table of implicit soft-masked addresses
  powerpc/64e: remove implicit soft-masking and interrupt exit restart logic
  powerpc/64e: fix CONFIG_RELOCATABLE build warnings
  powerpc/64s: fix hash page fault interrupt handler
  powerpc/4xx: Fix setup_kuep() on SMP
  powerpc/32s: Fix setup_{kuap/kuep}() on SMP
  powerpc/interrupt: Use names in check_return_regs_valid()
  powerpc/interrupt: Also use exit_must_hard_disable() on PPC32
  powerpc/sysfs: Replace sizeof(arr)/sizeof(arr[0]) with ARRAY_SIZE
  powerpc/ptrace: Refactor regs_set_return_{msr/ip}
  powerpc/ptrace: Move set_return_regs_changed() before regs_set_return_{msr/ip}
  powerpc/stacktrace: Fix spurious "stale" traces in raise_backtrace_ipi()
  powerpc/pseries/vas: Include irqdomain.h
  powerpc: mark local variables around longjmp as volatile
  ...
parents 4cad6719 4ebbbaa4
......@@ -39,9 +39,11 @@ KernelVersion: v5.9
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
Description:
(RO) Report various performance stats related to papr-scm NVDIMM
device. Each stat is reported on a new line with each line
composed of a stat-identifier followed by its value. Below are
currently known dimm performance stats which are reported:
device. This attribute is only available for NVDIMM devices
that support reporting NVDIMM performance stats. Each stat is
reported on a new line with each line composed of a
stat-identifier followed by its value. Below are currently known
dimm performance stats which are reported:
* "CtlResCt" : Controller Reset Count
* "CtlResTm" : Controller Reset Elapsed Time
......
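
The perf_stats format described above (one "stat-identifier : value" pair per line) is straightforward to consume from user space. A minimal C sketch, not part of this patch; the sysfs path, device name and buffer sizes are illustrative assumptions:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path for one papr-scm NVDIMM; adjust nmem0 as needed. */
	const char *path = "/sys/bus/nd/devices/nmem0/papr/perf_stats";
	char line[128], name[64];
	unsigned long long value;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Each line is "<stat-identifier> : <value>", e.g. "CtlResCt : 2". */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%63s : %llu", name, &value) == 2)
			printf("%s = %llu\n", name, value);
	}
	fclose(f);
	return 0;
}
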
......@@ -140,7 +140,9 @@ config PPC
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
......@@ -266,6 +268,7 @@ config PPC
select PPC_DAWR if PPC64
select RTC_LIB
select SPARSE_IRQ
select STRICT_KERNEL_RWX if STRICT_MODULE_RWX
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select VIRT_TO_BUS if !PPC64
......@@ -289,6 +292,7 @@ config PANIC_TIMEOUT
config COMPAT
bool "Enable support for 32bit binaries"
depends on PPC64
depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
default y if !CPU_LITTLE_ENDIAN
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
......@@ -422,7 +426,7 @@ source "kernel/Kconfig.hz"
config MATH_EMULATION
bool "Math emulation"
depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE
depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE || PPC_MICROWATT
select PPC_FPU_REGS
help
Some PowerPC chips designed for embedded applications do not have
......
......@@ -84,6 +84,11 @@ config MSI_BITMAP_SELFTEST
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
depends on PPC64
config PPC_RFI_SRR_DEBUG
bool "Include extra checks for RFI SRR register validity"
depends on PPC_BOOK3S_64
config XMON
bool "Include xmon kernel debugger"
......
......@@ -376,6 +376,16 @@ ppc64_book3e_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-64bit.config \
-f $(srctree)/Makefile allmodconfig
PHONY += ppc32_randconfig
ppc32_randconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/32-bit.config \
-f $(srctree)/Makefile randconfig
PHONY += ppc64_randconfig
ppc64_randconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/64-bit.config \
-f $(srctree)/Makefile randconfig
define archhelp
@echo '* zImage - Build default images selected by kernel config'
@echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
......
......@@ -163,6 +163,8 @@ src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
src-plat-$(CONFIG_PPC_MICROWATT) += fixed-head.S microwatt.c
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
src-boot := $(src-wlib) $(src-plat) empty.c
......@@ -227,7 +229,7 @@ $(obj)/wrapper.a: $(obj-wlib) FORCE
hostprogs := addnote hack-coff mktree
targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
......@@ -355,6 +357,8 @@ image-$(CONFIG_MVME5100) += dtbImage.mvme5100
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
image-$(CONFIG_PPC_MICROWATT) += dtbImage.microwatt
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
ifdef CONFIG_PPC32
......
......@@ -99,8 +99,8 @@ static void print_err(char *s)
* partial_decompress - decompresses part or all of a compressed buffer
* @inbuf: input buffer
* @input_size: length of the input buffer
* @outbuf: input buffer
* @output_size: length of the input buffer
* @outbuf: output buffer
* @output_size: length of the output buffer
* @skip number of output bytes to ignore
*
 * This function takes compressed data from inbuf, decompresses and writes it to
......
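
As a usage note (not part of this patch): a hedged sketch of how a boot-wrapper caller might invoke partial_decompress(). The parameter order and meaning come from the kernel-doc above; the long return type and the ops.h declaration are assumptions, and the sizes are made-up examples.

#include "ops.h"	/* boot wrapper helpers, assumed to declare partial_decompress() and fatal() */

/* Illustrative only: produce 64 KiB of output, discarding the first 4 KiB. */
static void example_partial_decompress(void *inbuf, unsigned long input_size,
				       void *outbuf)
{
	long ret = partial_decompress(inbuf, input_size, outbuf, 0x10000, 0x1000);

	if (ret < 0)
		fatal("decompression failed: %ld\n\r", ret);
}
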
......@@ -13,6 +13,7 @@
#include "string.h"
#include "stdio.h"
#include "ops.h"
#include "of.h"
void dt_fixup_memory(u64 start, u64 size)
{
......@@ -23,21 +24,25 @@ void dt_fixup_memory(u64 start, u64 size)
root = finddevice("/");
if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
naddr = 2;
else
naddr = be32_to_cpu(naddr);
if (naddr < 1 || naddr > 2)
fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
nsize = 1;
else
nsize = be32_to_cpu(nsize);
if (nsize < 1 || nsize > 2)
fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
i = 0;
if (naddr == 2)
memreg[i++] = start >> 32;
memreg[i++] = start & 0xffffffff;
memreg[i++] = cpu_to_be32(start >> 32);
memreg[i++] = cpu_to_be32(start & 0xffffffff);
if (nsize == 2)
memreg[i++] = size >> 32;
memreg[i++] = size & 0xffffffff;
memreg[i++] = cpu_to_be32(size >> 32);
memreg[i++] = cpu_to_be32(size & 0xffffffff);
memory = finddevice("/memory");
if (! memory) {
......@@ -45,9 +50,9 @@ void dt_fixup_memory(u64 start, u64 size)
setprop_str(memory, "device_type", "memory");
}
printf("Memory <- <0x%x", memreg[0]);
printf("Memory <- <0x%x", be32_to_cpu(memreg[0]));
for (i = 1; i < (naddr + nsize); i++)
printf(" 0x%x", memreg[i]);
printf(" 0x%x", be32_to_cpu(memreg[i]));
printf("> (%ldMB)\n\r", (unsigned long)(size >> 20));
setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32));
......@@ -65,10 +70,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus));
while ((devp = find_node_by_devtype(devp, "cpu"))) {
setprop_val(devp, "clock-frequency", cpu);
setprop_val(devp, "timebase-frequency", tb);
setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
setprop_val(devp, "timebase-frequency", cpu_to_be32(tb));
if (bus > 0)
setprop_val(devp, "bus-frequency", bus);
setprop_val(devp, "bus-frequency", cpu_to_be32(bus));
}
timebase_period_ns = 1000000000 / tb;
......@@ -80,7 +85,7 @@ void dt_fixup_clock(const char *path, u32 freq)
if (devp) {
printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq));
setprop_val(devp, "clock-frequency", freq);
setprop_val(devp, "clock-frequency", cpu_to_be32(freq));
}
}
......@@ -133,8 +138,12 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
{
if (getprop(node, "#address-cells", naddr, 4) != 4)
*naddr = 2;
else
*naddr = be32_to_cpu(*naddr);
if (getprop(node, "#size-cells", nsize, 4) != 4)
*nsize = 1;
else
*nsize = be32_to_cpu(*nsize);
}
static void copy_val(u32 *dest, u32 *src, int naddr)
......@@ -163,9 +172,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr)
int i, carry = 0;
for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) {
u64 tmp = (u64)reg[i] + add[i] + carry;
u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry;
carry = tmp >> 32;
reg[i] = (u32)tmp;
reg[i] = cpu_to_be32((u32)tmp);
}
return !carry;
......@@ -180,18 +189,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
u32 end;
for (i = 0; i < MAX_ADDR_CELLS; i++) {
if (reg[i] < range[i])
if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
return 0;
if (reg[i] > range[i])
if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
break;
}
for (i = 0; i < MAX_ADDR_CELLS; i++) {
end = range[i] + rangesize[i];
end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
if (reg[i] < end)
if (be32_to_cpu(reg[i]) < end)
break;
if (reg[i] > end)
if (be32_to_cpu(reg[i]) > end)
return 0;
}
......@@ -240,7 +249,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
return 0;
dt_get_reg_format(parent, &naddr, &nsize);
if (nsize > 2)
return 0;
......@@ -252,10 +260,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
copy_val(last_addr, prop_buf + offset, naddr);
ret_size = prop_buf[offset + naddr];
ret_size = be32_to_cpu(prop_buf[offset + naddr]);
if (nsize == 2) {
ret_size <<= 32;
ret_size |= prop_buf[offset + naddr + 1];
ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]);
}
for (;;) {
......@@ -278,7 +286,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
offset = find_range(last_addr, prop_buf, prev_naddr,
naddr, prev_nsize, buflen / 4);
if (offset < 0)
return 0;
......@@ -296,8 +303,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
if (naddr > 2)
return 0;
ret_addr = ((u64)last_addr[2] << 32) | last_addr[3];
ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]);
if (sizeof(void *) == 4 &&
(ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL ||
ret_addr + ret_size > 0x100000000ULL))
......@@ -350,11 +356,14 @@ int dt_is_compatible(void *node, const char *compat)
int dt_get_virtual_reg(void *node, void **addr, int nres)
{
unsigned long xaddr;
int n;
int n, i;
n = getprop(node, "virtual-reg", addr, nres * 4);
if (n > 0)
if (n > 0) {
for (i = 0; i < n/4; i ++)
((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]);
return n / 4;
}
for (n = 0; n < nres; n++) {
if (!dt_xlate_reg(node, n, &xaddr, NULL))
......
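
The recurring theme in the devtree.c hunk above is that flattened-device-tree cells are stored big-endian, so a little-endian host such as Microwatt must convert every cell it reads or writes. A standalone sketch of the idea (illustrative only, independent of the wrapper code):

#include <stdint.h>

/* FDT property cells are big-endian in memory; convert on access. */
static inline uint32_t fdt_cell_to_cpu(uint32_t cell)
{
	const union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };

	if (probe.b[0] == 1)	/* little-endian host: byte-swap */
		return __builtin_bswap32(cell);
	return cell;		/* big-endian host: already in CPU order */
}

/* Writing is symmetric: CPU-order value -> big-endian cell. */
static inline uint32_t cpu_to_fdt_cell(uint32_t val)
{
	return fdt_cell_to_cpu(val);	/* bswap32 is its own inverse */
}
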
/dts-v1/;
/ {
#size-cells = <0x02>;
#address-cells = <0x02>;
model-name = "microwatt";
compatible = "microwatt-soc";
aliases {
serial0 = &UART0;
};
reserved-memory {
#size-cells = <0x02>;
#address-cells = <0x02>;
ranges;
};
memory@0 {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000 0x10000000>;
};
cpus {
#size-cells = <0x00>;
#address-cells = <0x01>;
ibm,powerpc-cpu-features {
display-name = "Microwatt";
isa = <3000>;
device_type = "cpu-features";
compatible = "ibm,powerpc-cpu-features";
mmu-radix {
isa = <3000>;
usable-privilege = <2>;
};
little-endian {
isa = <2050>;
usable-privilege = <3>;
hwcap-bit-nr = <1>;
};
cache-inhibited-large-page {
isa = <2040>;
usable-privilege = <2>;
};
fixed-point-v3 {
isa = <3000>;
usable-privilege = <3>;
};
no-execute {
isa = <2010>;
usable-privilege = <2>;
};
floating-point {
hwcap-bit-nr = <27>;
isa = <0>;
usable-privilege = <3>;
};
};
PowerPC,Microwatt@0 {
i-cache-sets = <2>;
ibm,dec-bits = <64>;
reservation-granule-size = <64>;
clock-frequency = <100000000>;
timebase-frequency = <100000000>;
i-tlb-sets = <1>;
ibm,ppc-interrupt-server#s = <0>;
i-cache-block-size = <64>;
d-cache-block-size = <64>;
d-cache-sets = <2>;
i-tlb-size = <64>;
cpu-version = <0x990000>;
status = "okay";
i-cache-size = <0x1000>;
ibm,processor-radix-AP-encodings = <0x0c 0xa0000010 0x20000015 0x4000001e>;
tlb-size = <0>;
tlb-sets = <0>;
device_type = "cpu";
d-tlb-size = <128>;
d-tlb-sets = <2>;
reg = <0>;
general-purpose;
64-bit;
d-cache-size = <0x1000>;
ibm,chip-id = <0>;
};
};
soc@c0000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&ICS>;
ranges = <0 0 0xc0000000 0x40000000>;
interrupt-controller@4000 {
compatible = "openpower,xics-presentation", "ibm,ppc-xicp";
ibm,interrupt-server-ranges = <0x0 0x1>;
reg = <0x4000 0x100>;
};
ICS: interrupt-controller@5000 {
compatible = "openpower,xics-sources";
interrupt-controller;
interrupt-ranges = <0x10 0x10>;
reg = <0x5000 0x100>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
};
UART0: serial@2000 {
device_type = "serial";
compatible = "ns16550";
reg = <0x2000 0x8>;
clock-frequency = <100000000>;
current-speed = <115200>;
reg-shift = <2>;
fifo-size = <16>;
interrupts = <0x10 0x1>;
};
};
chosen {
bootargs = "";
ibm,architecture-vec-5 = [19 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 40 00 40];
stdout-path = &UART0;
};
};
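
For illustration (not part of this patch), a kernel consumer of this device tree could fetch the UART clock declared above with the standard OF helpers; the lookup by the "ns16550" compatible string is an assumption made for the example:

#include <linux/of.h>

/* Sketch: read the clock-frequency of the ns16550 node in the dts above. */
static u32 microwatt_uart_clock(void)
{
	struct device_node *np;
	u32 freq = 0;

	np = of_find_compatible_node(NULL, NULL, "ns16550");
	if (np) {
		of_property_read_u32(np, "clock-frequency", &freq);
		of_node_put(np);
	}
	return freq;	/* 100000000 for the device tree above */
}
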
// SPDX-License-Identifier: GPL-2.0-or-later
#include <stddef.h>
#include "stdio.h"
#include "types.h"
#include "io.h"
#include "ops.h"
BSS_STACK(8192);
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
{
unsigned long heapsize = 16*1024*1024 - (unsigned long)_end;
/*
* Disable interrupts and turn off MSR_RI, since we'll
* shortly be overwriting the interrupt vectors.
*/
__asm__ volatile("mtmsrd %0,1" : : "r" (0));
simple_alloc_init(_end, heapsize, 32, 64);
fdt_init(_dtb_start);
serial_console_init();
}
......@@ -15,6 +15,7 @@
#include "stdio.h"
#include "io.h"
#include "ops.h"
#include "of.h"
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
......@@ -58,16 +59,20 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp)
int n;
u32 reg_offset;
if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) {
printf("virt reg parse fail...\r\n");
return -1;
}
n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
if (n == sizeof(reg_offset))
reg_base += reg_offset;
reg_base += be32_to_cpu(reg_offset);
n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
if (n != sizeof(reg_shift))
reg_shift = 0;
else
reg_shift = be32_to_cpu(reg_shift);
scdp->open = ns16550_open;
scdp->putc = ns16550_putc;
......
......@@ -342,6 +342,11 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
microwatt)
link_address='0x500000'
platformo="$object/fixed-head.o $object/$platform.o"
binary=y
;;
treeboot-currituck)
link_address='0x1000000'
;;
......
......@@ -8,7 +8,7 @@ SECTIONS
.kernel:vmlinux.bin : { *(.kernel:vmlinux.bin) }
_vmlinux_end = .;
. = ALIGN(4096);
. = ALIGN(8);
_dtb_start = .;
.kernel:dtb : { *(.kernel:dtb) }
_dtb_end = .;
......
# CONFIG_PPC64 is not set
# CONFIG_SWAP is not set
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_TICK_CPU_ACCOUNTING=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_PPC64=y
# CONFIG_PPC_KUEP is not set
# CONFIG_PPC_KUAP is not set
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_NR_IRQS=64
CONFIG_PANIC_TIMEOUT=10
# CONFIG_PPC_POWERNV is not set
# CONFIG_PPC_PSERIES is not set
CONFIG_PPC_MICROWATT=y
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_CPU_FREQ=y
CONFIG_HZ_100=y
# CONFIG_PPC_MEM_KEYS is not set
# CONFIG_SECCOMP is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
# CONFIG_COREDUMP is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=y
CONFIG_INET=y
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_RAW_DIAG=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_PARTITIONED_MASTER=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_NONSTANDARD=y
# CONFIG_NVRAM is not set
CONFIG_RANDOM_TRUST_CPU=y
CONFIG_SPI=y
CONFIG_SPI_DEBUG=y
CONFIG_SPI_BITBANG=y
CONFIG_SPI_SPIDEV=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_NVMEM is not set
CONFIG_EXT4_FS=y
# CONFIG_FILE_LOCKING is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_CRYPTO_HW is not set
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
CONFIG_PRINTK_TIME=y
# CONFIG_SYMBOLIC_ERRNAME is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_MISC is not set
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
# CONFIG_STRICT_DEVMEM is not set
CONFIG_PPC_DISABLE_WERROR=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
# CONFIG_XMON_DEFAULT_RO_MODE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
......@@ -57,3 +57,28 @@ CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PPC_16K_PAGES=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_FS=y
CONFIG_PPC_PTDUMP=y
CONFIG_MODULES=y
CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_DEV_TALITOS=y
CONFIG_8xx_GPIO=y
CONFIG_WATCHDOG=y
CONFIG_8xxx_WDT=y
CONFIG_SMC_UCODE_PATCH=y
CONFIG_ADVANCED_OPTIONS=y
CONFIG_PIN_TLB=y
CONFIG_PERF_EVENTS=y
CONFIG_MATH_EMULATION=y
CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_IPV6=y
CONFIG_BPF_JIT=y
CONFIG_DEBUG_VM_PGTABLE=y
CONFIG_BDI_SWITCH=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_PPC_EARLY_DEBUG_CPM_ADDR=0xff002008
......@@ -309,6 +309,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PPC_EMULATED_STATS=y
......
......@@ -368,7 +368,9 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_FUNCTION_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
......
......@@ -289,7 +289,9 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
......
......@@ -71,8 +71,13 @@ void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
#ifdef CONFIG_PPC64
unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
#endif
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
......
......@@ -46,6 +46,8 @@
# define SMPWMB eieio
#endif
/* clang defines this macro for a builtin, which will not work with runtime patching */
#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
#define _ASM_POWERPC_BOOK3S_32_HASH_H
#ifdef __KERNEL__
/*
* The "classic" 32-bit implementation of the PowerPC MMU uses a hash
* table containing PTEs, together with a set of 16 segment registers,
* to define the virtual to physical address mapping.
*
* We use the hash table as an extended TLB, i.e. a cache of currently
* active mappings. We maintain a two-level page table tree, much
* like that used by the i386, for the sake of the Linux memory
* management code. Low-level assembler code in hash_low_32.S
* (procedure hash_page) is responsible for extracting ptes from the
* tree and putting them into the hash table when necessary, and
* updating the accessed and modified bits in the page table tree.
*/
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
#define _PAGE_USER 0x004 /* usermode access allowed */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
#define _PAGE_RW 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK _PAGE_HASHPTE
#endif
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
......@@ -7,35 +7,104 @@
#ifndef __ASSEMBLY__
#include <linux/jump_label.h>
extern struct static_key_false disable_kuap_key;
extern struct static_key_false disable_kuep_key;
static __always_inline bool kuap_is_disabled(void)
{
return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
}
static __always_inline bool kuep_is_disabled(void)
{
return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
}
static inline void kuep_lock(void)
{
if (kuep_is_disabled())
return;
update_user_segments(mfsr(0) | SR_NX);
}
static inline void kuep_unlock(void)
{
if (kuep_is_disabled())
return;
update_user_segments(mfsr(0) & ~SR_NX);
}
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
#define KUAP_NONE (~0UL)
#define KUAP_ALL (~1UL)
static inline void kuap_lock_one(unsigned long addr)
{
addr &= 0xf0000000; /* align addr to start of segment */
barrier(); /* make sure thread.kuap is updated before playing with SRs */
while (addr < end) {
mtsr(sr, addr);
sr += 0x111; /* next VSID */
sr &= 0xf0ffffff; /* clear VSID overflow */
addr += 0x10000000; /* address of next segment */
}
mtsr(mfsr(addr) | SR_KS, addr);
isync(); /* Context sync required after mtsr() */
}
static inline void kuap_unlock_one(unsigned long addr)
{
mtsr(mfsr(addr) & ~SR_KS, addr);
isync(); /* Context sync required after mtsr() */
}
static inline void kuap_lock_all(void)
{
update_user_segments(mfsr(0) | SR_KS);
isync(); /* Context sync required after mtsr() */
}
static inline void kuap_unlock_all(void)
{
update_user_segments(mfsr(0) & ~SR_KS);
isync(); /* Context sync required after mtsr() */
}
void kuap_lock_all_ool(void);
void kuap_unlock_all_ool(void);
static inline void kuap_lock(unsigned long addr, bool ool)
{
if (likely(addr != KUAP_ALL))
kuap_lock_one(addr);
else if (!ool)
kuap_lock_all();
else
kuap_lock_all_ool();
}
static inline void kuap_unlock(unsigned long addr, bool ool)
{
if (likely(addr != KUAP_ALL))
kuap_unlock_one(addr);
else if (!ool)
kuap_unlock_all();
else
kuap_unlock_all_ool();
}
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
unsigned long kuap = current->thread.kuap;
u32 addr = kuap & 0xf0000000;
u32 end = kuap << 28;
if (kuap_is_disabled())
return;
regs->kuap = kuap;
if (unlikely(!kuap))
if (unlikely(kuap == KUAP_NONE))
return;
current->thread.kuap = 0;
kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* Set Ks */
current->thread.kuap = KUAP_NONE;
kuap_lock(kuap, false);
}
static inline void kuap_user_restore(struct pt_regs *regs)
......@@ -44,22 +113,22 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
u32 addr = regs->kuap & 0xf0000000;
u32 end = regs->kuap << 28;
if (kuap_is_disabled())
return;
current->thread.kuap = regs->kuap;
if (unlikely(regs->kuap == kuap))
return;
kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
kuap_unlock(regs->kuap, false);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;
WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);
if (kuap_is_disabled())
return KUAP_NONE;
WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
return kuap;
}
......@@ -72,84 +141,78 @@ static inline void kuap_assert_locked(void)
static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
u32 addr, end;
if (kuap_is_disabled())
return;
BUILD_BUG_ON(!__builtin_constant_p(dir));
BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);
if (!(dir & KUAP_WRITE))
return;
addr = (__force u32)to;
if (unlikely(addr >= TASK_SIZE || !size))
return;
end = min(addr + size, TASK_SIZE);
current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
current->thread.kuap = (__force u32)to;
kuap_unlock_one((__force u32)to);
}
static __always_inline void prevent_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
static __always_inline void prevent_user_access(unsigned long dir)
{
u32 addr, end;
BUILD_BUG_ON(!__builtin_constant_p(dir));
u32 kuap = current->thread.kuap;
if (dir & KUAP_CURRENT_WRITE) {
u32 kuap = current->thread.kuap;
if (unlikely(!kuap))
return;
if (kuap_is_disabled())
return;
addr = kuap & 0xf0000000;
end = kuap << 28;
} else if (dir & KUAP_WRITE) {
addr = (__force u32)to;
end = min(addr + size, TASK_SIZE);
BUILD_BUG_ON(!__builtin_constant_p(dir));
if (unlikely(addr >= TASK_SIZE || !size))
return;
} else {
if (!(dir & KUAP_WRITE))
return;
}
current->thread.kuap = 0;
kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* set Ks */
current->thread.kuap = KUAP_NONE;
kuap_lock(kuap, true);
}
static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
unsigned long addr = flags & 0xf0000000;
unsigned long end = flags << 28;
void __user *to = (__force void __user *)addr;
if (flags)
prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
if (kuap_is_disabled())
return KUAP_NONE;
if (flags != KUAP_NONE) {
current->thread.kuap = KUAP_NONE;
kuap_lock(flags, true);
}
return flags;
}
static inline void restore_user_access(unsigned long flags)
{
unsigned long addr = flags & 0xf0000000;
unsigned long end = flags << 28;
void __user *to = (__force void __user *)addr;
if (kuap_is_disabled())
return;
if (flags)
allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
if (flags != KUAP_NONE) {
current->thread.kuap = flags;
kuap_unlock(flags, true);
}
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
unsigned long begin = regs->kuap & 0xf0000000;
unsigned long end = regs->kuap << 28;
unsigned long kuap = regs->kuap;
if (kuap_is_disabled())
return false;
if (!is_write || kuap == KUAP_ALL)
return false;
if (kuap == KUAP_NONE)
return true;
/* If faulting address doesn't match unlocked segment, unlock all */
if ((kuap ^ address) & 0xf0000000)
regs->kuap = KUAP_ALL;
return is_write && (address < begin || address >= end);
return false;
}
#endif /* CONFIG_PPC_KUAP */
......
......@@ -66,6 +66,16 @@ struct ppc_bat {
#ifndef __ASSEMBLY__
/*
* This macro defines the mapping from contexts to VSIDs (virtual
* segment IDs). We use a skew on both the context and the high 4 bits
* of the 32-bit virtual address (the "effective segment ID") in order
* to spread out the entries in the MMU hash table. Note, if this
* function is changed then hash functions will have to be
* changed to correspond.
*/
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
/*
* Hardware Page Table Entry
* Note that the xpn and x bitfields are used only by processors that
......@@ -102,6 +112,37 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
#include <asm/reg.h>
#include <asm/task_size_32.h>
static __always_inline void update_user_segment(u32 n, u32 val)
{
if (n << 28 < TASK_SIZE)
mtsr(val + n * 0x111, n << 28);
}
static __always_inline void update_user_segments(u32 val)
{
val &= 0xf0ffffff;
update_user_segment(0, val);
update_user_segment(1, val);
update_user_segment(2, val);
update_user_segment(3, val);
update_user_segment(4, val);
update_user_segment(5, val);
update_user_segment(6, val);
update_user_segment(7, val);
update_user_segment(8, val);
update_user_segment(9, val);
update_user_segment(10, val);
update_user_segment(11, val);
update_user_segment(12, val);
update_user_segment(13, val);
update_user_segment(14, val);
update_user_segment(15, val);
}
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
......
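
To make the skew concrete, here is an illustrative, non-kernel computation using the CTX_TO_VSID() formula above. Treating "id" as the top four bits of the effective address follows update_user_segments(); the context number and address are made-up example values:

#include <stdint.h>
#include <stdio.h>

#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

int main(void)
{
	uint32_t ctx = 5;		/* example MMU context number */
	uint32_t ea = 0x30001000;	/* example user effective address */
	uint32_t esid = ea >> 28;	/* top 4 bits select the segment */

	/* Consecutive segments of one context differ by 0x111, consecutive
	 * contexts by 897*16, spreading entries across the hash table. */
	printf("VSID = 0x%06x\n", CTX_TO_VSID(ctx, esid));
	return 0;
}
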
......@@ -4,7 +4,43 @@
#include <asm-generic/pgtable-nopmd.h>
#include <asm/book3s/32/hash.h>
/*
* The "classic" 32-bit implementation of the PowerPC MMU uses a hash
* table containing PTEs, together with a set of 16 segment registers,
* to define the virtual to physical address mapping.
*
* We use the hash table as an extended TLB, i.e. a cache of currently
* active mappings. We maintain a two-level page table tree, much
* like that used by the i386, for the sake of the Linux memory
* management code. Low-level assembler code in hash_low_32.S
* (procedure hash_page) is responsible for extracting ptes from the
* tree and putting them into the hash table when necessary, and
* updating the accessed and modified bits in the page table tree.
*/
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
#define _PAGE_USER 0x004 /* usermode access allowed */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
#define _PAGE_RW 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK _PAGE_HASHPTE
#endif
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
/* And here we include common definitions */
......
......@@ -398,8 +398,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
#endif /* !CONFIG_PPC_KUAP */
static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
static inline void prevent_user_access(unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
......
......@@ -232,6 +232,9 @@ extern unsigned long __pmd_frag_size_shift;
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
......
......@@ -91,7 +91,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
}
#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
......
......@@ -23,13 +23,13 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, struct ppc_inst instr);
int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
static inline unsigned long patch_site_addr(s32 *site)
{
......@@ -38,18 +38,18 @@ static inline unsigned long patch_site_addr(s32 *site)
static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
return patch_instruction((u32 *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
unsigned int set)
{
return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
......@@ -59,10 +59,8 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
unsigned long branch_target(const struct ppc_inst *instr);
int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
const struct ppc_inst *src);
unsigned long branch_target(const u32 *instr);
int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
......@@ -73,9 +71,9 @@ void __patch_exception(int exc, unsigned long addr);
#endif
#define OP_RT_RA_MASK 0xffff0000UL
#define LIS_R2 (PPC_INST_ADDIS | __PPC_RT(R2))
#define ADDIS_R2_R12 (PPC_INST_ADDIS | __PPC_RT(R2) | __PPC_RA(R12))
#define ADDI_R2_R2 (PPC_INST_ADDI | __PPC_RT(R2) | __PPC_RA(R2))
#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
static inline unsigned long ppc_function_entry(void *func)
......@@ -180,12 +178,10 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
#define R2_STACK_OFFSET 40
#endif
#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
......@@ -16,7 +16,7 @@
.section ".head.data.\name\()","a",@progbits
.endm
.macro use_ftsec name
.section ".head.text.\name\()"
.section ".head.text.\name\()","ax",@progbits
.endm
/*
......
......@@ -294,6 +294,13 @@
#define H_RESIZE_HPT_COMMIT 0x370
#define H_REGISTER_PROC_TBL 0x37C
#define H_SIGNAL_SYS_RESET 0x380
#define H_ALLOCATE_VAS_WINDOW 0x388
#define H_MODIFY_VAS_WINDOW 0x38C
#define H_DEALLOCATE_VAS_WINDOW 0x390
#define H_QUERY_VAS_WINDOW 0x394
#define H_QUERY_VAS_CAPABILITIES 0x398
#define H_QUERY_NX_CAPABILITIES 0x39C
#define H_GET_NX_FAULT 0x3A0
#define H_INT_GET_SOURCE_INFO 0x3A8
#define H_INT_SET_SOURCE_CONFIG 0x3AC
#define H_INT_GET_SOURCE_CONFIG 0x3B0
......@@ -393,6 +400,9 @@
#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
#define H_CPU_BEHAV_NO_STF_BARRIER (1ull << 54) // IBM bit 9
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
......
......@@ -18,8 +18,17 @@
* PACA flags in paca->irq_happened.
*
 * These bits are set when interrupts occur while soft-disabled
* and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
* is set whenever we manually hard disable.
* and allow a proper replay.
*
* The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
* always in synch with the MSR[EE] state, except:
* - A window in interrupt entry, where hardware disables MSR[EE] and that
* must be "reconciled" with the soft mask state.
* - NMI interrupts that hit in awkward places, until they fix the state.
* - When local irqs are being enabled and state is being fixed up.
* - When returning from an interrupt there are some windows where this
* can become out of synch, but gets fixed before the RFI or before
* executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
*/
#define PACA_IRQ_HARD_DIS 0x01
#define PACA_IRQ_DBELL 0x02
......@@ -389,7 +398,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
static inline void may_hard_irq_enable(void) { }
static inline bool may_hard_irq_enable(void)
{
return false;
}
static inline void do_hard_irq_enable(void)
{
BUILD_BUG();
}
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
......
......@@ -8,17 +8,17 @@
#define ___get_user_instr(gu_op, dest, ptr) \
({ \
long __gui_ret = 0; \
unsigned long __gui_ptr = (unsigned long)ptr; \
long __gui_ret; \
u32 __user *__gui_ptr = (u32 __user *)ptr; \
struct ppc_inst __gui_inst; \
unsigned int __prefix, __suffix; \
__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
\
__chk_user_ptr(ptr); \
__gui_ret = gu_op(__prefix, __gui_ptr); \
if (__gui_ret == 0) { \
if ((__prefix >> 26) == OP_PREFIX) { \
__gui_ret = gu_op(__suffix, \
(unsigned int __user *)__gui_ptr + 1); \
__gui_inst = ppc_inst_prefix(__prefix, \
__suffix); \
__gui_ret = gu_op(__suffix, __gui_ptr + 1); \
__gui_inst = ppc_inst_prefix(__prefix, __suffix); \
} else { \
__gui_inst = ppc_inst(__prefix); \
} \
......@@ -29,14 +29,15 @@
})
#else /* !CONFIG_PPC64 */
#define ___get_user_instr(gu_op, dest, ptr) \
gu_op((dest).val, (u32 __user *)(ptr))
({ \
__chk_user_ptr(ptr); \
gu_op((dest).val, (u32 __user *)(ptr)); \
})
#endif /* CONFIG_PPC64 */
#define get_user_instr(x, ptr) \
___get_user_instr(get_user, x, ptr)
#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
#define __get_user_instr(x, ptr) \
___get_user_instr(__get_user, x, ptr)
#define __get_user_instr(x, ptr) ___get_user_instr(__get_user, x, ptr)
/*
* Instruction data type for POWER
......@@ -59,9 +60,9 @@ static inline int ppc_inst_primary_opcode(struct ppc_inst x)
return ppc_inst_val(x) >> 26;
}
#ifdef CONFIG_PPC64
#define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff })
#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })
#ifdef CONFIG_PPC64
#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
static inline u32 ppc_inst_suffix(struct ppc_inst x)
......@@ -69,68 +70,43 @@ static inline u32 ppc_inst_suffix(struct ppc_inst x)
return x.suffix;
}
static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff;
}
#else
#define ppc_inst_prefix(x, y) ppc_inst(x)
static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
static inline u32 ppc_inst_suffix(struct ppc_inst x)
{
return ppc_inst_prefix(swab32(ppc_inst_val(x)),
swab32(ppc_inst_suffix(x)));
return 0;
}
static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
{
u32 val, suffix;
val = *(u32 *)ptr;
if ((val >> 26) == OP_PREFIX) {
suffix = *((u32 *)ptr + 1);
return ppc_inst_prefix(val, suffix);
} else {
return ppc_inst(val);
}
}
#endif /* CONFIG_PPC64 */
static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
{
return *(u64 *)&x == *(u64 *)&y;
if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
return ppc_inst_prefix(*ptr, *(ptr + 1));
else
return ppc_inst(*ptr);
}
#else
#define ppc_inst(x) ((struct ppc_inst){ .val = x })
#define ppc_inst_prefix(x, y) ppc_inst(x)
static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
return false;
}
static inline u32 ppc_inst_suffix(struct ppc_inst x)
{
return 0;
return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
}
static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
{
return ppc_inst(swab32(ppc_inst_val(x)));
}
static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
{
return *ptr;
return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
}
static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
{
return ppc_inst_val(x) == ppc_inst_val(y);
if (ppc_inst_val(x) != ppc_inst_val(y))
return false;
if (!ppc_inst_prefixed(x))
return true;
return ppc_inst_suffix(x) == ppc_inst_suffix(y);
}
#endif /* CONFIG_PPC64 */
static inline int ppc_inst_len(struct ppc_inst x)
{
return ppc_inst_prefixed(x) ? 8 : 4;
......@@ -140,13 +116,13 @@ static inline int ppc_inst_len(struct ppc_inst x)
* Return the address of the next instruction, if the instruction @value was
* located at @location.
*/
static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *value)
static inline u32 *ppc_inst_next(u32 *location, u32 *value)
{
struct ppc_inst tmp;
tmp = ppc_inst_read(value);
return location + ppc_inst_len(tmp);
return (void *)location + ppc_inst_len(tmp);
}
static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
......@@ -178,6 +154,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
int copy_inst_from_kernel_nofault(struct ppc_inst *inst, struct ppc_inst *src);
int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src);
#endif /* _ASM_POWERPC_INST_H */
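
The net effect of the inst.h rework is that callers walk instruction memory as plain u32 words and let ppc_inst_read()/ppc_inst_len() handle 64-bit prefixed instructions. A standalone sketch of that walk; OP_PREFIX is the primary opcode value used above (1 on ISA v3.1), and the buffer contents are made up:

#include <stdint.h>
#include <stdio.h>

#define OP_PREFIX 1

/* A prefixed instruction starts with primary opcode 1 and occupies two words. */
static int insn_words(const uint32_t *p)
{
	return ((*p >> 26) == OP_PREFIX) ? 2 : 1;
}

int main(void)
{
	/* Example buffer: one ordinary word, then a prefixed pair (values invented). */
	uint32_t buf[] = { 0x60000000, 0x04000000, 0x38210010 };
	const uint32_t *p = buf;
	const uint32_t *end = buf + sizeof(buf) / sizeof(buf[0]);

	while (p < end) {
		printf("insn at word +%td is %d word(s) long\n",
		       p - buf, insn_words(p));
		p += insn_words(p);	/* mirrors ppc_inst_next() */
	}
	return 0;
}
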
......@@ -73,13 +73,47 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);
DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
if (regs->msr & MSR_PR)
return false;
if (regs->nip >= (unsigned long)__end_soft_masked)
return false;
return search_kernel_soft_mask_table(regs->nip);
}
static inline void srr_regs_clobbered(void)
{
local_paca->srr_valid = 0;
local_paca->hsrr_valid = 0;
}
#else
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
return false;
}
static inline void srr_regs_clobbered(void)
{
}
#endif
static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
/* Can avoid a test-and-clear because NMIs do not call this */
clear_thread_local_flags(_TLF_NAPPING);
regs->nip = (unsigned long)power4_idle_nap_return;
regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
}
#endif
}
......@@ -129,9 +163,18 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
if (TRAP(regs) != INTERRUPT_PROGRAM)
if (TRAP(regs) != INTERRUPT_PROGRAM) {
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
BUG_ON(is_implicit_soft_masked(regs));
}
#ifdef CONFIG_PPC_BOOK3S
/* Move this under a debugging check */
if (arch_irq_disabled_regs(regs))
BUG_ON(search_kernel_restart_table(regs->nip));
#endif
}
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif
booke_restore_dbcr0();
......@@ -186,6 +229,7 @@ struct interrupt_nmi_state {
u8 irq_soft_mask;
u8 irq_happened;
u8 ftrace_enabled;
u64 softe;
#endif
};
......@@ -211,6 +255,7 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
#ifdef CONFIG_PPC64
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;
state->softe = regs->softe;
/*
* Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
......@@ -220,12 +265,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
regs->nip < (unsigned long)__end_interrupts) {
// Kernel code running below __end_interrupts is
// implicitly soft-masked.
if (is_implicit_soft_masked(regs)) {
// Adjust regs->softe soft implicit soft-mask, so
// arch_irq_disabled_regs(regs) behaves as expected.
regs->softe = IRQS_ALL_DISABLED;
}
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
/* Don't do any per-CPU operations until interrupt state is fixed */
......@@ -258,11 +304,20 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct inter
*/
#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
if (arch_irq_disabled_regs(regs)) {
unsigned long rst = search_kernel_restart_table(regs->nip);
if (rst)
regs_set_return_ip(regs, rst);
}
#endif
if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);
/* Check we didn't change the pending interrupt mask. */
WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
regs->softe = state->softe;
local_paca->irq_happened = state->irq_happened;
local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
......
......@@ -5,14 +5,6 @@
#define KUAP_READ 1
#define KUAP_WRITE 2
#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
/*
* For prevent_user_access() only.
* Use the current saved situation instead of the to/from/size params.
* Used on book3s/32
*/
#define KUAP_CURRENT_READ 4
#define KUAP_CURRENT_WRITE 8
#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup.h>
......@@ -46,10 +38,7 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
void kuep_lock(void);
void kuep_unlock(void);
#else
#ifndef CONFIG_PPC_BOOK3S_32
static inline void kuep_lock(void) { }
static inline void kuep_unlock(void) { }
#endif
......@@ -83,8 +72,7 @@ static inline unsigned long kuap_get_and_assert_locked(void)
#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline void prevent_user_access(unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
......@@ -96,53 +84,53 @@ static __always_inline void setup_kup(void)
setup_kuap(disable_kuap);
}
static inline void allow_read_from_user(const void __user *from, unsigned long size)
static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
barrier_nospec();
allow_user_access(NULL, from, size, KUAP_READ);
}
static inline void allow_write_to_user(void __user *to, unsigned long size)
static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
{
allow_user_access(to, NULL, size, KUAP_WRITE);
}
static inline void allow_read_write_user(void __user *to, const void __user *from,
unsigned long size)
static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
unsigned long size)
{
barrier_nospec();
allow_user_access(to, from, size, KUAP_READ_WRITE);
}
static inline void prevent_read_from_user(const void __user *from, unsigned long size)
static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
{
prevent_user_access(NULL, from, size, KUAP_READ);
prevent_user_access(KUAP_READ);
}
static inline void prevent_write_to_user(void __user *to, unsigned long size)
static __always_inline void prevent_write_to_user(void __user *to, unsigned long size)
{
prevent_user_access(to, NULL, size, KUAP_WRITE);
prevent_user_access(KUAP_WRITE);
}
static inline void prevent_read_write_user(void __user *to, const void __user *from,
unsigned long size)
static __always_inline void prevent_read_write_user(void __user *to, const void __user *from,
unsigned long size)
{
prevent_user_access(to, from, size, KUAP_READ_WRITE);
prevent_user_access(KUAP_READ_WRITE);
}
static inline void prevent_current_access_user(void)
static __always_inline void prevent_current_access_user(void)
{
prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
prevent_user_access(KUAP_READ_WRITE);
}
static inline void prevent_current_read_from_user(void)
static __always_inline void prevent_current_read_from_user(void)
{
prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
prevent_user_access(KUAP_READ);
}
static inline void prevent_current_write_to_user(void)
static __always_inline void prevent_current_write_to_user(void)
{
prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
prevent_user_access(KUAP_WRITE);
}
#endif /* !__ASSEMBLY__ */
......
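
The guards above are always used in matched pairs around the actual user access. A schematic of that pattern, assuming kernel context; this is a sketch, not code from this patch, with __put_user() as the usual uaccess primitive:

#include <linux/uaccess.h>	/* pulls in asm/kup.h; kernel context assumed */

/* Illustrative only: every user access is bracketed by the KUAP guards. */
static int example_put_user_u32(u32 __user *to, u32 val)
{
	int err;

	allow_write_to_user(to, sizeof(*to));	/* open the user-access window */
	err = __put_user(val, to);		/* the actual store to user memory */
	prevent_write_to_user(to, sizeof(*to));	/* close it again */

	return err;
}
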
......@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
return static_branch_unlikely(&kvm_guest);
}
bool check_kvm_guest(void);
int check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
static inline bool check_kvm_guest(void) { return false; }
static inline int check_kvm_guest(void) { return 0; }
#endif
#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
......@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
struct pt_regs *regs = ftrace_get_regs(fregs);
regs->nip = ip;
regs_set_return_ip(regs, ip);
}
#define klp_get_ftrace_location klp_get_ftrace_location
......
......@@ -220,7 +220,7 @@ enum {
#elif defined(CONFIG_44x)
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
#endif
#if defined(CONFIG_E200) || defined(CONFIG_E500)
#ifdef CONFIG_E500
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
......@@ -324,7 +324,6 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
}
#endif /* !CONFIG_DEBUG_VM */
#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
......@@ -334,17 +333,6 @@ static inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
#else
static inline bool radix_enabled(void)
{
return false;
}
static inline bool early_radix_enabled(void)
{
return false;
}
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
......@@ -357,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
return false;
}
#endif
static inline bool strict_module_rwx_enabled(void)
{
return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
}
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
......
......@@ -57,7 +57,6 @@ static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
......
......@@ -9,10 +9,22 @@
#ifndef __ASSEMBLY__
#include <linux/jump_label.h>
#include <asm/reg.h>
extern struct static_key_false disable_kuap_key;
static __always_inline bool kuap_is_disabled(void)
{
return static_branch_unlikely(&disable_kuap_key);
}
static inline void kuap_save_and_lock(struct pt_regs *regs)
{
if (kuap_is_disabled())
return;
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
......@@ -23,12 +35,20 @@ static inline void kuap_user_restore(struct pt_regs *regs)
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, regs->kuap);
}
static inline unsigned long kuap_get_and_assert_locked(void)
{
unsigned long kuap = mfspr(SPRN_MD_AP);
unsigned long kuap;
if (kuap_is_disabled())
return MD_APG_INIT;
kuap = mfspr(SPRN_MD_AP);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
......@@ -38,25 +58,35 @@ static inline unsigned long kuap_get_and_assert_locked(void)
static inline void kuap_assert_locked(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && !kuap_is_disabled())
kuap_get_and_assert_locked();
}
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
static inline void prevent_user_access(unsigned long dir)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = mfspr(SPRN_MD_AP);
unsigned long flags;
if (kuap_is_disabled())
return MD_APG_INIT;
flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
......@@ -65,12 +95,18 @@ static inline unsigned long prevent_user_access_return(void)
static inline void restore_user_access(unsigned long flags)
{
if (kuap_is_disabled())
return;
mtspr(SPRN_MD_AP, flags);
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
if (kuap_is_disabled())
return false;
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}
......
......@@ -113,6 +113,7 @@ typedef struct {
/* patch sites */
extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
extern s32 patch__tlb_44x_kuep, patch__tlb_47x_kuep;
#endif /* !__ASSEMBLY__ */
......
......@@ -149,11 +149,9 @@ struct paca_struct {
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S
mm_context_id_t mm_ctx_id;
#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
unsigned long mm_ctx_slb_addr_limit;
#else
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
......@@ -167,8 +165,15 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
#ifdef CONFIG_PPC64
u64 exit_save_r1; /* Syscall/interrupt R1 save */
#endif
#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
u8 hsrr_valid; /* HSRRs set for HRFID */
u8 srr_valid; /* SRRs set for RFID */
#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
......
......@@ -41,6 +41,10 @@ struct mm_struct;
#ifndef __ASSEMBLY__
#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
......@@ -72,6 +76,7 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
void poking_init(void);
extern unsigned long ioremap_bot;
......
......@@ -76,6 +76,40 @@
#define __REGA0_R30 30
#define __REGA0_R31 31
/* For use with PPC_RAW_() macros */
#define _R0 0
#define _R1 1
#define _R2 2
#define _R3 3
#define _R4 4
#define _R5 5
#define _R6 6
#define _R7 7
#define _R8 8
#define _R9 9
#define _R10 10
#define _R11 11
#define _R12 12
#define _R13 13
#define _R14 14
#define _R15 15
#define _R16 16
#define _R17 17
#define _R18 18
#define _R19 19
#define _R20 20
#define _R21 21
#define _R22 22
#define _R23 23
#define _R24 24
#define _R25 25
#define _R26 26
#define _R27 27
#define _R28 28
#define _R29 29
#define _R30 30
#define _R31 31
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
......@@ -222,13 +256,11 @@
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
#define PPC_INST_ISYNC 0x4c00012c
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MTMSRD 0x7c000164
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_RFEBB 0x4c000124
......@@ -241,10 +273,10 @@
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_SC 0x44000002
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
#define PPC_INST_SETB 0x7c000100
#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_TRECHKPT 0x7c0007dd
......@@ -252,18 +284,9 @@
#define PPC_INST_TSR 0x7c0005dd
#define PPC_INST_LD 0xe8000000
#define PPC_INST_STD 0xf8000000
#define PPC_INST_MFLR 0x7c0802a6
#define PPC_INST_MTCTR 0x7c0903a6
#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
#define PPC_INST_BLR 0x4e800020
#define PPC_INST_BCTR 0x4e800420
#define PPC_INST_BCTRL 0x4e800421
#define PPC_INST_DIVD 0x7c0003d2
#define PPC_INST_RLDICR 0x78000004
#define PPC_INST_ORI 0x60000000
#define PPC_INST_ORIS 0x64000000
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BL 0x48000001
#define PPC_INST_BRANCH_COND 0x40800000
......@@ -323,6 +346,8 @@
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
......@@ -383,6 +408,10 @@
#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
#define PPC_RAW_SC() (0x44000002)
#define PPC_RAW_SYNC() (0x7c0004ac)
#define PPC_RAW_ISYNC() (0x4c00012c)
/*
* Define what the VSX XX1 form instructions will look like, then add
* the 128 bit load store instructions based on that.
......@@ -404,10 +433,10 @@
#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_PLXVP(xtp, i, a, pr) \
((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i)))
#define PPC_RAW_PSTXVP(xsp, i, a, pr) \
((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i)))
#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
#define PPC_RAW_NAP (0x4c000364)
#define PPC_RAW_SLEEP (0x4c0003a4)
#define PPC_RAW_WINKLE (0x4c0003e4)
......@@ -445,16 +474,17 @@
#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_NOP() (PPC_INST_NOP)
#define PPC_RAW_BLR() (PPC_INST_BLR)
#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
#define PPC_RAW_BLR() (0x4e800020)
#define PPC_RAW_BLRL() (0x4e800021)
#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
#define PPC_RAW_MFLR(t) (PPC_INST_MFLR | ___PPC_RT(t))
#define PPC_RAW_BCTR() (PPC_INST_BCTR)
#define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r))
#define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
#define PPC_RAW_BCTR() (0x4e800420)
#define PPC_RAW_BCTRL() (0x4e800421)
#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
#define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
......@@ -499,8 +529,8 @@
#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
#define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
......@@ -519,7 +549,7 @@
(0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
#define PPC_RAW_RLDICR(d, a, i, me) (PPC_INST_RLDICR | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
......@@ -533,6 +563,8 @@
#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
#define PPC_RAW_EIEIO() (0x7c0006ac)
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
......
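The PPC_RAW_*() encodings added above take over from the removed PPC_INST_* constants as the way instruction words are built. Below is a minimal sketch of the intended consumption, mirroring what the kdump trampoline and jump-label hunks later in this series do with PPC_RAW_NOP(); the patch target p and the choice of li r3,1 are illustrative assumptions, not part of the commit.

#include <asm/code-patching.h>
#include <asm/inst.h>
#include <asm/ppc-opcode.h>

/* Patch a single instruction word built from the raw encoding macros. */
static int example_patch_li(u32 *p)
{
	/* li r3,1 is addi r3,0,1; PPC_RAW_LI() wraps PPC_RAW_ADDI(r, 0, i) */
	return patch_instruction(p, ppc_inst(PPC_RAW_LI(3, 1)));
}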
......@@ -762,6 +762,21 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
#define SOFT_MASK_TABLE(_start, _end) \
stringify_in_c(.section __soft_mask_table,"a";)\
stringify_in_c(.balign 8;) \
stringify_in_c(.llong (_start);) \
stringify_in_c(.llong (_end);) \
stringify_in_c(.previous)
#define RESTART_TABLE(_start, _end, _target) \
stringify_in_c(.section __restart_table,"a";)\
stringify_in_c(.balign 8;) \
stringify_in_c(.llong (_start);) \
stringify_in_c(.llong (_end);) \
stringify_in_c(.llong (_target);) \
stringify_in_c(.previous)
#ifdef CONFIG_PPC_FSL_BOOK3E
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
......
......@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
regs->msr |= MSR_SINGLESTEP;
regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* We turn off Critical Input Exception(CE) to ensure that the single
* step will be for the instruction we have the probe on; if we don't,
* it is possible we'd get the single step reported for CE.
*/
regs->msr &= ~MSR_CE;
regs_set_return_msr(regs, regs->msr & ~MSR_CE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
......
......@@ -276,7 +276,15 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
#ifdef CONFIG_PPC32
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
#define INIT_THREAD { \
.ksp = INIT_SP, \
.pgdir = swapper_pg_dir, \
.kuap = ~0UL, /* KUAP_NONE */ \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
}
#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
.ksp = INIT_SP, \
.pgdir = swapper_pg_dir, \
......@@ -339,17 +347,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define spin_end() HMT_medium()
#define spin_until_cond(cond) \
do { \
if (unlikely(!(cond))) { \
spin_begin(); \
do { \
spin_cpu_relax(); \
} while (!(cond)); \
spin_end(); \
} \
} while (0)
#endif
/* Check that a certain kernel stack pointer is valid in task_struct p */
......
......@@ -71,6 +71,7 @@ struct ps3_dma_region_ops;
* @bus_addr: The 'translated' bus address of the region.
* @len: The length in bytes of the region.
* @offset: The offset from the start of memory of the region.
* @dma_mask: Device dma_mask.
* @ioid: The IOID of the device who owns this region
* @chunk_list: Opaque variable used by the ioc page manager.
* @region_ops: struct ps3_dma_region_ops - dma region operations
......@@ -85,6 +86,7 @@ struct ps3_dma_region {
enum ps3_dma_region_type region_type;
unsigned long len;
unsigned long offset;
u64 dma_mask;
/* driver variables (set by ps3_dma_region_create) */
unsigned long bus_addr;
......@@ -232,7 +234,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) || defined(CONFIG_PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
......
......@@ -48,11 +48,12 @@ struct pt_regs
unsigned long result;
};
};
#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
union {
struct {
#ifdef CONFIG_PPC64
unsigned long ppr;
unsigned long exit_result;
#endif
union {
#ifdef CONFIG_PPC_KUAP
......@@ -68,6 +69,7 @@ struct pt_regs
};
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
};
#endif
};
#endif
......@@ -122,6 +124,41 @@ struct pt_regs
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
#include <asm/paca.h>
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
static inline void set_return_regs_changed(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
local_paca->hsrr_valid = 0;
local_paca->srr_valid = 0;
#endif
}
static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
regs->nip = ip;
set_return_regs_changed();
}
static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
regs->msr = msr;
set_return_regs_changed();
}
static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
{
regs_set_return_ip(regs, regs->nip + offset);
}
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
......@@ -131,7 +168,7 @@ static inline unsigned long instruction_pointer(struct pt_regs *regs)
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->nip = val;
regs_set_return_ip(regs, val);
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
......@@ -144,15 +181,6 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
return 0;
}
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
......
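The new regs_set_return_ip()/regs_set_return_msr() accessors exist so that every update to the return NIP or MSR also calls set_return_regs_changed(), which invalidates the cached SRR/HSRR state on Book3S-64. A minimal sketch of the calling pattern the kgdb, kprobes and breakpoint hunks below convert to; the function name and the fixed 4-byte step are assumptions for illustration only.

#include <asm/ptrace.h>
#include <asm/reg.h>

/* After emulating one 4-byte instruction, advance the return NIP and
 * clear single-step in the return MSR without writing regs->nip or
 * regs->msr directly.
 */
static void example_emulate_and_advance(struct pt_regs *regs)
{
	regs_add_return_ip(regs, 4);
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
}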
......@@ -393,6 +393,7 @@
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
#define SPRN_TRIG2 0x372
#define SPRN_PMCR 0x374 /* Power Management Control Register */
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
......@@ -1435,8 +1436,6 @@ static inline void mtsr(u32 val, u32 idx)
}
#endif
#define proc_trap() asm volatile("trap")
extern unsigned long current_stack_frame(void);
register unsigned long current_stack_pointer asm("r1");
......@@ -1447,16 +1446,6 @@ extern void scom970_write(unsigned int address, unsigned long value);
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
static inline void update_power8_hid0(unsigned long hid0)
{
/*
* The HID0 update on Power8 should at the very least be
* preceded by a SYNC instruction followed by an ISYNC
* instruction
*/
asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
......@@ -92,6 +92,9 @@ static inline bool security_ftr_enabled(u64 feature)
// The L1-D cache should be flushed after user accesses from the kernel
#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
// The STF flush should be executed on privilege state switch
#define SEC_FTR_STF_BARRIER 0x0000000000010000ull
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
......@@ -99,6 +102,7 @@ static inline bool security_ftr_enabled(u64 feature)
SEC_FTR_BNDS_CHK_SPEC_BAR | \
SEC_FTR_L1D_FLUSH_ENTRY | \
SEC_FTR_L1D_FLUSH_UACCESS | \
SEC_FTR_STF_BARRIER | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_SET_MEMORY_H
#define _ASM_POWERPC_SET_MEMORY_H
#define SET_MEMORY_RO 0
#define SET_MEMORY_RW 1
#define SET_MEMORY_NX 2
#define SET_MEMORY_X 3
int change_memory_attr(unsigned long addr, int numpages, long action);
static inline int set_memory_ro(unsigned long addr, int numpages)
{
return change_memory_attr(addr, numpages, SET_MEMORY_RO);
}
static inline int set_memory_rw(unsigned long addr, int numpages)
{
return change_memory_attr(addr, numpages, SET_MEMORY_RW);
}
static inline int set_memory_nx(unsigned long addr, int numpages)
{
return change_memory_attr(addr, numpages, SET_MEMORY_NX);
}
static inline int set_memory_x(unsigned long addr, int numpages)
{
return change_memory_attr(addr, numpages, SET_MEMORY_X);
}
int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
#endif
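set_memory.h is the new header backing ARCH_HAS_SET_MEMORY. A minimal sketch of a caller, following the same pattern alloc_insn_page() uses further down for kprobe instruction slots; the page-aligned kernel virtual address is an assumption.

#include <asm/set_memory.h>

/* Mark one page read-only and executable; each helper operates on
 * whole pages, so numpages is 1 here.
 */
static int example_protect_one_page(unsigned long addr)
{
	int err;

	err = set_memory_ro(addr, 1);
	if (err)
		return err;
	return set_memory_x(addr, 1);
}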
......@@ -10,7 +10,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern bool init_mem_is_free;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
......
......@@ -13,12 +13,11 @@ struct pt_regs;
* we don't allow putting a breakpoint on an mtmsrd instruction.
* Similarly we don't allow breakpoints on rfid instructions.
* These macros tell us if an instruction is a mtmsrd or rfid.
* Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
* and an mtmsrd (64-bit).
* Note that these return true for both mtmsr/rfi (32-bit)
* and mtmsrd/rfid (64-bit).
*/
#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
#define IS_RFI(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
......
......@@ -24,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
struct ppc_inst insn;
struct ppc_inst ixol;
u32 insn[2];
u32 ixol[2];
};
};
......
......@@ -5,8 +5,10 @@
#ifndef _ASM_POWERPC_VAS_H
#define _ASM_POWERPC_VAS_H
struct vas_window;
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/icswx.h>
#include <uapi/asm/vas-api.h>
/*
* Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
......@@ -48,6 +50,64 @@ enum vas_cop_type {
VAS_COP_TYPE_MAX,
};
/*
* User space VAS windows are opened by tasks and take references
* to pid and mm until windows are closed.
* Stores pid, mm, and tgid for each window.
*/
struct vas_user_win_ref {
struct pid *pid; /* PID of owner */
struct pid *tgid; /* Thread group ID of owner */
struct mm_struct *mm; /* Linux process mm_struct */
};
/*
* Common VAS window struct on PowerNV and PowerVM
*/
struct vas_window {
u32 winid;
u32 wcreds_max; /* Window credits */
enum vas_cop_type cop;
struct vas_user_win_ref task_ref;
char *dbgname;
struct dentry *dbgdir;
};
/*
* User space window operations used for powernv and powerVM
*/
struct vas_user_win_ops {
struct vas_window * (*open_win)(int vas_id, u64 flags,
enum vas_cop_type);
u64 (*paste_addr)(struct vas_window *);
int (*close_win)(struct vas_window *);
};
static inline void put_vas_user_win_ref(struct vas_user_win_ref *ref)
{
/* Drop references to pid, tgid, and mm */
put_pid(ref->pid);
put_pid(ref->tgid);
if (ref->mm)
mmdrop(ref->mm);
}
static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
{
mm_context_add_vas_window(ref->mm);
/*
* Even a process that has no foreign real address mapping can
* use an unpaired COPY instruction (to no real effect). Issue
* CP_ABORT to clear any pending COPY and prevent a covert
* channel.
*
* __switch_to() will issue CP_ABORT on future context switches
* if process / thread has any open VAS window (Use
* current->mm->context.vas_windows).
*/
asm volatile(PPC_CP_ABORT);
}
/*
* Receive window attributes specified by the (in-kernel) owner of window.
*/
......@@ -100,6 +160,7 @@ struct vas_tx_win_attr {
bool rx_win_ord_mode;
};
#ifdef CONFIG_PPC_POWERNV
/*
* Helper to map a chip id to VAS id.
* For POWER9, this is a 1:1 mapping. In the future this maybe a 1:N
......@@ -162,6 +223,43 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
const char *name);
void vas_unregister_api_powernv(void);
#endif
#ifdef CONFIG_PPC_PSERIES
/* VAS Capabilities */
#define VAS_GZIP_QOS_FEAT 0x1
#define VAS_GZIP_DEF_FEAT 0x2
#define VAS_GZIP_QOS_FEAT_BIT PPC_BIT(VAS_GZIP_QOS_FEAT) /* Bit 1 */
#define VAS_GZIP_DEF_FEAT_BIT PPC_BIT(VAS_GZIP_DEF_FEAT) /* Bit 2 */
/* NX Capabilities */
#define VAS_NX_GZIP_FEAT 0x1
#define VAS_NX_GZIP_FEAT_BIT PPC_BIT(VAS_NX_GZIP_FEAT) /* Bit 1 */
/*
* These structs are used to retrieve overall VAS capabilities that
* the hypervisor provides.
*/
struct hv_vas_all_caps {
__be64 descriptor;
__be64 feat_type;
} __packed __aligned(0x1000);
struct vas_all_caps {
u64 descriptor;
u64 feat_type;
};
int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result);
int vas_register_api_pseries(struct module *mod,
enum vas_cop_type cop_type, const char *name);
void vas_unregister_api_pseries(void);
#endif
/*
* Register / unregister coprocessor type to VAS API which will be exported
* to user space. Applications can use this API to open / close window
......@@ -171,7 +269,12 @@ int vas_paste_crb(struct vas_window *win, int offset, bool re);
* used for others in future.
*/
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
const char *name);
const char *name,
const struct vas_user_win_ops *vops);
void vas_unregister_coproc_api(void);
int get_vas_user_win_ref(struct vas_user_win_ref *task_ref);
void vas_update_csb(struct coprocessor_request_block *crb,
struct vas_user_win_ref *task_ref);
void vas_dump_crb(struct coprocessor_request_block *crb);
#endif /* __ASM_POWERPC_VAS_H */
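struct vas_user_win_ops is the new common glue between the user-space VAS API and a platform backend (PowerNV or pseries). A hedged sketch of a backend registering its callbacks through the extended vas_register_coproc_api(); the example_* stubs, the VAS_COP_TYPE_GZIP choice and the "nx-gzip" name are illustrative and not taken from this commit.

#include <linux/err.h>
#include <linux/module.h>
#include <asm/vas.h>

static struct vas_window *example_open_win(int vas_id, u64 flags,
					   enum vas_cop_type cop_type)
{
	return ERR_PTR(-ENODEV);	/* a real backend allocates a window here */
}

static u64 example_paste_addr(struct vas_window *win)
{
	return 0;			/* a real backend returns the paste MMIO address */
}

static int example_close_win(struct vas_window *win)
{
	return 0;
}

static const struct vas_user_win_ops example_vops = {
	.open_win	= example_open_win,
	.paste_addr	= example_paste_addr,
	.close_win	= example_close_win,
};

static int __init example_register(void)
{
	return vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
				       "nx-gzip", &example_vops);
}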
......@@ -65,8 +65,12 @@ struct icp_ops {
extern const struct icp_ops *icp_ops;
#ifdef CONFIG_PPC_ICS_NATIVE
/* Native ICS */
extern int ics_native_init(void);
#else
static inline int ics_native_init(void) { return -ENODEV; }
#endif
/* RTAS ICS */
#ifdef CONFIG_PPC_ICS_RTAS
......
......@@ -77,6 +77,9 @@
/* Indicate that the 'dimm_fuel_gauge' field is valid */
#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1
/* Indicate that the 'dimm_dsc' field is valid */
#define PDSM_DIMM_DSC_VALID 2
/*
* Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH
* Various flags indicate the health status of the dimm.
......@@ -105,6 +108,9 @@ struct nd_papr_pdsm_health {
/* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */
__u16 dimm_fuel_gauge;
/* Extension flag PDSM_DIMM_DSC_VALID */
__u64 dimm_dsc;
};
__u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
};
......
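PDSM_DIMM_DSC_VALID gates the new dimm_dsc payload field the same way the fuel-gauge flag gates dimm_fuel_gauge. A hedged consumer-side sketch of checking it; the extension_flags field name and the way the health payload is obtained are assumptions drawn from the surrounding struct, not shown in this hunk.

#include <stdio.h>
#include <asm/papr_pdsm.h>	/* uapi header, assumed include path for userspace */

/* Print the dsc value (dirty shutdown count) if the hypervisor reported it. */
static void example_print_dsc(const struct nd_papr_pdsm_health *h)
{
	if (h->extension_flags & PDSM_DIMM_DSC_VALID)
		printf("dimm dsc: %llu\n", (unsigned long long)h->dimm_dsc);
}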
......@@ -13,11 +13,15 @@
#define VAS_MAGIC 'v'
#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
/* Flags to VAS TX open window ioctl */
/* To allocate a window with QoS credit, otherwise use default credit */
#define VAS_TX_WIN_FLAG_QOS_CREDIT 0x0000000000000001
struct vas_tx_win_open_attr {
__u32 version;
__s16 vas_id; /* specific instance of vas or -1 for default */
__u16 reserved1;
__u64 flags; /* Future use */
__u64 flags;
__u64 reserved2[6];
};
......
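VAS_TX_WIN_FLAG_QOS_CREDIT gives the previously reserved flags field a first real use. A hedged user-space sketch of opening a send window with the ioctl; the /dev/crypto/nx-gzip path and the version value of 1 are assumptions about the NX-GZIP character device, not defined in this hunk.

#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <asm/vas-api.h>	/* uapi header, assumed include path for userspace */

/* Open the device and request a send window on the default VAS instance
 * with default credits; pass VAS_TX_WIN_FLAG_QOS_CREDIT in flags to use
 * a QoS credit instead.
 */
static int example_open_default_window(void)
{
	struct vas_tx_win_open_attr attr;
	int fd = open("/dev/crypto/nx-gzip", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.version = 1;
	attr.vas_id = -1;	/* let the kernel pick an instance */
	attr.flags = 0;

	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
		return -1;

	return fd;		/* mmap() on this fd maps the paste address */
}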
......@@ -86,11 +86,7 @@ int main(void)
OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
OFFSET(MMCONTEXTID, mm_struct, context.id);
#ifdef CONFIG_PPC64
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
#else
#ifdef CONFIG_PPC32
#ifdef CONFIG_PPC_RTAS
OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif
......@@ -119,7 +115,6 @@ int main(void)
#ifdef CONFIG_ALTIVEC
OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
OFFSET(THREAD_USED_VR, thread_struct, used_vr);
OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
......@@ -150,22 +145,15 @@ int main(void)
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
OFFSET(THREAD_ACC, thread_struct, acc);
OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
OFFSET(KUAP, thread_struct, kuap);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
......@@ -185,19 +173,12 @@ int main(void)
sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
OFFSET(TI_PREEMPT, thread_info, preempt_count);
#ifdef CONFIG_PPC64
OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
OFFSET(PACAPACAINDEX, paca_struct, paca_index);
OFFSET(PACAPROCSTART, paca_struct, cpu_start);
OFFSET(PACAKSAVE, paca_struct, kstack);
......@@ -209,18 +190,13 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
OFFSET(PACASRR_VALID, paca_struct, srr_valid);
#endif
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
#ifdef CONFIG_PPC_BOOK3S
OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
#ifdef CONFIG_PPC_MM_SLICES
OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACAPGD, paca_struct, pgd);
......@@ -241,21 +217,9 @@ int main(void)
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACASLBCACHE, paca_struct, slb_cache);
OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
OFFSET(PACASTABRR, paca_struct, stab_rr);
OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
#ifdef CONFIG_PPC_MM_SLICES
OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
#else
OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
#endif /* CONFIG_PPC_MM_SLICES */
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
#ifdef CONFIG_PPC_PSERIES
OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
#endif
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
......@@ -264,9 +228,7 @@ int main(void)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
#endif /* CONFIG_PPC_BOOK3S_64 */
OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
......@@ -282,6 +244,9 @@ int main(void)
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
#ifdef CONFIG_PPC64
OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
......@@ -343,10 +308,6 @@ int main(void)
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
#ifdef CONFIG_PPC_KUAP
STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
#endif
#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
......@@ -368,10 +329,6 @@ int main(void)
#endif
#endif
#ifndef CONFIG_PPC64
OFFSET(MM_PGD, mm_struct, pgd);
#endif /* ! CONFIG_PPC64 */
/* About the CPU features table */
OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
......@@ -404,13 +361,6 @@ int main(void)
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
#else
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
#endif
DEFINE(PTE_SIZE, sizeof(pte_t));
#ifdef CONFIG_KVM
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
......@@ -482,11 +432,9 @@ int main(void)
OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_RADIX, kvm, arch.radix);
OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
......@@ -514,7 +462,6 @@ int main(void)
OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
......@@ -525,7 +472,6 @@ int main(void)
OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
......@@ -645,10 +591,8 @@ int main(void)
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
......@@ -756,7 +700,6 @@ int main(void)
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
#ifdef CONFIG_PPC_8xx
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
......
......@@ -35,7 +35,7 @@ void __init reserve_kdump_trampoline(void)
static void __init create_trampoline(unsigned long addr)
{
struct ppc_inst *p = (struct ppc_inst *)addr;
u32 *p = (u32 *)addr;
/* The maximum range of a single instruction branch, is the current
* instruction's address + (32 MB - 4) bytes. For the trampoline we
......@@ -45,8 +45,8 @@ static void __init create_trampoline(unsigned long addr)
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
patch_instruction(p, ppc_inst(PPC_INST_NOP));
patch_branch((void *)p + 4, addr + PHYSICAL_START, 0);
patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
void __init setup_kdump_trampoline(void)
......
......@@ -32,6 +32,7 @@
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>
#include "head_32.h"
......@@ -74,6 +75,24 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
.globl transfer_to_syscall
transfer_to_syscall:
stw r11, GPR1(r1)
stw r11, 0(r1)
mflr r12
stw r12, _LINK(r1)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
#endif
lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
SAVE_GPR(2, r1)
addi r12,r12,STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r1)
li r2, INTERRUPT_SYSCALL
stw r12,8(r1)
stw r2,_TRAP(r1)
SAVE_GPR(0, r1)
SAVE_4GPRS(3, r1)
SAVE_2GPRS(7, r1)
addi r2,r10,-THREAD
SAVE_NVGPRS(r1)
/* Calling convention has r9 = orig r0, r10 = regs */
......@@ -176,28 +195,6 @@ _GLOBAL(_switch)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
mfmsr r11
li r0,MSR_FP /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r0,r0,MSR_VEC@h /* Disable altivec */
mfspr r12,SPRN_VRSAVE /* save vrsave register value */
stw r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
oris r0,r0,MSR_SPE@h /* Disable SPE */
mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
stw r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
and. r0,r0,r11 /* FP or altivec or SPE enabled? */
beq+ 1f
andc r11,r11,r0
mtmsr r11
isync
1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
......@@ -218,19 +215,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
mr r3,r2
addi r2,r4,-THREAD /* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
lwz r0,THREAD+THREAD_VRSAVE(r2)
mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
lwz r0,THREAD+THREAD_SPEFSCR(r2)
mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
......
......@@ -38,9 +38,9 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
for (i = 0; i < (len / 4); i++) {
struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst);
patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst);
patch_instruction(epapr_ev_idle_start + i, inst);
#endif
}
......
......@@ -26,6 +26,10 @@
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>
/* 64e interrupt returns always use SRR registers */
#define fast_interrupt_return fast_interrupt_return_srr
#define interrupt_return interrupt_return_srr
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
* when taking special interrupts. For now we don't support that,
......@@ -897,6 +901,34 @@ kernel_dbg_exc:
bl unknown_exception
b interrupt_return
.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
ld r11,PACATOC(r13)
ld r14,__start___restart_table@got(r11)
ld r15,__stop___restart_table@got(r11)
#else
LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
#endif
300:
cmpd r14,r15
beq 302f
ld r11,0(r14)
cmpld r10,r11
blt 301f
ld r11,8(r14)
cmpld r10,r11
bge 301f
ld r11,16(r14)
b 303f
301:
addi r14,r14,24
b 300b
302:
li r11,0
303:
.endm
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
* accordingly and if the interrupt is level sensitive, we hard disable
......@@ -905,6 +937,9 @@ kernel_dbg_exc:
*/
.macro masked_interrupt_book3e paca_irq full_mask
std r14,PACA_EXGEN+EX_R14(r13)
std r15,PACA_EXGEN+EX_R15(r13)
lbz r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
......@@ -914,15 +949,23 @@ kernel_dbg_exc:
stb r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
rldicl r10,r11,48,1 /* clear MSR_EE */
rotldi r11,r10,16
xori r11,r11,MSR_EE /* clear MSR_EE */
mtspr SPRN_SRR1,r11
.endif
mfspr r10,SPRN_SRR0
SEARCH_RESTART_TABLE
cmpdi r11,0
beq 1f
mtspr SPRN_SRR0,r11 /* return to restart address */
1:
lwz r11,PACA_EXGEN+EX_CR(r13)
mtcr r11
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r14,PACA_EXGEN+EX_R14(r13)
ld r15,PACA_EXGEN+EX_R15(r13)
mfspr r13,SPRN_SPRG_GEN_SCRATCH
rfi
b .
......@@ -1282,7 +1325,12 @@ a2_tlbinit_code_start:
a2_tlbinit_after_linear_map:
/* Now we branch the new virtual address mapped by this entry */
#ifdef CONFIG_RELOCATABLE
ld r5,PACATOC(r13)
ld r3,1f@got(r5)
#else
LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
#endif
mtctr r3
bctr
......
......@@ -23,18 +23,20 @@ EXPORT_SYMBOL_GPL(powerpc_firmware_features);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
DEFINE_STATIC_KEY_FALSE(kvm_guest);
bool check_kvm_guest(void)
int __init check_kvm_guest(void)
{
struct device_node *hyper_node;
hyper_node = of_find_node_by_path("/hypervisor");
if (!hyper_node)
return false;
return 0;
if (!of_device_is_compatible(hyper_node, "linux,kvm"))
return false;
return 0;
static_branch_enable(&kvm_guest);
return true;
return 0;
}
core_initcall(check_kvm_guest); // before kvm_guest_init()
#endif
......@@ -103,6 +103,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
ori r12,r12,MSR_FP
or r12,r12,r4
std r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
li r4,0
stb r4,PACASRR_VALID(r13)
#endif
#endif
li r4,1
stb r4,THREAD_LOAD_FP(r5)
......
......@@ -142,42 +142,23 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
.macro SYSCALL_ENTRY trapno
mfspr r9, SPRN_SRR1
mfspr r10, SPRN_SRR0
mfspr r12, SPRN_SRR0
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */
lis r12, 1f@h
ori r12, r12, 1f@l
lis r10, 1f@h
ori r10, r10, 1f@l
mtspr SPRN_SRR1, r11
mtspr SPRN_SRR0, r12
mfspr r12,SPRN_SPRG_THREAD
mtspr SPRN_SRR0, r10
mfspr r10,SPRN_SPRG_THREAD
mr r11, r1
lwz r1,TASK_STACK-THREAD(r12)
tovirt(r12, r12)
lwz r1,TASK_STACK-THREAD(r10)
tovirt(r10, r10)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
rfi
1:
stw r11,GPR1(r1)
stw r11,0(r1)
mr r11, r1
stw r10,_NIP(r11)
mflr r10
stw r10, _LINK(r11)
mfcr r10
rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
stw r10,_CCR(r11) /* save registers */
#ifdef CONFIG_40x
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
#endif
lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
stw r2,GPR2(r11)
addi r10,r10,STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r11)
li r2, \trapno
stw r10,8(r11)
stw r2,_TRAP(r11)
SAVE_GPR(0, r11)
SAVE_4GPRS(3, r11)
SAVE_2GPRS(7, r11)
addi r2,r12,-THREAD
stw r12,_NIP(r1)
mfcr r12
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
stw r12,_CCR(r1)
b transfer_to_syscall /* jump to handler */
.endm
......
......@@ -701,39 +701,3 @@ _GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR0_RST_SYSTEM@h
mtspr SPRN_DBCR0,r13
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is the second parameter.
*/
lis r5, abatron_pteptrs@ha
stw r4, abatron_pteptrs@l + 0x4(r5)
#endif
sync
mtspr SPRN_PID,r3
isync /* Need an isync to flush shadow */
/* TLBs after changing PID */
blr
/* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
.align 12
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
/* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
*/
abatron_pteptrs:
.space 8
......@@ -532,6 +532,10 @@ finish_tlb_load_44x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
#ifdef CONFIG_PPC_KUEP
0: rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
patch_site 0b, patch__tlb_44x_kuep
#endif
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
......@@ -743,6 +747,10 @@ finish_tlb_load_47x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
#ifdef CONFIG_PPC_KUEP
0: rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
patch_site 0b, patch__tlb_47x_kuep
#endif
1: tlbwe r11,r13,2
/* Done...restore registers and get out of here.
......@@ -780,20 +788,6 @@ _GLOBAL(__fixup_440A_mcheck)
sync
blr
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is the second parameter.
*/
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r4, 0x4(r5)
#endif
mtspr SPRN_PID,r3
isync /* Force context change */
blr
/*
* Init CPU state. This is called at boot time or for secondary CPUs
* to setup initial TLB entries, setup IVORs, etc...
......@@ -1239,34 +1233,8 @@ head_start_common:
isync
blr
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
.align PAGE_SHIFT
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
.space PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
*/
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
/*
* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
*/
abatron_pteptrs:
.space 8
#ifdef CONFIG_SMP
.data
.align 12
temp_boot_stack:
.space 1024
......
......@@ -194,8 +194,9 @@ CLOSE_FIXED_SECTION(first_256B)
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
/* This value is used to mark exception frames on the stack. */
exception_marker:
.tc ID_72656773_68657265[TC],0x7265677368657265
.tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
.previous
/*
......@@ -211,6 +212,8 @@ OPEN_TEXT_SECTION(0x100)
USE_TEXT_SECTION()
#include "interrupt_64.S"
#ifdef CONFIG_PPC_BOOK3E
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
......@@ -997,23 +1000,3 @@ start_here_common:
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
.previous
/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the bss, which is page-aligned.
*/
.section ".bss"
/*
* pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
* We will need to find a better way to fix this
*/
.align 16
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
.globl empty_zero_page
empty_zero_page:
.space PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
......@@ -786,28 +786,3 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the data segment,
* which is page-aligned.
*/
.data
.globl sdata
sdata:
.globl empty_zero_page
.align PAGE_SHIFT
empty_zero_page:
.space PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
/* Room for two PTE table pointers, usually the kernel and current user
* pointer to their respective root page table (pgdir).
*/
.globl abatron_pteptrs
abatron_pteptrs:
.space 8
......@@ -518,8 +518,6 @@ BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mtspr SPRN_RPA,r1
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r2
BEGIN_MMU_FTR_SECTION
li r0,1
mfspr r1,SPRN_SPRG_603_LRU
......@@ -531,9 +529,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
mtcrf 0x80,r2
tlbld r3
rfi
MMU_FTR_SECTION_ELSE
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r2
tlbld r3
rfi
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
DataAddressInvalid:
mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
......@@ -607,9 +611,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
mtcrf 0x80,r2
tlbld r3
rfi
MMU_FTR_SECTION_ELSE
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
mtcrf 0x80,r2
tlbld r3
rfi
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception unknown_exception
......@@ -756,9 +766,6 @@ PerformanceMonitor:
* the kernel image to physical address PHYSICAL_START.
*/
relocate_kernel:
addis r9,r26,klimit@ha /* fetch klimit */
lwz r25,klimit@l(r9)
addis r25,r25,-KERNELBASE@h
lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */
......@@ -766,7 +773,8 @@ relocate_kernel:
addi r0,r3,4f@l /* jump to the address of 4f */
mtctr r0 /* in copy and do the rest. */
bctr /* jump to the copy */
4: mr r5,r25
4: lis r5,_end-KERNELBASE@h
ori r5,r5,_end-KERNELBASE@l
bl copy_and_flush /* copy the rest */
b turn_on_mmu
......@@ -924,12 +932,6 @@ _GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
mtctr r0 /* for context 0 */
li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
oris r3, r3, SR_NX@h /* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
oris r3, r3, SR_KS@h /* Set Ks */
#endif
li r4, 0
3: mtsrin r3, r4
addi r3, r3, 0x111 /* increment VSID */
......@@ -1023,58 +1025,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
mtspr SPRN_SRR1,r4
rfi
/*
* void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
*
* Set up the segment registers for a new context.
*/
_ENTRY(switch_mmu_context)
lwz r3,MMCONTEXTID(r4)
cmpwi cr0,r3,0
blt- 4f
mulli r3,r3,897 /* multiply context by skew factor */
rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
oris r3, r3, SR_NX@h /* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
oris r3, r3, SR_KS@h /* Set Ks */
#endif
li r0,NUM_USER_SEGMENTS
mtctr r0
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is passed as second argument.
*/
lwz r4, MM_PGD(r4)
lis r5, abatron_pteptrs@ha
stw r4, abatron_pteptrs@l + 0x4(r5)
#endif
BEGIN_MMU_FTR_SECTION
#ifndef CONFIG_BDI_SWITCH
lwz r4, MM_PGD(r4)
#endif
tophys(r4, r4)
rlwinm r4, r4, 4, 0xffff01ff
mtspr SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
li r4,0
isync
3:
mtsrin r3,r4
addi r3,r3,0x111 /* next VSID */
rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
sync
isync
blr
4: trap
EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
blr
EXPORT_SYMBOL(switch_mmu_context)
/*
* An undocumented "feature" of 604e requires that the v bit
* be cleared before changing BAT values.
......@@ -1256,61 +1206,4 @@ setup_usbgecko_bat:
blr
#endif
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
* We first disable the MMU, and then jump to the ROM reset address.
*
* r3 is the board info structure, r4 is the location for starting.
* I use this for building a small kernel that can load other kernels,
* rather than trying to write or rely on a rom monitor that can tftp load.
*/
.globl m8260_gorom
m8260_gorom:
mfmsr r0
rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
sync
mtmsr r0
sync
mfspr r11, SPRN_HID0
lis r10, 0
ori r10,r10,HID0_ICE|HID0_DCE
andc r11, r11, r10
mtspr SPRN_HID0, r11
isync
li r5, MSR_ME|MSR_RI
lis r6,2f@h
addis r6,r6,-KERNELBASE@h
ori r6,r6,2f@l
mtspr SPRN_SRR0,r6
mtspr SPRN_SRR1,r5
isync
sync
rfi
2:
mtlr r4
blr
#endif
/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the data segment,
* which is page-aligned.
*/
.data
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
/* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
*/
abatron_pteptrs:
.space 8
......@@ -128,37 +128,20 @@ BEGIN_FTR_SECTION
mr r12, r13
lwz r13, THREAD_NORMSAVE(2)(r10)
FTR_SECTION_ELSE
#endif
mfcr r12
#ifdef CONFIG_KVM_BOOKE_HV
ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
#else
mfcr r12
#endif
mfspr r9, SPRN_SRR1
BOOKE_CLEAR_BTB(r11)
lwz r11, TASK_STACK - THREAD(r10)
mr r11, r1
lwz r1, TASK_STACK - THREAD(r10)
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
stw r12, _CCR(r11) /* save various registers */
mflr r12
stw r12,_LINK(r11)
ALLOC_STACK_FRAME(r1, THREAD_SIZE - INT_FRAME_SIZE)
stw r12, _CCR(r1)
mfspr r12,SPRN_SRR0
stw r1, GPR1(r11)
stw r1, 0(r11)
mr r1, r11
stw r12,_NIP(r11)
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
lis r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
stw r2,GPR2(r11)
addi r12, r12, STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r11)
li r2, \trapno
stw r12, 8(r11)
stw r2,_TRAP(r11)
SAVE_GPR(0, r11)
SAVE_4GPRS(3, r11)
SAVE_2GPRS(7, r11)
addi r2,r10,-THREAD
stw r12,_NIP(r1)
b transfer_to_syscall /* jump to handler */
.endm
......
......@@ -985,20 +985,6 @@ _GLOBAL(abort)
mtspr SPRN_DBCR0,r13
isync
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is the second parameter.
*/
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r4, 0x4(r5)
#endif
mtspr SPRN_PID,r3
isync /* Force context change */
blr
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
......@@ -1226,26 +1212,3 @@ _GLOBAL(restore_to_as0)
*/
3: mr r3,r5
bl _start
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
.align 12
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
/*
* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
*/
abatron_pteptrs:
.space 8
......@@ -486,7 +486,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
return;
reset:
regs->msr &= ~MSR_SE;
regs_set_return_msr(regs, regs->msr & ~MSR_SE);
for (i = 0; i < nr_wp_slots(); i++) {
info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
__set_breakpoint(i, info);
......@@ -537,7 +537,7 @@ static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
current->thread.last_hit_ubp[i] = bp[i];
info[i] = NULL;
}
regs->msr |= MSR_SE;
regs_set_return_msr(regs, regs->msr | MSR_SE);
return false;
}
......
......@@ -121,6 +121,7 @@ void replay_soft_interrupts(void)
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
regs.msr |= MSR_EE;
again:
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
......@@ -217,6 +218,100 @@ static inline void replay_soft_interrupts_irqrestore(void)
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif
#ifdef CONFIG_CC_HAS_ASM_GOTO
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
/* Write the new soft-enabled value if it is a disable */
if (mask) {
irq_soft_mask_set(mask);
return;
}
/*
* After the stb, interrupts are unmasked and there are no interrupts
* pending replay. The restart sequence makes this atomic with
* respect to soft-masked interrupts. If this was just a simple code
* sequence, a soft-masked interrupt could become pending right after
* the comparison and before the stb.
*
* This allows interrupts to be unmasked without hard disabling, and
* also without new hard interrupts coming in ahead of pending ones.
*/
asm_volatile_goto(
"1: \n"
" lbz 9,%0(13) \n"
" cmpwi 9,0 \n"
" bne %l[happened] \n"
" stb 9,%1(13) \n"
"2: \n"
RESTART_TABLE(1b, 2b, 1b)
: : "i" (offsetof(struct paca_struct, irq_happened)),
"i" (offsetof(struct paca_struct, irq_soft_mask))
: "cr0", "r9"
: happened);
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(!(mfmsr() & MSR_EE));
return;
happened:
irq_happened = get_irq_happened();
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(!irq_happened);
if (irq_happened == PACA_IRQ_HARD_DIS) {
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(mfmsr() & MSR_EE);
irq_soft_mask_set(IRQS_ENABLED);
local_paca->irq_happened = 0;
__hard_irq_enable();
return;
}
/* Have interrupts to replay, need to hard disable first */
if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
if (!(mfmsr() & MSR_EE)) {
/*
* An interrupt could have come in and cleared
* MSR[EE] and set IRQ_HARD_DIS, so check
* IRQ_HARD_DIS again and warn if it is still
* clear.
*/
irq_happened = get_irq_happened();
WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
}
}
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
} else {
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
if (WARN_ON_ONCE(mfmsr() & MSR_EE))
__hard_irq_disable();
}
}
/*
* Disable preempt here, so that the below preempt_enable will
* perform resched if required (a replayed interrupt may set
* need_resched).
*/
preempt_disable();
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
replay_soft_interrupts_irqrestore();
local_paca->irq_happened = 0;
trace_hardirqs_on();
irq_soft_mask_set(IRQS_ENABLED);
__hard_irq_enable();
preempt_enable();
}
#else
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
......@@ -288,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
__hard_irq_enable();
preempt_enable();
}
#endif
EXPORT_SYMBOL(arch_local_irq_restore);
/*
......
......@@ -11,10 +11,10 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
struct ppc_inst *addr = (struct ppc_inst *)jump_entry_code(entry);
u32 *addr = (u32 *)jump_entry_code(entry);
if (type == JUMP_LABEL_JMP)
patch_branch(addr, jump_entry_target(entry), 0);
else
patch_instruction(addr, ppc_inst(PPC_INST_NOP));
patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
}
......@@ -147,7 +147,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 0;
if (*(u32 *)regs->nip == BREAK_INSTR)
regs->nip += BREAK_INSTR_SIZE;
regs_add_return_ip(regs, BREAK_INSTR_SIZE);
return 1;
}
......@@ -372,7 +372,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
regs->nip = pc;
regs_set_return_ip(regs, pc);
}
/*
......@@ -394,7 +394,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
case 'c':
/* handle the optional parameter */
if (kgdb_hex2long(&ptr, &addr))
linux_regs->nip = addr;
regs_set_return_ip(linux_regs, addr);
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
......@@ -402,9 +402,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DBCR0,
mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
linux_regs->msr |= MSR_DE;
regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
#else
linux_regs->msr |= MSR_SE;
regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
#endif
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
......@@ -417,11 +417,10 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
u32 instr, *addr = (u32 *)bpt->bpt_addr;
int err;
unsigned int instr;
struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
err = get_kernel_nofault(instr, (unsigned *) addr);
err = get_kernel_nofault(instr, addr);
if (err)
return err;
......@@ -429,7 +428,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return -EFAULT;
*(unsigned int *)bpt->saved_instr = instr;
*(u32 *)bpt->saved_instr = instr;
return 0;
}
......@@ -438,7 +437,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr = *(unsigned int *)bpt->saved_instr;
struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
u32 *addr = (u32 *)bpt->bpt_addr;
err = patch_instruction(addr, ppc_inst(instr));
if (err)
......
......@@ -39,7 +39,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* On powerpc, NIP is *before* this instruction for the
* pre handler
*/
regs->nip -= MCOUNT_INSN_SIZE;
regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
......@@ -48,7 +48,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* Emulate singlestep (and also recover regs->nip)
* as if there is a nop
*/
regs->nip += MCOUNT_INSN_SIZE;
regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
......
......@@ -19,11 +19,13 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/moduleloader.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
#include <asm/set_memory.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
......@@ -103,28 +105,42 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
return addr;
}
void *alloc_insn_page(void)
{
void *page;
page = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
if (strict_module_rwx_enabled()) {
set_memory_ro((unsigned long)page, 1);
set_memory_x((unsigned long)page, 1);
}
return page;
}
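Note (illustrative, not part of the diff): with strict module RWX the kprobe instruction page is made read-only and executable right after allocation, so probed instructions are written through patch_instruction(), which maps the page writable at a temporary address, rather than through the permanent mapping. A sketch with a made-up helper name:

#include <linux/kprobes.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

/* Made-up helper: copy a probed instruction into an RO+X slot. */
static int demo_fill_insn_slot(kprobe_opcode_t *slot, struct ppc_inst insn)
{
	return patch_instruction((u32 *)slot, insn);
}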
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
struct kprobe *prev;
struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
struct ppc_inst insn = ppc_inst_read(p->addr);
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n");
ret = -EINVAL;
} else if ((unsigned long)p->addr & ~PAGE_MASK &&
ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
preempt_disable();
prev = get_kprobe(p->addr - 1);
preempt_enable_no_resched();
if (prev &&
ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
......@@ -138,7 +154,7 @@ int arch_prepare_kprobe(struct kprobe *p)
}
if (!ret) {
patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
patch_instruction(p->ainsn.insn, insn);
p->opcode = ppc_inst_val(insn);
}
......@@ -149,13 +165,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
......@@ -178,7 +194,7 @@ static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs
* variant as values in regs could play a part in
* if the trap is taken or not
*/
regs->nip = (unsigned long)p->ainsn.insn;
regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
......@@ -228,7 +244,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
struct ppc_inst insn = ppc_inst_read(p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
......@@ -319,8 +335,9 @@ int kprobe_handler(struct pt_regs *regs)
kprobe_opcode_t insn = *p->ainsn.insn;
if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
/* Turn off 'trace' bits */
regs->msr &= ~MSR_SINGLESTEP;
regs->msr |= kcb->kprobe_saved_msr;
regs_set_return_msr(regs,
(regs->msr & ~MSR_SINGLESTEP) |
kcb->kprobe_saved_msr);
goto no_kprobe;
}
......@@ -415,7 +432,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* we end up emulating it in kprobe_handler(), which increments the nip
* again.
*/
regs->nip = orig_ret_address - 4;
regs_set_return_ip(regs, orig_ret_address - 4);
regs->link = orig_ret_address;
return 0;
......@@ -439,7 +456,7 @@ int kprobe_post_handler(struct pt_regs *regs)
if (!cur || user_mode(regs))
return 0;
len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
/* make sure we got here for instruction we have a kprobe on */
if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
return 0;
......@@ -450,8 +467,8 @@ int kprobe_post_handler(struct pt_regs *regs)
}
/* Adjust nip to after the single-stepped instruction */
regs->nip = (unsigned long)cur->addr + len;
regs->msr |= kcb->kprobe_saved_msr;
regs_set_return_ip(regs, (unsigned long)cur->addr + len);
regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
......@@ -490,9 +507,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->nip = (unsigned long)cur->addr;
regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
regs->msr |= kcb->kprobe_saved_msr;
regs_set_return_ip(regs, (unsigned long)cur->addr);
/* Turn off 'trace' bits */
regs_set_return_msr(regs,
(regs->msr & ~MSR_SINGLESTEP) |
kcb->kprobe_saved_msr);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
......@@ -506,7 +525,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->nip)) != NULL) {
regs->nip = extable_fixup(entry);
regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
......
......@@ -274,7 +274,7 @@ void mce_common_process_ue(struct pt_regs *regs,
entry = search_kernel_exception_table(regs->nip);
if (entry) {
mce_err->ignore_event = true;
regs->nip = extable_fixup(entry);
regs_set_return_ip(regs, extable_fixup(entry));
}
}
......
......@@ -388,9 +388,3 @@ _GLOBAL(start_secondary_resume)
bl start_secondary
b .
#endif /* CONFIG_SMP */
/*
* This routine is just here to keep GCC happy - sigh...
*/
_GLOBAL(__main)
blr
......@@ -92,12 +92,14 @@ int module_finalize(const Elf_Ehdr *hdr,
static __always_inline void *
__module_alloc(unsigned long size, unsigned long start, unsigned long end)
{
pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
/*
* Don't do huge page allocations for modules yet until more testing
* is done. STRICT_MODULE_RWX may require extra work to support this
* too.
*/
return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC,
return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
NUMA_NO_NODE, __builtin_return_address(0));
}
......
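Note (illustrative, not part of the diff): when strict module RWX is enabled, module memory is now allocated as non-executable PAGE_KERNEL and permissions are granted later through the set_memory API. A sketch of the flip applied to a module text range, with a made-up helper name:

#include <asm/set_memory.h>

/* Made-up helper: once relocations are applied, switch module text to
 * read-only and executable, numpages pages starting at addr. */
static void demo_protect_module_text(unsigned long addr, int numpages)
{
	set_memory_ro(addr, numpages);
	set_memory_x(addr, numpages);
}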
......@@ -145,10 +145,9 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
if (entry->jump[0] != (PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val)))
if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
return 0;
if (entry->jump[1] != (PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) |
PPC_LO(val)))
if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
return 0;
return 1;
}
......@@ -175,16 +174,10 @@ static uint32_t do_plt_call(void *location,
entry++;
}
/*
* lis r12, sym@ha
* addi r12, r12, sym@l
* mtctr r12
* bctr
*/
entry->jump[0] = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
entry->jump[1] = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);
entry->jump[2] = PPC_INST_MTCTR | __PPC_RS(R12);
entry->jump[3] = PPC_INST_BCTR;
entry->jump[0] = PPC_RAW_LIS(_R12, PPC_HA(val));
entry->jump[1] = PPC_RAW_ADDI(_R12, _R12, PPC_LO(val));
entry->jump[2] = PPC_RAW_MTCTR(_R12);
entry->jump[3] = PPC_RAW_BCTR();
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
......
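Note (illustrative, not part of the diff): the PPC_RAW_* helpers above encode the same four-instruction trampoline the removed comment spelled out. An equivalent sketch with a made-up helper name, annotated with the corresponding assembly:

#include <asm/ppc-opcode.h>

/*
 * Made-up helper: build the 32-bit PLT trampoline
 *   lis   r12, sym@ha
 *   addi  r12, r12, sym@l
 *   mtctr r12
 *   bctr
 */
static void demo_write_plt_stub(u32 *jump, unsigned long val)
{
	jump[0] = PPC_RAW_LIS(_R12, PPC_HA(val));
	jump[1] = PPC_RAW_ADDI(_R12, _R12, PPC_LO(val));
	jump[2] = PPC_RAW_MTCTR(_R12);
	jump[3] = PPC_RAW_BCTR();
}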
......@@ -346,10 +346,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
#ifdef CONFIG_PPC_BOOK3S
mm_context_t *context = &mm->context;
get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
......