Commit 24318ae8 authored by Linus Torvalds

Merge tag 'sh-for-5.16' of git://git.libc.org/linux-sh

Pull arch/sh updates from Rich Felker.

* tag 'sh-for-5.16' of git://git.libc.org/linux-sh:
  sh: pgtable-3level: Fix cast to pointer from integer of different size
  sh: fix READ/WRITE redefinition warnings
  sh: define __BIG_ENDIAN for math-emu
  sh: math-emu: drop unused functions
  sh: fix kconfig unmet dependency warning for FRAME_POINTER
  sh: Cleanup about SPARSE_IRQ
  sh: kdump: add some attribute to function
  maple: fix wrong return value of maple_bus_init().
  sh: boot: avoid unneeded rebuilds under arch/sh/boot/compressed/
  sh: boot: add intermediate vmlinux.bin* to targets instead of extra-y
  sh: boards: Fix the cacography in irq.c
  sh: check return code of request_irq
  sh: fix trivial misannotations
parents 6ea45c57 8518e694
...@@ -56,7 +56,6 @@ config SUPERH ...@@ -56,7 +56,6 @@ config SUPERH
select HAVE_STACKPROTECTOR select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS select HAVE_SYSCALL_TRACEPOINTS
select IRQ_FORCED_THREADING select IRQ_FORCED_THREADING
select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA select MODULES_USE_ELF_RELA
select NEED_SG_DMA_LENGTH select NEED_SG_DMA_LENGTH
select NO_DMA if !MMU && !DMA_COHERENT select NO_DMA if !MMU && !DMA_COHERENT
......
...@@ -54,6 +54,7 @@ config DUMP_CODE ...@@ -54,6 +54,7 @@ config DUMP_CODE
config DWARF_UNWINDER config DWARF_UNWINDER
bool "Enable the DWARF unwinder for stacktraces" bool "Enable the DWARF unwinder for stacktraces"
depends on DEBUG_KERNEL
select FRAME_POINTER select FRAME_POINTER
default n default n
help help
......
...@@ -26,8 +26,8 @@ enum { ...@@ -26,8 +26,8 @@ enum {
PCI_INTD, /* PCI int D */ PCI_INTD, /* PCI int D */
ATA, /* ATA */ ATA, /* ATA */
FATA, /* CF */ FATA, /* CF */
POWER, /* Power swtich */ POWER, /* Power switch */
BUTTON, /* Button swtich */ BUTTON, /* Button switch */
}; };
/* Vectors for LANDISK */ /* Vectors for LANDISK */
......
...@@ -27,8 +27,8 @@ suffix-$(CONFIG_KERNEL_XZ) := xz ...@@ -27,8 +27,8 @@ suffix-$(CONFIG_KERNEL_XZ) := xz
suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_KERNEL_LZO) := lzo
targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz \ targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz \
uImage.bz2 uImage.lzma uImage.xz uImage.lzo uImage.bin uImage.bz2 uImage.lzma uImage.xz uImage.lzo uImage.bin \
extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.xz vmlinux.bin.lzo
subdir- := compressed romimage subdir- := compressed romimage
......
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
ashiftrt.S
ashldi3.c
ashlsi3.S
ashrsi3.S
lshrsi3.S
vmlinux.bin.* vmlinux.bin.*
...@@ -5,12 +5,18 @@ ...@@ -5,12 +5,18 @@
# create a compressed vmlinux image from the original vmlinux # create a compressed vmlinux image from the original vmlinux
# #
targets := vmlinux vmlinux.bin vmlinux.bin.gz \ OBJECTS := head_32.o misc.o cache.o piggy.o \
vmlinux.bin.bz2 vmlinux.bin.lzma \ ashiftrt.o ashldi3.o ashrsi3.o ashlsi3.o lshrsi3.o
vmlinux.bin.xz vmlinux.bin.lzo \
head_32.o misc.o piggy.o # These were previously generated files. When you are building the kernel
# with O=, make sure to remove the stale files in the output tree. Otherwise,
# the build system wrongly compiles the stale ones.
ifdef building_out_of_srctree
$(shell rm -f $(addprefix $(obj)/, ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S lshrsi3.S))
endif
OBJECTS = $(obj)/head_32.o $(obj)/misc.o $(obj)/cache.o targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo $(OBJECTS)
GCOV_PROFILE := n GCOV_PROFILE := n
...@@ -33,21 +39,9 @@ ccflags-remove-$(CONFIG_MCOUNT) += -pg ...@@ -33,21 +39,9 @@ ccflags-remove-$(CONFIG_MCOUNT) += -pg
LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \ LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
-T $(obj)/../../kernel/vmlinux.lds -T $(obj)/../../kernel/vmlinux.lds
# KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
# Pull in the necessary libgcc bits from the in-kernel implementation.
#
lib1funcs-y := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S lshrsi3.S
lib1funcs-obj := \
$(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
KBUILD_CFLAGS += -I$(lib1funcs-dir) -DDISABLE_BRANCH_PROFILING
$(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE
$(call cmd,shipped)
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE $(obj)/vmlinux: $(addprefix $(obj)/, $(OBJECTS)) FORCE
$(call if_changed,ld) $(call if_changed,ld)
$(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.bin: vmlinux FORCE
......
/* SPDX-License-Identifier: GPL-2.0-only */
#include "../../lib/ashiftrt.S"
// SPDX-License-Identifier: GPL-2.0-only
#include "../../lib/ashldi3.c"
/* SPDX-License-Identifier: GPL-2.0-only */
#include "../../lib/ashlsi3.S"
/* SPDX-License-Identifier: GPL-2.0-only */
#include "../../lib/ashrsi3.S"
/* SPDX-License-Identifier: GPL-2.0-only */
#include "../../lib/lshrsi3.S"
...@@ -84,7 +84,8 @@ static inline __sum16 csum_fold(__wsum sum) ...@@ -84,7 +84,8 @@ static inline __sum16 csum_fold(__wsum sum)
*/ */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{ {
unsigned int sum, __dummy0, __dummy1; __wsum sum;
unsigned int __dummy0, __dummy1;
__asm__ __volatile__( __asm__ __volatile__(
"mov.l @%1+, %0\n\t" "mov.l @%1+, %0\n\t"
...@@ -197,6 +198,6 @@ static inline __wsum csum_and_copy_to_user(const void *src, ...@@ -197,6 +198,6 @@ static inline __wsum csum_and_copy_to_user(const void *src,
{ {
if (!access_ok(dst, len)) if (!access_ok(dst, len))
return 0; return 0;
return csum_partial_copy_generic((__force const void *)src, dst, len); return csum_partial_copy_generic(src, (__force void *)dst, len);
} }
#endif /* __ASM_SH_CHECKSUM_H */ #endif /* __ASM_SH_CHECKSUM_H */
...@@ -5,17 +5,6 @@ ...@@ -5,17 +5,6 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <asm/machvec.h> #include <asm/machvec.h>
/*
* Only legacy non-sparseirq platforms have to set a reasonably sane
* value here. sparseirq platforms allocate their irq_descs on the fly,
* so will expand automatically based on the number of registered IRQs.
*/
#ifdef CONFIG_SPARSE_IRQ
# define NR_IRQS 8
#else
# define NR_IRQS 512
#endif
/* /*
* This is a special IRQ number for indicating that no IRQ has been * This is a special IRQ number for indicating that no IRQ has been
* triggered and to simply ignore the IRQ dispatch. This is a special * triggered and to simply ignore the IRQ dispatch. This is a special
......
...@@ -13,6 +13,14 @@ ...@@ -13,6 +13,14 @@
#ifndef _SFP_MACHINE_H #ifndef _SFP_MACHINE_H
#define _SFP_MACHINE_H #define _SFP_MACHINE_H
#ifdef __BIG_ENDIAN__
#define __BYTE_ORDER __BIG_ENDIAN
#define __LITTLE_ENDIAN 0
#else
#define __BYTE_ORDER __LITTLE_ENDIAN
#define __BIG_ENDIAN 0
#endif
#define _FP_W_TYPE_SIZE 32 #define _FP_W_TYPE_SIZE 32
#define _FP_W_TYPE unsigned long #define _FP_W_TYPE unsigned long
#define _FP_WS_TYPE signed long #define _FP_WS_TYPE signed long
......
...@@ -68,7 +68,7 @@ struct __large_struct { unsigned long buf[100]; }; ...@@ -68,7 +68,7 @@ struct __large_struct { unsigned long buf[100]; };
({ \ ({ \
long __gu_err = -EFAULT; \ long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \ unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (likely(access_ok(__gu_addr, (size)))) \ if (likely(access_ok(__gu_addr, (size)))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \ (x) = (__force __typeof__(*(ptr)))__gu_val; \
...@@ -124,7 +124,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) ...@@ -124,7 +124,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
* Clear the area and return remaining number of bytes * Clear the area and return remaining number of bytes
* (on failure. Usually it's 0.) * (on failure. Usually it's 0.)
*/ */
__kernel_size_t __clear_user(void *addr, __kernel_size_t size); __kernel_size_t __clear_user(void __user *addr, __kernel_size_t size);
#define clear_user(addr,n) \ #define clear_user(addr,n) \
({ \ ({ \
......
...@@ -73,8 +73,9 @@ static void shx3_prepare_cpus(unsigned int max_cpus) ...@@ -73,8 +73,9 @@ static void shx3_prepare_cpus(unsigned int max_cpus)
BUILD_BUG_ON(SMP_MSG_NR >= 8); BUILD_BUG_ON(SMP_MSG_NR >= 8);
for (i = 0; i < SMP_MSG_NR; i++) for (i = 0; i < SMP_MSG_NR; i++)
request_irq(104 + i, ipi_interrupt_handler, if (request_irq(104 + i, ipi_interrupt_handler,
IRQF_PERCPU, "IPI", (void *)(long)i); IRQF_PERCPU, "IPI", (void *)(long)i))
pr_err("Failed to request irq %d\n", i);
for (i = 0; i < max_cpus; i++) for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true); set_cpu_present(i, true);
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf) size_t csize, unsigned long offset, int userbuf)
{ {
void *vaddr; void __iomem *vaddr;
if (!csize) if (!csize)
return 0; return 0;
...@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, ...@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
if (userbuf) { if (userbuf) {
if (copy_to_user(buf, (vaddr + offset), csize)) { if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) {
iounmap(vaddr); iounmap(vaddr);
return -EFAULT; return -EFAULT;
} }
......
...@@ -490,7 +490,7 @@ asmlinkage void do_address_error(struct pt_regs *regs, ...@@ -490,7 +490,7 @@ asmlinkage void do_address_error(struct pt_regs *regs,
inc_unaligned_user_access(); inc_unaligned_user_access();
oldfs = force_uaccess_begin(); oldfs = force_uaccess_begin();
if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1), if (copy_from_user(&instruction, (insn_size_t __user *)(regs->pc & ~1),
sizeof(instruction))) { sizeof(instruction))) {
force_uaccess_end(oldfs); force_uaccess_end(oldfs);
goto uspace_segv; goto uspace_segv;
...@@ -614,7 +614,7 @@ asmlinkage void do_reserved_inst(void) ...@@ -614,7 +614,7 @@ asmlinkage void do_reserved_inst(void)
unsigned short inst = 0; unsigned short inst = 0;
int err; int err;
get_user(inst, (unsigned short*)regs->pc); get_user(inst, (unsigned short __user *)regs->pc);
err = do_fpu_inst(inst, regs); err = do_fpu_inst(inst, regs);
if (!err) { if (!err) {
...@@ -699,9 +699,9 @@ asmlinkage void do_illegal_slot_inst(void) ...@@ -699,9 +699,9 @@ asmlinkage void do_illegal_slot_inst(void)
return; return;
#ifdef CONFIG_SH_FPU_EMU #ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short *)regs->pc + 1); get_user(inst, (unsigned short __user *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) { if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short *)regs->pc); get_user(inst, (unsigned short __user *)regs->pc);
if (!emulate_branch(inst, regs)) if (!emulate_branch(inst, regs))
return; return;
/* fault in branch.*/ /* fault in branch.*/
......
...@@ -51,8 +51,8 @@ ...@@ -51,8 +51,8 @@
#define Rn (regs->regs[n]) #define Rn (regs->regs[n])
#define Rm (regs->regs[m]) #define Rm (regs->regs[m])
#define WRITE(d,a) ({if(put_user(d, (typeof (d)*)a)) return -EFAULT;}) #define MWRITE(d,a) ({if(put_user(d, (typeof (d) __user *)a)) return -EFAULT;})
#define READ(d,a) ({if(get_user(d, (typeof (d)*)a)) return -EFAULT;}) #define MREAD(d,a) ({if(get_user(d, (typeof (d) __user *)a)) return -EFAULT;})
#define PACK_S(r,f) FP_PACK_SP(&r,f) #define PACK_S(r,f) FP_PACK_SP(&r,f)
#define UNPACK_S(f,r) FP_UNPACK_SP(f,&r) #define UNPACK_S(f,r) FP_UNPACK_SP(f,&r)
...@@ -157,11 +157,11 @@ fmov_idx_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, ...@@ -157,11 +157,11 @@ fmov_idx_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
{ {
if (FPSCR_SZ) { if (FPSCR_SZ) {
FMOV_EXT(n); FMOV_EXT(n);
READ(FRn, Rm + R0 + 4); MREAD(FRn, Rm + R0 + 4);
n++; n++;
READ(FRn, Rm + R0); MREAD(FRn, Rm + R0);
} else { } else {
READ(FRn, Rm + R0); MREAD(FRn, Rm + R0);
} }
return 0; return 0;
...@@ -173,11 +173,11 @@ fmov_mem_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, ...@@ -173,11 +173,11 @@ fmov_mem_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
{ {
if (FPSCR_SZ) { if (FPSCR_SZ) {
FMOV_EXT(n); FMOV_EXT(n);
READ(FRn, Rm + 4); MREAD(FRn, Rm + 4);
n++; n++;
READ(FRn, Rm); MREAD(FRn, Rm);
} else { } else {
READ(FRn, Rm); MREAD(FRn, Rm);
} }
return 0; return 0;
...@@ -189,12 +189,12 @@ fmov_inc_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, ...@@ -189,12 +189,12 @@ fmov_inc_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
{ {
if (FPSCR_SZ) { if (FPSCR_SZ) {
FMOV_EXT(n); FMOV_EXT(n);
READ(FRn, Rm + 4); MREAD(FRn, Rm + 4);
n++; n++;
READ(FRn, Rm); MREAD(FRn, Rm);
Rm += 8; Rm += 8;
} else { } else {
READ(FRn, Rm); MREAD(FRn, Rm);
Rm += 4; Rm += 4;
} }
...@@ -207,11 +207,11 @@ fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, ...@@ -207,11 +207,11 @@ fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
{ {
if (FPSCR_SZ) { if (FPSCR_SZ) {
FMOV_EXT(m); FMOV_EXT(m);
WRITE(FRm, Rn + R0 + 4); MWRITE(FRm, Rn + R0 + 4);
m++; m++;
WRITE(FRm, Rn + R0); MWRITE(FRm, Rn + R0);
} else { } else {
WRITE(FRm, Rn + R0); MWRITE(FRm, Rn + R0);
} }
return 0; return 0;
...@@ -223,11 +223,11 @@ fmov_reg_mem(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, ...@@ -223,11 +223,11 @@ fmov_reg_mem(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
{ {
if (FPSCR_SZ) { if (FPSCR_SZ) {
FMOV_EXT(m); FMOV_EXT(m);
WRITE(FRm, Rn + 4); MWRITE(FRm, Rn + 4);
m++; m++;
WRITE(FRm, Rn); MWRITE(FRm, Rn);
} else { } else {
WRITE(FRm, Rn); MWRITE(FRm, Rn);
} }
return 0; return 0;
...@@ -240,12 +240,12 @@ fmov_reg_dec(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, ...@@ -240,12 +240,12 @@ fmov_reg_dec(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
if (FPSCR_SZ) { if (FPSCR_SZ) {
FMOV_EXT(m); FMOV_EXT(m);
Rn -= 8; Rn -= 8;
WRITE(FRm, Rn + 4); MWRITE(FRm, Rn + 4);
m++; m++;
WRITE(FRm, Rn); MWRITE(FRm, Rn);
} else { } else {
Rn -= 4; Rn -= 4;
WRITE(FRm, Rn); MWRITE(FRm, Rn);
} }
return 0; return 0;
...@@ -445,11 +445,11 @@ id_sys(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code) ...@@ -445,11 +445,11 @@ id_sys(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code)
case 0x4052: case 0x4052:
case 0x4062: case 0x4062:
Rn -= 4; Rn -= 4;
WRITE(*reg, Rn); MWRITE(*reg, Rn);
break; break;
case 0x4056: case 0x4056:
case 0x4066: case 0x4066:
READ(*reg, Rn); MREAD(*reg, Rn);
Rn += 4; Rn += 4;
break; break;
default: default:
...@@ -467,109 +467,6 @@ static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_reg ...@@ -467,109 +467,6 @@ static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_reg
return id_sys(fregs, regs, code); return id_sys(fregs, regs, code);
} }
/**
* denormal_to_double - Given denormalized float number,
* store double float
*
* @fpu: Pointer to sh_fpu_soft structure
* @n: Index to FP register
*/
static void denormal_to_double(struct sh_fpu_soft_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
int exp = 1023 - 126;
if (x != 0 && (x & 0x7f800000) == 0) {
du = (x & 0x80000000);
while ((x & 0x00800000) == 0) {
x <<= 1;
exp--;
}
x &= 0x007fffff;
du |= (exp << 20) | (x >> 3);
dl = x << 29;
fpu->fp_regs[n] = du;
fpu->fp_regs[n+1] = dl;
}
}
/**
* ieee_fpe_handler - Handle denormalized number exception
*
* @regs: Pointer to register structure
*
* Returns 1 when it's handled (should not cause exception).
*/
static int ieee_fpe_handler(struct pt_regs *regs)
{
unsigned short insn = *(unsigned short *)regs->pc;
unsigned short finsn;
unsigned long nextpc;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
insn & 0xf};
if (nib[0] == 0xb ||
(nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
regs->pr = regs->pc + 4;
if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
if (regs->sr & 1)
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
nextpc = regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (insn == 0x000b) { /* rts */
nextpc = regs->pr;
finsn = *(unsigned short *) (regs->pc + 2);
} else {
nextpc = regs->pc + 2;
finsn = insn;
}
if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
struct task_struct *tsk = current;
if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) {
/* FPU error */
denormal_to_double (&tsk->thread.xstate->softfpu,
(finsn >> 8) & 0xf);
tsk->thread.xstate->softfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
task_thread_info(tsk)->status |= TS_USEDFPU;
} else {
force_sig_fault(SIGFPE, FPE_FLTINV,
(void __user *)regs->pc);
}
regs->pc = nextpc;
return 1;
}
return 0;
}
/** /**
* fpu_init - Initialize FPU registers * fpu_init - Initialize FPU registers
* @fpu: Pointer to software emulated FPU registers. * @fpu: Pointer to software emulated FPU registers.
......
...@@ -28,9 +28,9 @@ __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) ...@@ -28,9 +28,9 @@ __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
return 0; return 0;
} }
__kernel_size_t __clear_user(void *to, __kernel_size_t n) __kernel_size_t __clear_user(void __user *to, __kernel_size_t n)
{ {
memset(to, 0, n); memset((__force void *)to, 0, n);
return 0; return 0;
} }
......
...@@ -834,8 +834,10 @@ static int __init maple_bus_init(void) ...@@ -834,8 +834,10 @@ static int __init maple_bus_init(void)
maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
if (!maple_queue_cache) if (!maple_queue_cache) {
retval = -ENOMEM;
goto cleanup_bothirqs; goto cleanup_bothirqs;
}
INIT_LIST_HEAD(&maple_waitq); INIT_LIST_HEAD(&maple_waitq);
INIT_LIST_HEAD(&maple_sentq); INIT_LIST_HEAD(&maple_sentq);
...@@ -848,6 +850,7 @@ static int __init maple_bus_init(void) ...@@ -848,6 +850,7 @@ static int __init maple_bus_init(void)
if (!mdev[i]) { if (!mdev[i]) {
while (i-- > 0) while (i-- > 0)
maple_free_dev(mdev[i]); maple_free_dev(mdev[i]);
retval = -ENOMEM;
goto cleanup_cache; goto cleanup_cache;
} }
baseunits[i] = mdev[i]; baseunits[i] = mdev[i];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment