Commit 744465da authored by Linus Torvalds

Merge tag 'xtensa-20220325' of https://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - remove dependency on the compiler's libgcc

 - allow selection of internal kernel ABI via Kconfig

 - enable compiler plugins support for gcc-12 or newer

 - various minor cleanups and fixes

* tag 'xtensa-20220325' of https://github.com/jcmvbkbc/linux-xtensa:
  xtensa: define update_mmu_tlb function
  xtensa: fix xtensa_wsr always writing 0
  xtensa: enable plugin support
  xtensa: clean up kernel exit assembly code
  xtensa: rearrange NMI exit path
  xtensa: merge stack alignment definitions
  xtensa: fix DTC warning unit_address_format
  xtensa: fix stop_machine_cpuslocked call in patch_text
  xtensa: make secondary reset vector support conditional
  xtensa: add kernel ABI selection to Kconfig
  xtensa: don't link with libgcc
  xtensa: add helpers for division, remainder and shifts
  xtensa: add missing XCHAL_HAVE_WINDOWED check
  xtensa: use XCHAL_NUM_AREGS as pt_regs::areg size
  xtensa: rename PT_SIZE to PT_KERNEL_SIZE
  xtensa: Remove unused early_read_config_byte() et al declarations
  xtensa: use strscpy to copy strings
  net: xtensa: use strscpy to copy strings
parents 1f1c153e 1c4664fa
@@ -21,6 +21,9 @@ config XTENSA
select DMA_REMAP if MMU
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
+select GENERIC_LIB_CMPDI2
+select GENERIC_LIB_MULDI3
+select GENERIC_LIB_UCMPDI2
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select HAVE_ARCH_AUDITSYSCALL
@@ -32,6 +35,7 @@ config XTENSA
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
+select HAVE_GCC_PLUGINS if GCC_VERSION >= 120000
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_PCI
@@ -89,6 +93,9 @@ config CPU_BIG_ENDIAN
config CPU_LITTLE_ENDIAN
def_bool !CPU_BIG_ENDIAN
+config CC_HAVE_CALL0_ABI
+def_bool $(success,test "$(shell,echo __XTENSA_CALL0_ABI__ | $(CC) -mabi=call0 -E -P - 2>/dev/null)" = 1)
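This probe works because the Xtensa compiler predefines __XTENSA_CALL0_ABI__ to 1 whenever -mabi=call0 is in effect, so echoing the macro name through the preprocessor prints 1 exactly when the toolchain supports the option. A minimal stand-alone check in the same spirit (hypothetical test file, not part of the patch):

    /* abi_probe.c - build with: xtensa-linux-gcc -mabi=call0 -c abi_probe.c
     * Compilation succeeds only if the toolchain selected the call0 ABI. */
    #if !defined(__XTENSA_CALL0_ABI__) || __XTENSA_CALL0_ABI__ != 1
    #error "toolchain did not select the call0 ABI"
    #endif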
menu "Processor type and features"
choice
@@ -221,6 +228,15 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
config SECONDARY_RESET_VECTOR
bool "Secondary cores use alternative reset vector"
default y
depends on HAVE_SMP
help
Secondary cores may be configured to use an alternative reset vector,
or all cores may use the primary reset vector.
Say Y here to supply a handler for the alternative reset location.
config FAST_SYSCALL_XTENSA
bool "Enable fast atomic syscalls"
default n
@@ -247,6 +263,38 @@ config FAST_SYSCALL_SPILL_REGISTERS
If unsure, say N.
choice
prompt "Kernel ABI"
default KERNEL_ABI_DEFAULT
help
Select the ABI for the kernel code. This ABI is independent of the
supported userspace ABI, and any combination of kernel/userspace
ABIs is possible and should work.
If both the kernel and userspace support only the call0 ABI, all
register-window support code will be omitted from the build.
If unsure, choose the default ABI.
config KERNEL_ABI_DEFAULT
bool "Default ABI"
help
Select this option to compile kernel code with the default ABI
selected for the toolchain.
Normally, cores with the windowed register option use the windowed ABI
and cores without it use the call0 ABI.
config KERNEL_ABI_CALL0
bool "Call0 ABI" if CC_HAVE_CALL0_ABI
help
Select this option to compile kernel code with the call0 ABI even with
a toolchain that defaults to the windowed ABI.
When this option is not selected, the default toolchain ABI is used
for the kernel code.
endchoice
config USER_ABI_CALL0
bool
......
@@ -35,6 +35,10 @@ KBUILD_CFLAGS += -ffreestanding -D__linux__
KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
+ifneq ($(CONFIG_KERNEL_ABI_CALL0),)
+KBUILD_CFLAGS += -mabi=call0
+KBUILD_AFLAGS += -mabi=call0
+endif
KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
@@ -51,13 +55,9 @@ KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
KBUILD_DEFCONFIG := iss_defconfig
-# Find libgcc.a
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
head-y := arch/xtensa/kernel/head.o
-libs-y += arch/xtensa/lib/ $(LIBGCC)
+libs-y += arch/xtensa/lib/
boot := arch/xtensa/boot
@@ -8,19 +8,19 @@ flash: flash@00000000 {
reg = <0x00000000 0x08000000>;
bank-width = <2>;
device-width = <2>;
-partition@0x0 {
+partition@0 {
label = "data";
reg = <0x00000000 0x06000000>;
};
-partition@0x6000000 {
+partition@6000000 {
label = "boot loader area";
reg = <0x06000000 0x00800000>;
};
-partition@0x6800000 {
+partition@6800000 {
label = "kernel image";
reg = <0x06800000 0x017e0000>;
};
-partition@0x7fe0000 {
+partition@7fe0000 {
label = "boot environment";
reg = <0x07fe0000 0x00020000>;
};
@@ -8,19 +8,19 @@ flash: flash@08000000 {
reg = <0x08000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
-partition@0x0 {
+partition@0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
};
-partition@0x400000 {
+partition@400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
-partition@0xa00000 {
+partition@a00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
-partition@0xfe0000 {
+partition@fe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};
@@ -8,11 +8,11 @@ flash: flash@08000000 {
reg = <0x08000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
-partition@0x0 {
+partition@0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
};
-partition@0x3f0000 {
+partition@3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};
@@ -191,7 +191,39 @@
#endif
.endm
-#define XTENSA_STACK_ALIGNMENT 16
.macro do_nsau cnt, val, tmp, a
#if XCHAL_HAVE_NSA
nsau \cnt, \val
#else
mov \a, \val
movi \cnt, 0
extui \tmp, \a, 16, 16
bnez \tmp, 0f
movi \cnt, 16
slli \a, \a, 16
0:
extui \tmp, \a, 24, 8
bnez \tmp, 1f
addi \cnt, \cnt, 8
slli \a, \a, 8
1:
movi \tmp, __nsau_data
extui \a, \a, 24, 8
add \tmp, \tmp, \a
l8ui \tmp, \tmp, 0
add \cnt, \cnt, \tmp
#endif /* !XCHAL_HAVE_NSA */
.endm
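do_nsau emulates the nsau instruction (normalized shift amount, unsigned: the number of leading zero bits, with nsau(0) == 32). It narrows the search by 16 and then 8 bits and finishes with the 256-entry __nsau_data byte table that appears further down in this series. A C model of the same computation, a sketch rather than kernel code, with the table lookup replaced by a tiny helper:

    /* leading zeros within one byte: what each __nsau_data entry holds */
    static unsigned char nsau_byte(unsigned int b)
    {
            unsigned char n = 8;

            while (b) {
                    n--;
                    b >>= 1;
            }
            return n;
    }

    static unsigned int nsau_model(unsigned int val)
    {
            unsigned int cnt = 0;

            if ((val >> 16) == 0) {         /* top half empty: skip 16 bits */
                    cnt = 16;
                    val <<= 16;
            }
            if ((val >> 24) == 0) {         /* top byte empty: skip 8 more */
                    cnt += 8;
                    val <<= 8;
            }
            return cnt + nsau_byte(val >> 24);
    }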
.macro do_abs dst, src, tmp
#if XCHAL_HAVE_ABS
abs \dst, \src
#else
neg \tmp, \src
movgez \tmp, \src, \src
mov \dst, \tmp
#endif
.endm
#if defined(__XTENSA_WINDOWED_ABI__)
@@ -37,4 +37,11 @@
#endif
#endif
/* Xtensa ABI requires stack alignment to be at least 16 */
#if XCHAL_DATA_WIDTH > 16
#define XTENSA_STACK_ALIGNMENT XCHAL_DATA_WIDTH
#else
#define XTENSA_STACK_ALIGNMENT 16
#endif
#endif
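The merged constant has to satisfy both constraints that the two old definitions (here and in processor.h below) encoded: the ABI-mandated 16-byte minimum and the configured data-bus width. A compile-time sanity check, illustrative only and not part of the patch:

    _Static_assert(XTENSA_STACK_ALIGNMENT >= 16,
                   "Xtensa ABI requires at least 16-byte stack alignment");
    _Static_assert(XTENSA_STACK_ALIGNMENT >= XCHAL_DATA_WIDTH,
                   "stack alignment must cover the data bus width");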
@@ -73,13 +73,4 @@ static inline void pcibios_init_resource(struct resource *res,
res->child = NULL;
}
-/* These are used for config access before all the PCI probing has been done. */
-int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
-int early_read_config_word(struct pci_controller*, int, int, int, u16*);
-int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
-int early_write_config_byte(struct pci_controller*, int, int, int, u8);
-int early_write_config_word(struct pci_controller*, int, int, int, u16);
-int early_write_config_dword(struct pci_controller*, int, int, int, u32);
#endif /* _XTENSA_PCI_BRIDGE_H */
@@ -412,6 +412,10 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
typedef pte_t *pte_addr_t;
void update_mmu_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_UPDATE_MMU_TLB
#endif /* !defined (__ASSEMBLY__) */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -18,11 +18,7 @@
#include <asm/types.h>
#include <asm/regs.h>
-/* Xtensa ABI requires stack alignment to be at least 16 */
-#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
-#define ARCH_SLAB_MINALIGN STACK_ALIGN
+#define ARCH_SLAB_MINALIGN XTENSA_STACK_ALIGNMENT
/*
* User space process size: 1 GB.
@@ -239,8 +235,8 @@ extern unsigned long __get_wchan(struct task_struct *p);
#define xtensa_set_sr(x, sr) \
({ \
-unsigned int v = (unsigned int)(x); \
-__asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \
+__asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \
+"a"((unsigned int)(x))); \
})
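The rewrite fixes a macro-hygiene bug: the old temporary was named v, so a call like xtensa_set_sr(v, sr) from a caller whose variable was also named v expanded to unsigned int v = (unsigned int)(v); — the initializer read the freshly declared local, and the special register received an indeterminate value, in practice 0. A reduced demonstration of the same shadowing (hypothetical names, not kernel code):

    #include <stdio.h>

    #define set_buggy(x) do { \
            unsigned int v = (unsigned int)(x); /* shadows any caller 'v' */ \
            printf("%u\n", v); \
    } while (0)

    int main(void)
    {
            unsigned int v = 42;

            set_buggy(v);   /* prints an indeterminate value, not 42 */
            return 0;
    }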
#define xtensa_get_sr(sr) \
@@ -44,6 +44,7 @@
#ifndef __ASSEMBLY__
#include <asm/coprocessor.h>
+#include <asm/core.h>
/*
* This struct defines the way the registers are stored on the
@@ -77,14 +78,12 @@ struct pt_regs {
/* current register frame.
* Note: The ESF for kernel exceptions ends after 16 registers!
*/
-unsigned long areg[16];
+unsigned long areg[XCHAL_NUM_AREGS];
};
-#include <asm/core.h>
# define arch_has_single_step() (1)
# define task_pt_regs(tsk) ((struct pt_regs*) \
-(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
+(task_stack_page(tsk) + KERNEL_STACK_SIZE) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
@@ -13,7 +13,8 @@ obj-$(CONFIG_MMU) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
-obj-$(CONFIG_SMP) += smp.o mxhead.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SECONDARY_RESET_VECTOR) += mxhead.o
obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
@@ -63,7 +63,7 @@ int main(void)
DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
-DEFINE(PT_SIZE, sizeof(struct pt_regs));
+DEFINE(PT_KERNEL_SIZE, offsetof(struct pt_regs, areg[16]));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
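With areg[] grown to XCHAL_NUM_AREGS, the single PT_SIZE splits in two: PT_KERNEL_SIZE still ends after areg[16], because kernel exception frames never save more (the ESF comment in ptrace.h above makes the same point), while PT_USER_SIZE spans the full register file. A sketch of the layout arithmetic, with a simplified struct and 64 address registers assumed:

    #include <stddef.h>

    struct pt_regs_sketch {
            unsigned long pc, ps, depc;     /* stand-in for the frame header */
            unsigned long areg[64];         /* XCHAL_NUM_AREGS == 64 assumed */
    };

    /* kernel frames stop after 16 address registers... */
    #define PT_KERNEL_SIZE_SKETCH offsetof(struct pt_regs_sketch, areg[16])
    /* ...user frames keep the whole register file */
    #define PT_USER_SIZE_SKETCH   offsetof(struct pt_regs_sketch, areg[64])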
@@ -341,8 +341,8 @@ KABI_W _bbsi.l a2, 3, 1f
/* Copy spill slots of a0 and a1 to imitate movsp
* in order to keep exception stack continuous
*/
-l32i a3, a1, PT_SIZE
-l32i a0, a1, PT_SIZE + 4
+l32i a3, a1, PT_KERNEL_SIZE
+l32i a0, a1, PT_KERNEL_SIZE + 4
s32e a3, a1, -16
s32e a0, a1, -12
#endif
@@ -488,11 +488,12 @@ KABI_W or a3, a3, a2
common_exception_return:
#if XTENSA_FAKE_NMI
-l32i a2, a1, PT_EXCCAUSE
-movi a3, EXCCAUSE_MAPPED_NMI
-beq a2, a3, .LNMIexit
+l32i abi_tmp0, a1, PT_EXCCAUSE
+movi abi_tmp1, EXCCAUSE_MAPPED_NMI
+l32i abi_saved1, a1, PT_PS
+beq abi_tmp0, abi_tmp1, .Lrestore_state
#endif
-1:
+.Ltif_loop:
irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_off
@@ -503,7 +504,7 @@ common_exception_return:
l32i abi_saved1, a1, PT_PS
GET_THREAD_INFO(a2, a1)
l32i a4, a2, TI_FLAGS
-_bbci.l abi_saved1, PS_UM_BIT, 6f
+_bbci.l abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel
/* Specific to a user exception exit:
* We need to check some flags for signal handling and rescheduling,
@@ -512,12 +513,12 @@ common_exception_return:
* Note that we don't disable interrupts here.
*/
-_bbsi.l a4, TIF_NEED_RESCHED, 3f
+_bbsi.l a4, TIF_NEED_RESCHED, .Lresched
movi a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
-bnone a4, a2, 5f
+bnone a4, a2, .Lexit_tif_loop_user
-2: l32i a4, a1, PT_DEPC
-bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+l32i a4, a1, PT_DEPC
+bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
/* Call do_signal() */
@@ -527,48 +528,41 @@ common_exception_return:
rsil a2, 0
mov abi_arg0, a1
abi_call do_notify_resume # int do_notify_resume(struct pt_regs*)
-j 1b
-3: /* Reschedule */
+j .Ltif_loop
+.Lresched:
#ifdef CONFIG_TRACE_IRQFLAGS
abi_call trace_hardirqs_on
#endif
rsil a2, 0
abi_call schedule # void schedule (void)
-j 1b
+j .Ltif_loop
+.Lexit_tif_loop_kernel:
#ifdef CONFIG_PREEMPTION
-6:
-_bbci.l a4, TIF_NEED_RESCHED, 4f
+_bbci.l a4, TIF_NEED_RESCHED, .Lrestore_state
/* Check current_thread_info->preempt_count */
l32i a4, a2, TI_PRE_COUNT
-bnez a4, 4f
+bnez a4, .Lrestore_state
abi_call preempt_schedule_irq
-j 4f
#endif
+j .Lrestore_state
-#if XTENSA_FAKE_NMI
-.LNMIexit:
-l32i abi_saved1, a1, PT_PS
-_bbci.l abi_saved1, PS_UM_BIT, 4f
-#endif
-5:
+.Lexit_tif_loop_user:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-_bbci.l a4, TIF_DB_DISABLED, 7f
+_bbci.l a4, TIF_DB_DISABLED, 1f
abi_call restore_dbreak
-7:
+1:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC
-bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
abi_call check_tlb_sanity
#endif
-6:
-4:
+.Lrestore_state:
#ifdef CONFIG_TRACE_IRQFLAGS
extui a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei a4, LOCKLEVEL, 1f
@@ -606,7 +600,7 @@ user_exception_exit:
rsr a1, depc # restore stack pointer
l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9)
rotw -1 # we restore a4..a7
-_bltui a6, 16, 1f # only have to restore current window?
+_bltui a6, 16, .Lclear_regs # only have to restore current window?
/* The working registers are a0 and a3. We are restoring to
* a4..a7. Be careful not to destroy what we have just restored.
@@ -618,18 +612,19 @@ user_exception_exit:
mov a2, a6
mov a3, a5
-2: rotw -1 # a0..a3 become a4..a7
+1: rotw -1 # a0..a3 become a4..a7
addi a3, a7, -4*4 # next iteration
addi a2, a6, -16 # decrementing Y in WMASK
l32i a4, a3, PT_AREG_END + 0
l32i a5, a3, PT_AREG_END + 4
l32i a6, a3, PT_AREG_END + 8
l32i a7, a3, PT_AREG_END + 12
-_bgeui a2, 16, 2b
+_bgeui a2, 16, 1b
/* Clear unrestored registers (don't leak anything to user-land) */
-1: rsr a0, windowbase
+.Lclear_regs:
+rsr a0, windowbase
rsr a3, sar
sub a3, a0, a3
beqz a3, 2f
@@ -706,12 +701,12 @@ kernel_exception_exit:
addi a0, a1, -16
l32i a3, a0, 0
l32i a4, a0, 4
-s32i a3, a1, PT_SIZE+0
-s32i a4, a1, PT_SIZE+4
+s32i a3, a1, PT_KERNEL_SIZE + 0
+s32i a4, a1, PT_KERNEL_SIZE + 4
l32i a3, a0, 8
l32i a4, a0, 12
-s32i a3, a1, PT_SIZE+8
-s32i a4, a1, PT_SIZE+12
+s32i a3, a1, PT_KERNEL_SIZE + 8
+s32i a4, a1, PT_KERNEL_SIZE + 12
/* Common exception exit.
* We restore the special register and the current window frame, and
@@ -821,7 +816,7 @@ ENTRY(debug_exception)
bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
-addi a2, a1, -16-PT_SIZE # assume kernel stack
+addi a2, a1, -16 - PT_KERNEL_SIZE # assume kernel stack
3:
l32i a0, a3, DT_DEBUG_SAVE
s32i a1, a2, PT_AREG1
@@ -61,7 +61,7 @@ static void patch_text(unsigned long addr, const void *data, size_t sz)
.data = data,
};
stop_machine_cpuslocked(patch_text_stop_machine,
-&patch, NULL);
+&patch, cpu_online_mask);
} else {
unsigned long flags;
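Passing cpu_online_mask instead of NULL makes stop_machine run the callback on every online CPU, not just one: patch_text_stop_machine counts arrivals so that exactly one CPU performs the write while the rest spin quiesced until it finishes. A hedged reconstruction of that rendezvous (field names assumed from the initializer above; simplified):

    #include <linux/atomic.h>
    #include <linux/cpumask.h>
    #include <linux/processor.h>
    #include <linux/string.h>

    struct patch_sketch {                   /* assumed shape of struct patch */
            atomic_t cpu_count;
            unsigned long addr;
            size_t sz;
            const void *data;
    };

    static int patch_text_stop_machine_sketch(void *arg)
    {
            struct patch_sketch *patch = arg;

            if (atomic_inc_return(&patch->cpu_count) == 1) {
                    /* exactly one CPU performs the write */
                    memcpy((void *)patch->addr, patch->data, patch->sz);
                    atomic_inc(&patch->cpu_count);  /* release the spinners */
            } else {
                    while (atomic_read(&patch->cpu_count) <= num_online_cpus())
                            cpu_relax();
            }
            return 0;
    }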
@@ -37,11 +37,13 @@ _SetupOCD:
* xt-gdb to single step via DEBUG exceptions received directly
* by ocd.
*/
+#if XCHAL_HAVE_WINDOWED
movi a1, 1
movi a0, 0
wsr a1, windowstart
wsr a0, windowbase
rsync
+#endif
movi a1, LOCKLEVEL
wsr a1, ps
@@ -232,10 +232,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
p->thread.ra = MAKE_RA_FOR_CALL(
(unsigned long)ret_from_fork, 0x1);
-/* This does not copy all the regs.
- * In a bout of brilliance or madness,
- * ARs beyond a0-a15 exist past the end of the struct.
- */
*childregs = *regs;
childregs->areg[1] = usp;
childregs->areg[2] = 0;
@@ -265,14 +261,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
childregs->wmask = 1;
childregs->windowstart = 1;
childregs->windowbase = 0;
-} else {
-int len = childregs->wmask & ~0xf;
-memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
-&regs->areg[XCHAL_NUM_AREGS - len/4], len);
-}
childregs->syscall = regs->syscall;
if (clone_flags & CLONE_SETTLS)
childregs->threadptr = tls;
} else {
@@ -140,7 +140,7 @@ __tagtable(BP_TAG_FDT, parse_tag_fdt);
static int __init parse_tag_cmdline(const bp_tag_t* tag)
{
-strlcpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
+strscpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE);
return 0;
}
@@ -230,7 +230,7 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(xtensa_dt_io_area, NULL);
if (!command_line[0])
-strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
#endif /* CONFIG_USE_OF */
@@ -260,7 +260,7 @@ void __init init_arch(bp_tag_t *bp_start)
#ifdef CONFIG_CMDLINE_BOOL
if (!command_line[0])
-strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
+strscpy(command_line, default_command_line, COMMAND_LINE_SIZE);
#endif
/* Early hook for platforms */
@@ -289,7 +289,7 @@ void __init setup_arch(char **cmdline_p)
*cmdline_p = command_line;
platform_setup(cmdline_p);
-strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
/* Reserve some memory regions */
@@ -349,7 +349,7 @@ void __init setup_arch(char **cmdline_p)
#endif /* CONFIG_VECTORS_ADDR */
-#ifdef CONFIG_SMP
+#ifdef CONFIG_SECONDARY_RESET_VECTOR
mem_reserve(__pa(_SecondaryResetVector_text_start),
__pa(_SecondaryResetVector_text_end));
#endif
@@ -88,7 +88,7 @@ ENDPROC(_UserExceptionVector)
* Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
*
* We get this exception when we were already in kernel space.
-* We decrement the current stack pointer (kernel) by PT_SIZE and
+* We decrement the current stack pointer (kernel) by PT_KERNEL_SIZE and
* jump to the first-level handler associated with the exception cause.
*
* Note: we need to preserve space for the spill region.
@@ -100,7 +100,7 @@ ENTRY(_KernelExceptionVector)
xsr a3, excsave1 # save a3, and get dispatch table
wsr a2, depc # save a2
-addi a2, a1, -16-PT_SIZE # adjust stack pointer
+addi a2, a1, -16 - PT_KERNEL_SIZE # adjust stack pointer
s32i a0, a2, PT_AREG0 # save a0 to ESF
rsr a0, exccause # retrieve exception cause
s32i a0, a2, PT_DEPC # mark it as a regular exception
@@ -207,7 +207,7 @@ SECTIONS
RELOCATE_ENTRY(_xip_data, .data);
RELOCATE_ENTRY(_xip_init_data, .init.data);
#endif
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
RELOCATE_ENTRY(_SecondaryResetVector_text,
.SecondaryResetVector.text);
#endif
@@ -303,7 +303,7 @@ SECTIONS
#define LAST .DoubleExceptionVector.text
#endif
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SECONDARY_RESET_VECTOR)
SECTION_VECTOR4 (_SecondaryResetVector_text,
.SecondaryResetVector.text,
@@ -59,32 +59,18 @@ extern long long __ashldi3(long long, int);
extern long long __lshrdi3(long long, int);
extern int __divsi3(int, int);
extern int __modsi3(int, int);
-extern long long __muldi3(long long, long long);
extern int __mulsi3(int, int);
extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
-extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
-extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
-extern int __ucmpdi2(int, int);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__ucmpdi2);
-void __xtensa_libgcc_window_spill(void)
-{
-BUG();
-}
-EXPORT_SYMBOL(__xtensa_libgcc_window_spill);
unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
{
@@ -4,5 +4,7 @@
#
lib-y += memcopy.o memset.o checksum.o \
+ashldi3.o ashrdi3.o lshrdi3.o \
+divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
usercopy.o strncpy_user.o strnlen_user.o
lib-$(CONFIG_PCI) += pci-auto.o
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
#ifdef __XTENSA_EB__
#define uh a2
#define ul a3
#else
#define uh a3
#define ul a2
#endif /* __XTENSA_EB__ */
ENTRY(__ashldi3)
abi_entry_default
ssl a4
bgei a4, 32, .Llow_only
src uh, uh, ul
sll ul, ul
abi_ret_default
.Llow_only:
sll uh, ul
movi ul, 0
abi_ret_default
ENDPROC(__ashldi3)
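The 64-bit operand arrives split across a2/a3; the uh/ul defines above pick which register holds the high word based on endianness, and ssl/bgei split the shift count into two cases. A C model of the semantics (illustrative only; shift counts 0..63 assumed):

    static unsigned long long ashldi3_model(unsigned long long u, unsigned int n)
    {
            unsigned int lo = (unsigned int)u;
            unsigned int hi = (unsigned int)(u >> 32);

            if (n >= 32) {          /* .Llow_only: low word shifts into high */
                    hi = lo << (n - 32);
                    lo = 0;
            } else if (n != 0) {    /* src/sll: funnel shift across the pair */
                    hi = (hi << n) | (lo >> (32 - n));
                    lo <<= n;
            }
            return ((unsigned long long)hi << 32) | lo;
    }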
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
#ifdef __XTENSA_EB__
#define uh a2
#define ul a3
#else
#define uh a3
#define ul a2
#endif /* __XTENSA_EB__ */
ENTRY(__ashrdi3)
abi_entry_default
ssr a4
bgei a4, 32, .Lhigh_only
src ul, uh, ul
sra uh, uh
abi_ret_default
.Lhigh_only:
sra ul, uh
srai uh, uh, 31
abi_ret_default
ENDPROC(__ashrdi3)
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
ENTRY(__divsi3)
abi_entry_default
#if XCHAL_HAVE_DIV32
quos a2, a2, a3
#else
xor a7, a2, a3 /* sign = dividend ^ divisor */
do_abs a6, a2, a4 /* udividend = abs (dividend) */
do_abs a3, a3, a4 /* udivisor = abs (divisor) */
bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
do_nsau a5, a6, a2, a8 /* udividend_shift = nsau (udividend) */
do_nsau a4, a3, a2, a8 /* udivisor_shift = nsau (udivisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
ssl a4
sll a3, a3 /* udivisor <<= count */
movi a2, 0 /* quotient = 0 */
/* test-subtract-and-shift loop; one quotient bit on each iteration */
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a6, a3, .Lzerobit
sub a6, a6, a3
addi a2, a2, 1
.Lzerobit:
slli a2, a2, 1
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
bltu a6, a3, .Lreturn
addi a2, a2, 1 /* increment if udividend >= udivisor */
.Lreturn:
neg a5, a2
movltz a2, a5, a7 /* return (sign < 0) ? -quotient : quotient */
abi_ret_default
.Lle_one:
beqz a3, .Lerror
neg a2, a6 /* if udivisor == 1, then return... */
movgez a2, a6, a7 /* (sign < 0) ? -udividend : udividend */
abi_ret_default
.Lspecial:
bltu a6, a3, .Lreturn0 /* if dividend < divisor, return 0 */
movi a2, 1
movi a4, -1
movltz a2, a4, a7 /* else return (sign < 0) ? -1 : 1 */
abi_ret_default
.Lerror:
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
abi_ret_default
ENDPROC(__divsi3)
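On cores without the 32-bit integer divide option, __divsi3 strips the signs, aligns the divisor under the dividend, and produces one quotient bit per test-subtract-shift step. A C model of that path, with __builtin_clz standing in for the do_nsau helper and the special cases folded together (sketch, not the kernel code):

    static int divsi3_model(int a, int b)
    {
            int sign = a ^ b;                       /* quotient sign */
            unsigned int ud = a < 0 ? -(unsigned int)a : (unsigned int)a;
            unsigned int uv = b < 0 ? -(unsigned int)b : (unsigned int)b;
            unsigned int q = 0;
            int count;

            if (uv <= 1)    /* the real helper traps on 0 via ill + "DIV0" */
                    return uv == 0 ? 0 : (sign < 0 ? -(int)ud : (int)ud);
            if (ud < uv)
                    return 0;

            /* align: shift divisor up until both have the same bit length */
            count = __builtin_clz(uv) - __builtin_clz(ud);
            uv <<= count;

            while (count--) {                       /* one quotient bit each */
                    if (ud >= uv) {
                            ud -= uv;
                            q++;
                    }
                    q <<= 1;
                    uv >>= 1;
            }
            if (ud >= uv)                           /* final bit */
                    q++;
            return sign < 0 ? -(int)q : (int)q;
    }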
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
#ifdef __XTENSA_EB__
#define uh a2
#define ul a3
#else
#define uh a3
#define ul a2
#endif /* __XTENSA_EB__ */
ENTRY(__lshrdi3)
abi_entry_default
ssr a4
bgei a4, 32, .Lhigh_only
src ul, uh, ul
srl uh, uh
abi_ret_default
.Lhigh_only:
srl ul, uh
movi uh, 0
abi_ret_default
ENDPROC(__lshrdi3)
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
ENTRY(__modsi3)
abi_entry_default
#if XCHAL_HAVE_DIV32
rems a2, a2, a3
#else
mov a7, a2 /* save original (signed) dividend */
do_abs a2, a2, a4 /* udividend = abs (dividend) */
do_abs a3, a3, a4 /* udivisor = abs (divisor) */
bltui a3, 2, .Lle_one /* check if udivisor <= 1 */
do_nsau a5, a2, a6, a8 /* udividend_shift = nsau (udividend) */
do_nsau a4, a3, a6, a8 /* udivisor_shift = nsau (udivisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */
ssl a4
sll a3, a3 /* udivisor <<= count */
/* test-subtract-and-shift loop */
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a2, a3, .Lzerobit
sub a2, a2, a3
.Lzerobit:
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
.Lspecial:
bltu a2, a3, .Lreturn
sub a2, a2, a3 /* subtract again if udividend >= udivisor */
.Lreturn:
bgez a7, .Lpositive
neg a2, a2 /* if (dividend < 0), return -udividend */
.Lpositive:
abi_ret_default
.Lle_one:
bnez a3, .Lreturn0
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
abi_ret_default
ENDPROC(__modsi3)
#if !XCHAL_HAVE_NSA
.section .rodata
.align 4
.global __nsau_data
.type __nsau_data, @object
__nsau_data:
.byte 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4
.byte 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
.byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
.byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
.size __nsau_data, . - __nsau_data
#endif /* !XCHAL_HAVE_NSA */
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
.macro do_addx2 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
addx2 \dst, \as, \at
#else
slli \tmp, \as, 1
add \dst, \tmp, \at
#endif
.endm
.macro do_addx4 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
addx4 \dst, \as, \at
#else
slli \tmp, \as, 2
add \dst, \tmp, \at
#endif
.endm
.macro do_addx8 dst, as, at, tmp
#if XCHAL_HAVE_ADDX
addx8 \dst, \as, \at
#else
slli \tmp, \as, 3
add \dst, \tmp, \at
#endif
.endm
ENTRY(__mulsi3)
abi_entry_default
#if XCHAL_HAVE_MUL32
mull a2, a2, a3
#elif XCHAL_HAVE_MUL16
or a4, a2, a3
srai a4, a4, 16
bnez a4, .LMUL16
mul16u a2, a2, a3
abi_ret_default
.LMUL16:
srai a4, a2, 16
srai a5, a3, 16
mul16u a7, a4, a3
mul16u a6, a5, a2
mul16u a4, a2, a3
add a7, a7, a6
slli a7, a7, 16
add a2, a7, a4
#elif XCHAL_HAVE_MAC16
mul.aa.hl a2, a3
mula.aa.lh a2, a3
rsr a5, ACCLO
umul.aa.ll a2, a3
rsr a4, ACCLO
slli a5, a5, 16
add a2, a4, a5
#else /* !MUL32 && !MUL16 && !MAC16 */
/* Multiply one bit at a time, but unroll the loop 4x to better
exploit the addx instructions and avoid overhead.
Peel the first iteration to save a cycle on init. */
/* Avoid negative numbers. */
xor a5, a2, a3 /* Top bit is 1 if one input is negative. */
do_abs a3, a3, a6
do_abs a2, a2, a6
/* Swap so the second argument is smaller. */
sub a7, a2, a3
mov a4, a3
movgez a4, a2, a7 /* a4 = max (a2, a3) */
movltz a3, a2, a7 /* a3 = min (a2, a3) */
movi a2, 0
extui a6, a3, 0, 1
movnez a2, a4, a6
do_addx2 a7, a4, a2, a7
extui a6, a3, 1, 1
movnez a2, a7, a6
do_addx4 a7, a4, a2, a7
extui a6, a3, 2, 1
movnez a2, a7, a6
do_addx8 a7, a4, a2, a7
extui a6, a3, 3, 1
movnez a2, a7, a6
bgeui a3, 16, .Lmult_main_loop
neg a3, a2
movltz a2, a3, a5
abi_ret_default
.align 4
.Lmult_main_loop:
srli a3, a3, 4
slli a4, a4, 4
add a7, a4, a2
extui a6, a3, 0, 1
movnez a2, a7, a6
do_addx2 a7, a4, a2, a7
extui a6, a3, 1, 1
movnez a2, a7, a6
do_addx4 a7, a4, a2, a7
extui a6, a3, 2, 1
movnez a2, a7, a6
do_addx8 a7, a4, a2, a7
extui a6, a3, 3, 1
movnez a2, a7, a6
bgeui a3, 16, .Lmult_main_loop
neg a3, a2
movltz a2, a3, a5
#endif /* !MUL32 && !MUL16 && !MAC16 */
abi_ret_default
ENDPROC(__mulsi3)
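The last fallback multiplies one nibble of the smaller operand per round: each set bit k of the multiplier adds the multiplicand shifted left by k, which is what the addx2/addx4/addx8 macros compute in a single instruction each. A C model (sketch; the assembly additionally peels the first round to save a cycle):

    static int mulsi3_model(int a, int b)
    {
            int sign = a ^ b;
            unsigned int x = a < 0 ? -(unsigned int)a : (unsigned int)a;
            unsigned int y = b < 0 ? -(unsigned int)b : (unsigned int)b;
            unsigned int acc = 0;

            if (x < y) {            /* loop over the smaller operand */
                    unsigned int t = x;
                    x = y;
                    y = t;
            }
            while (y) {             /* four multiplier bits per round */
                    if (y & 1)
                            acc += x;
                    if (y & 2)
                            acc += x << 1;
                    if (y & 4)
                            acc += x << 2;
                    if (y & 8)
                            acc += x << 3;
                    y >>= 4;
                    x <<= 4;
            }
            return sign < 0 ? -(int)acc : (int)acc;
    }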
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
ENTRY(__udivsi3)
abi_entry_default
#if XCHAL_HAVE_DIV32
quou a2, a2, a3
#else
bltui a3, 2, .Lle_one /* check if the divisor <= 1 */
mov a6, a2 /* keep dividend in a6 */
do_nsau a5, a6, a2, a7 /* dividend_shift = nsau (dividend) */
do_nsau a4, a3, a2, a7 /* divisor_shift = nsau (divisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
ssl a4
sll a3, a3 /* divisor <<= count */
movi a2, 0 /* quotient = 0 */
/* test-subtract-and-shift loop; one quotient bit on each iteration */
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a6, a3, .Lzerobit
sub a6, a6, a3
addi a2, a2, 1
.Lzerobit:
slli a2, a2, 1
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
bltu a6, a3, .Lreturn
addi a2, a2, 1 /* increment quotient if dividend >= divisor */
.Lreturn:
abi_ret_default
.Lle_one:
beqz a3, .Lerror /* if divisor == 1, return the dividend */
abi_ret_default
.Lspecial:
/* return dividend >= divisor */
bltu a6, a3, .Lreturn0
movi a2, 1
abi_ret_default
.Lerror:
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
abi_ret_default
ENDPROC(__udivsi3)
/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>
ENTRY(__umodsi3)
abi_entry_default
#if XCHAL_HAVE_DIV32
remu a2, a2, a3
#else
bltui a3, 2, .Lle_one /* check if the divisor is <= 1 */
do_nsau a5, a2, a6, a7 /* dividend_shift = nsau (dividend) */
do_nsau a4, a3, a6, a7 /* divisor_shift = nsau (divisor) */
bgeu a5, a4, .Lspecial
sub a4, a4, a5 /* count = divisor_shift - dividend_shift */
ssl a4
sll a3, a3 /* divisor <<= count */
/* test-subtract-and-shift loop */
#if XCHAL_HAVE_LOOPS
loopnez a4, .Lloopend
#endif /* XCHAL_HAVE_LOOPS */
.Lloop:
bltu a2, a3, .Lzerobit
sub a2, a2, a3
.Lzerobit:
srli a3, a3, 1
#if !XCHAL_HAVE_LOOPS
addi a4, a4, -1
bnez a4, .Lloop
#endif /* !XCHAL_HAVE_LOOPS */
.Lloopend:
.Lspecial:
bltu a2, a3, .Lreturn
sub a2, a2, a3 /* subtract once more if dividend >= divisor */
.Lreturn:
abi_ret_default
.Lle_one:
bnez a3, .Lreturn0
/* Divide by zero: Use an illegal instruction to force an exception.
The subsequent "DIV0" string can be recognized by the exception
handler to identify the real cause of the exception. */
ill
.ascii "DIV0"
.Lreturn0:
movi a2, 0
#endif /* XCHAL_HAVE_DIV32 */
abi_ret_default
ENDPROC(__umodsi3)
@@ -162,6 +162,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
}
}
void update_mmu_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
local_flush_tlb_page(vma, address);
}
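The generic update_mmu_tlb() is a no-op; defining __HAVE_ARCH_UPDATE_MMU_TLB (see the pgtable.h hunk above) lets xtensa invalidate a stale TLB entry when generic mm code loses a PTE-installation race. Roughly the caller pattern this serves, paraphrased from the generic fault path (details assumed):

    /* do_anonymous_page()-style race handling, paraphrased */
    if (!pte_none(*vmf->pte)) {
            /* another thread installed the PTE first; whatever this CPU
             * cached for the address may be stale, so flush it */
            update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
            goto release;
    }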
#ifdef CONFIG_DEBUG_TLB_SANITY
static unsigned get_pte_for_vaddr(unsigned vaddr)
@@ -174,7 +174,7 @@ static int tuntap_open(struct iss_net_private *lp)
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
-strlcpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name));
+strscpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name));
err = simc_ioctl(fd, TUNSETIFF, &ifr);
if (err < 0) {
@@ -249,7 +249,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
return 0;
}
-strlcpy(lp->tp.info.tuntap.dev_name, dev_name,
+strscpy(lp->tp.info.tuntap.dev_name, dev_name,
sizeof(lp->tp.info.tuntap.dev_name));
setup_etheraddr(dev, mac_str);