Commit 2cc63b39 authored by Linus Torvalds

Merge tag 'arc-5.0-final' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:
 "Fixes for ARC for 5.0, bunch of those are stable fodder anyways so
  sooner the better.

   - Fix memcpy to prevent prefetchw beyond end of buffer [Eugeniy]

   - Enable unaligned access early to prevent exceptions given newer gcc
     code gen [Eugeniy]

   - Tighten up U-boot arg checking to prevent false negatives, and also
     allow JTAG and bootloader booting to coexist without a config option,
     as needed by the KernelCI folks [Eugeniy]

   - Set slab alignment to 8 for ARC to avoid unaligned atomic64_t
     accesses [Alexey]

   - Disable regfile auto save on interrupts on HSDK platform due to a
     silicon issue [Vineet]

   - Avoid a crash during boot printing on HS38x by not reading an
     HS48x-only register [Vineet]"

* tag 'arc-5.0-final' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARCv2: don't assume core 0x54 has dual issue
  ARC: define ARCH_SLAB_MINALIGN = 8
  ARC: enable uboot support unconditionally
  ARC: U-boot: check arguments paranoidly
  ARCv2: support manual regfile save on interrupts
  ARC: uacces: remove lp_start, lp_end from clobber list
  ARC: fix actionpoints configuration detection
  ARCv2: lib: memcpy: fix doing prefetchw outside of buffer
  ARCv2: Enable unaligned access in early ASM code
parents 8456e98e 7b2e932f
@@ -191,7 +191,6 @@ config NR_CPUS

 config ARC_SMP_HALT_ON_RESET
 	bool "Enable Halt-on-reset boot mode"
-	default y if ARC_UBOOT_SUPPORT
 	help
 	  In SMP configuration cores can be configured as Halt-on-reset
 	  or they could all start at same time. For Halt-on-reset, non
@@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS
 	  (also referred to as r58:r59). These can also be used by gcc as GPR so
 	  kernel needs to save/restore per process

+config ARC_IRQ_NO_AUTOSAVE
+	bool "Disable hardware autosave regfile on interrupts"
+	default n
+	help
+	  On HS cores, taken interrupt auto saves the regfile on stack.
+	  This is programmable and can be optionally disabled in which case
+	  software INTERRUPT_PROLOGUE/EPILOGUE do the needed work
+
 endif	# ISA_ARCV2

 endmenu	  # "ARC CPU Configuration"
@@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA

 endif

-config ARC_UBOOT_SUPPORT
-	bool "Support uboot arg Handling"
-	help
-	  ARC Linux by default checks for uboot provided args as pointers to
-	  external cmdline or DTB. This however breaks in absence of uboot,
-	  when booting from Metaware debugger directly, as the registers are
-	  not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
-	  registers look like uboot args to kernel which then chokes.
-	  So only enable the uboot arg checking/processing if users are sure
-	  of uboot being in play.
-
 config ARC_BUILTIN_DTB_NAME
 	string "Built in DTB"
 	help
......
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
 # CONFIG_ARC_HAS_LLSC is not set
 CONFIG_ARC_KVADDR_SIZE=402
 CONFIG_ARC_EMUL_UNALIGNED=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_PREEMPT=y
 CONFIG_NET=y
 CONFIG_UNIX=y
......
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARC_PLAT_AXS10X=y
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
......
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
-# CONFIG_ARC_SMP_HALT_ON_RESET is not set
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
......
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
 #endif
 };

+struct bcr_uarch_build_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:8, prod:8, maj:8, min:8;
+#else
+	unsigned int min:8, maj:8, prod:8, pad:8;
+#endif
+};
+
 struct bcr_mpy {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
......
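The new bcr_uarch_build_arcv2 bitfield above maps the 32-bit MICRO_ARCH build-configuration register onto named min/maj/prod fields, which is how READ_BCR() in the setup.c hunk further down consumes it. A small host-side sketch of that decoding, assuming the little-endian layout; the raw register value is made up purely for illustration:

    #include <stdio.h>

    /* Mirrors the little-endian layout of struct bcr_uarch_build_arcv2. */
    struct uarch_build {
        unsigned int min:8, maj:8, prod:8, pad:8;
    };

    union bcr {
        unsigned int word;
        struct uarch_build f;
    };

    int main(void)
    {
        /* Hypothetical raw BCR value: prod = 4 (HS4x), maj = 1, min = 0 */
        union bcr b = { .word = 0x00040100 };

        printf("prod=%u maj=%u min=%u\n", b.f.prod, b.f.maj, b.f.min);

        /* setup.c below only touches dual-issue state when prod == 4 */
        if (b.f.prod == 4)
            printf("HS4x-class core, dual issue hardware present\n");
        return 0;
    }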
@@ -52,6 +52,17 @@
 #define cache_line_size()	SMP_CACHE_BYTES
 #define ARCH_DMA_MINALIGN	SMP_CACHE_BYTES

+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN	8
+#endif
+
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
......
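The comment in the hunk above carries the whole argument: LLOCKD/SCONDD need naturally aligned 8-byte operands, but the ARC ABI only promises 4-byte alignment for long long, so a slab object embedding an atomic64_t could legally sit on a 4-byte boundary until ARCH_SLAB_MINALIGN is raised to 8. A host-side sketch of that failure mode, with the 4-byte minimum alignment simulated by hand since most host ABIs already align long long to 8:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a slab object embedding a 64-bit atomic counter. */
    struct obj {
        uint32_t flags;
        long long counter;    /* atomic64_t wraps a long long */
    };

    int main(void)
    {
        /* Simulate an allocator whose minimum alignment is only 4 bytes. */
        char *pool = malloc(64);
        struct obj *o = (struct obj *)(pool + 4);
        uintptr_t addr = (uintptr_t)&o->counter;

        printf("counter at %p, 8-byte aligned: %s\n",
               (void *)addr, (addr % 8 == 0) ? "yes" : "no");

        /*
         * On ARCv2 with LL64 + LLSC, a misaligned counter would make
         * LLOCKD/SCONDD fault; ARCH_SLAB_MINALIGN = 8 rules this
         * placement out for slab allocations.
         */
        free(pool);
        return 0;
    }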
@@ -17,6 +17,33 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25

+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+	st.as	r9, [sp, -10]	; save r9 in its final stack slot
+	sub	sp, sp, 12	; skip JLI, LDI, EI
+
+	PUSH	lp_count
+	PUSHAX	lp_start
+	PUSHAX	lp_end
+	PUSH	blink
+
+	PUSH	r11
+	PUSH	r10
+
+	sub	sp, sp, 4	; skip r9
+
+	PUSH	r8
+	PUSH	r7
+	PUSH	r6
+	PUSH	r5
+	PUSH	r4
+	PUSH	r3
+	PUSH	r2
+	PUSH	r1
+	PUSH	r0
+.endif
+#endif
+
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
 	PUSH	r59
 	PUSH	r58
@@ -86,6 +113,33 @@
 	POP	r59
 #endif

+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+	POP	r0
+	POP	r1
+	POP	r2
+	POP	r3
+	POP	r4
+	POP	r5
+	POP	r6
+	POP	r7
+	POP	r8
+	POP	r9
+	POP	r10
+	POP	r11
+
+	POP	blink
+	POPAX	lp_end
+	POPAX	lp_start
+
+	POP	r9
+	mov	lp_count, r9
+
+	add	sp, sp, 12	; skip JLI, LDI, EI
+	ld.as	r9, [sp, -10]	; reload r9 which got clobbered
+.endif
+#endif
+
 .endm

 /*------------------------------------------------------------------------*/
......
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 	 */
 	  "=&r" (tmp), "+r" (to), "+r" (from)
 	:
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");

 	return n;
 }
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 	 */
 	  "=&r" (tmp), "+r" (to), "+r" (from)
 	:
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");

 	return n;
 }
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
 	"	.previous \n"
 	: "+r"(d_char), "+r"(res)
 	: "i"(0)
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");

 	return res;
 }
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
 	"	.previous \n"
 	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
 	: "g"(-EFAULT), "r"(count)
-	: "lp_count", "lp_start", "lp_end", "memory");
+	: "lp_count", "memory");

 	return res;
 }
......
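All four uaccess.h hunks above make the same change: lp_start and lp_end drop out of the inline-asm clobber lists, leaving only lp_count and "memory". For readers who don't stare at extended asm often, a portable toy showing the outputs : inputs : clobbers shape these snippets use (nothing ARC-specific, purely illustrative):

    #include <stdio.h>

    /* Empty asm template, but the constraints still tell the compiler that
     * 'val' may have been rewritten in place and that memory was touched. */
    static unsigned long toy_asm(unsigned long val)
    {
        __asm__ volatile (
            ""              /* no real instructions in this sketch */
            : "+r" (val)    /* read-write register operand */
            :               /* no inputs */
            : "memory");    /* clobber list */
        return val;
    }

    int main(void)
    {
        printf("%lu\n", toy_asm(41) + 1);
        return 0;
    }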
@@ -209,7 +209,9 @@ restore_regs:

 ;####### Return from Intr #######

 debug_marker_l1:
-	bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+	; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+	btst	r0, STATUS_DE_BIT	; Z flag set if bit clear
+	bnz	.Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set

 .Lisr_ret_fast_path:
 	; Handle special case #1: (Entry via Exception, Return via IRQ)
......
@@ -17,6 +17,7 @@
 #include <asm/entry.h>
 #include <asm/arcregs.h>
 #include <asm/cache.h>
+#include <asm/irqflags.h>

 .macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
 	sr	r5, [ARC_REG_DC_CTRL]
 1:

+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so re-enable early as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+	; by default
+	lr	r5, [status32]
+	bset	r5, r5, STATUS_AD_BIT
+	kflag	r5
+#endif
+
 .endm

 .section .init.text, "ax",@progbits
@@ -90,15 +100,13 @@ ENTRY(stext)
 	st.ab	0, [r5, 4]
 1:

-#ifdef CONFIG_ARC_UBOOT_SUPPORT
 	; Uboot - kernel ABI
 	;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
-	;    r1 = magic number (board identity, unused as of now
+	;    r1 = magic number (always zero as of now)
 	;    r2 = pointer to uboot provided cmdline or external DTB in mem
-	;    These are handled later in setup_arch()
+	;    These are handled later in handle_uboot_args()
 	st	r0, [@uboot_tag]
 	st	r2, [@uboot_arg]
-#endif

 	; setup "current" tsk and optionally cache it in dedicated r25
 	mov	r9, @init_task
......
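The CPU_EARLY_SETUP change above is needed because the compiler itself, not just hand-written code, may now emit unaligned loads and stores. A host-side illustration of the codegen issue; the misaligned read goes through memcpy, the portable idiom a compiler is free to lower to a single unaligned load on targets where such accesses are enabled:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read a 32-bit value from an arbitrarily aligned address. On CPUs with
     * unaligned access enabled, the compiler may turn this memcpy into one
     * plain 32-bit load; with unaligned access disabled (as ARCv2 leaves it
     * at reset), such a load would raise an exception instead. */
    static uint32_t read_u32(const void *p)
    {
        uint32_t v;

        memcpy(&v, p, sizeof(v));
        return v;
    }

    int main(void)
    {
        uint8_t buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

        /* buf + 1 is deliberately not 4-byte aligned */
        printf("0x%08x\n", read_u32(buf + 1));
        return 0;
    }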
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
 	*(unsigned int *)&ictrl = 0;

+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
 	ictrl.save_nr_gpr_pairs = 6;	/* r0 to r11 (r12 saved manually) */
 	ictrl.save_blink = 1;
 	ictrl.save_lp_regs = 1;		/* LP_COUNT, LP_START, LP_END */
 	ictrl.save_u_to_u = 0;		/* user ctxt saved on kernel stack */
 	ictrl.save_idx_regs = 1;	/* JLI, LDI, EI */
+#endif

 	WRITE_AUX(AUX_IRQ_CTRL, ictrl);
......
@@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void)
 		cpu->bpu.ret_stk = 4 << bpu.rse;

 		if (cpu->core.family >= 0x54) {
-			unsigned int exec_ctrl;
-
-			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
-			cpu->extn.dual_enb = !(exec_ctrl & 1);
-
-			/* dual issue always present for this core */
-			cpu->extn.dual = 1;
+
+			struct bcr_uarch_build_arcv2 uarch;
+
+			/*
+			 * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
+			 * dual issue only (HS4x). But next uarch rev (1:0)
+			 * allows it be configured for single issue (HS3x)
+			 * Ensure we fiddle with dual issue only on HS4x
+			 */
+			READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+			if (uarch.prod == 4) {
+				unsigned int exec_ctrl;
+
+				/* dual issue hardware always present */
+				cpu->extn.dual = 1;
+
+				READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+
+				/* dual issue hardware enabled ? */
+				cpu->extn.dual_enb = !(exec_ctrl & 1);
+			}
 		}
 	}

 	READ_BCR(ARC_REG_AP_BCR, ap);
 	if (ap.ver) {
 		cpu->extn.ap_num = 2 << ap.num;
-		cpu->extn.ap_full = !!ap.min;
+		cpu->extn.ap_full = !ap.min;
 	}

 	READ_BCR(ARC_REG_SMART_BCR, bcr);
@@ -462,43 +478,78 @@ void setup_processor(void)
 	arc_chk_core_config();
 }

-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
 {
-	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
-		return 1;
-	return 0;
+	/*
+	 * Check that it is a untranslated address (although MMU is not enabled
+	 * yet, it being a high address ensures this is not by fluke)
+	 */
+	if (addr < PAGE_OFFSET)
+		return true;
+
+	/* Check that address doesn't clobber resident kernel image */
+	return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
 }

-void __init setup_arch(char **cmdline_p)
+#define IGNORE_ARGS		"Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE		0
+#define UBOOT_TAG_CMDLINE	1
+#define UBOOT_TAG_DTB		2
+
+void __init handle_uboot_args(void)
 {
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
-	/* make sure that uboot passed pointer to cmdline/dtb is valid */
-	if (uboot_tag && is_kernel((unsigned long)uboot_arg))
-		panic("Invalid uboot arg\n");
-
-	/* See if u-boot passed an external Device Tree blob */
-	machine_desc = setup_machine_fdt(uboot_arg);	/* uboot_tag == 2 */
-	if (!machine_desc)
-#endif
-	{
-		/* No, so try the embedded one */
+	bool use_embedded_dtb = true;
+	bool append_cmdline = false;
+
+	/* check that we know this tag */
+	if (uboot_tag != UBOOT_TAG_NONE &&
+	    uboot_tag != UBOOT_TAG_CMDLINE &&
+	    uboot_tag != UBOOT_TAG_DTB) {
+		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+		goto ignore_uboot_args;
+	}
+
+	if (uboot_tag != UBOOT_TAG_NONE &&
+	    uboot_arg_invalid((unsigned long)uboot_arg)) {
+		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+		goto ignore_uboot_args;
+	}
+
+	/* see if U-boot passed an external Device Tree blob */
+	if (uboot_tag == UBOOT_TAG_DTB) {
+		machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+		/* external Device Tree blob is invalid - use embedded one */
+		use_embedded_dtb = !machine_desc;
+	}
+
+	if (uboot_tag == UBOOT_TAG_CMDLINE)
+		append_cmdline = true;
+
+ignore_uboot_args:
+
+	if (use_embedded_dtb) {
 		machine_desc = setup_machine_fdt(__dtb_start);
 		if (!machine_desc)
 			panic("Embedded DT invalid\n");
+	}

-		/*
-		 * If we are here, it is established that @uboot_arg didn't
-		 * point to DT blob. Instead if u-boot says it is cmdline,
-		 * append to embedded DT cmdline.
-		 * setup_machine_fdt() would have populated @boot_command_line
-		 */
-		if (uboot_tag == 1) {
-			/* Ensure a whitespace between the 2 cmdlines */
-			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
-			strlcat(boot_command_line, uboot_arg,
-				COMMAND_LINE_SIZE);
-		}
+	/*
+	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+	 * append processing can only happen after.
+	 */
+	if (append_cmdline) {
+		/* Ensure a whitespace between the 2 cmdlines */
+		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
 	}
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	handle_uboot_args();

 	/* Save unparsed command line copy for /proc/cmdline */
 	*cmdline_p = boot_command_line;
......
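The control flow of the new handle_uboot_args() is easy to lose in diff form, so here is a condensed, userspace-runnable sketch of the same validation logic. PAGE_OFFSET, the kernel image bounds, and the sample inputs are stand-in values for illustration, not the real ones:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in constants; in the real kernel these come from the memory map
     * and the linker script (_stext/_end). */
    #define PAGE_OFFSET   0x80000000UL
    #define KERNEL_START  0x80000000UL
    #define KERNEL_END    0x80800000UL

    enum { UBOOT_TAG_NONE = 0, UBOOT_TAG_CMDLINE = 1, UBOOT_TAG_DTB = 2 };

    static bool uboot_arg_invalid(unsigned long addr)
    {
        /* must be an untranslated (high) address ... */
        if (addr < PAGE_OFFSET)
            return true;
        /* ... and must not clobber the resident kernel image */
        return addr >= KERNEL_START && addr <= KERNEL_END;
    }

    /* Returns true when the bootloader-provided pointer should be used. */
    static bool handle_uboot_args(unsigned int tag, unsigned long arg)
    {
        if (tag != UBOOT_TAG_NONE && tag != UBOOT_TAG_CMDLINE &&
            tag != UBOOT_TAG_DTB) {
            printf("Ignore U-boot args: invalid tag %u\n", tag);
            return false;
        }
        if (tag != UBOOT_TAG_NONE && uboot_arg_invalid(arg)) {
            printf("Ignore U-boot args: invalid arg 0x%lx\n", arg);
            return false;
        }
        return tag != UBOOT_TAG_NONE;
    }

    int main(void)
    {
        printf("%d\n", handle_uboot_args(UBOOT_TAG_DTB, 0x81000000UL)); /* ok */
        printf("%d\n", handle_uboot_args(UBOOT_TAG_DTB, 0x00001000UL)); /* low addr */
        printf("%d\n", handle_uboot_args(7, 0x81000000UL));             /* bad tag */
        return 0;
    }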
@@ -25,15 +25,11 @@
 #endif

 #ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX)	prefetch    [RX, 56]
-# define PREFETCH_WRITE(RX)	prefetchw   [RX, 64]
 # define LOADX(DST,RX)		ldd.ab	DST, [RX, 8]
 # define STOREX(SRC,RX)		std.ab	SRC, [RX, 8]
 # define ZOLSHFT		5
 # define ZOLAND			0x1F
 #else
-# define PREFETCH_READ(RX)	prefetch    [RX, 28]
-# define PREFETCH_WRITE(RX)	prefetchw   [RX, 32]
 # define LOADX(DST,RX)		ld.ab	DST, [RX, 4]
 # define STOREX(SRC,RX)		st.ab	SRC, [RX, 4]
 # define ZOLSHFT		4
@@ -41,8 +37,6 @@
 #endif

 ENTRY_CFI(memcpy)
-	prefetch [r1]		; Prefetch the read location
-	prefetchw [r0]		; Prefetch the write location
 	mov.f	0, r2
 ;;; if size is zero
 	jz.d	[blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
 	lpnz	@.Lcopy32_64bytes
 	;; LOOP START
 	LOADX (r6, r1)
-	PREFETCH_READ (r1)
-	PREFETCH_WRITE (r3)
 	LOADX (r8, r1)
 	LOADX (r10, r1)
 	LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
 	lpnz	@.Lcopy8bytes_1
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
-	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetchw [r3, 32]	;Prefetch the next write location
 	SHIFT_1	(r7, r6, 24)
 	or	r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
 	lpnz	@.Lcopy8bytes_2
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
-	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetchw [r3, 32]	;Prefetch the next write location
 	SHIFT_1	(r7, r6, 16)
 	or	r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
 	lpnz	@.Lcopy8bytes_3
 	;; LOOP START
 	ld.ab	r6, [r1, 4]
-	prefetch [r1, 28]	;Prefetch the next read location
 	ld.ab	r8, [r1,4]
-	prefetchw [r3, 32]	;Prefetch the next write location
 	SHIFT_1	(r7, r6, 8)
 	or	r7, r7, r5
......
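The memcpy change above simply drops the software prefetch hints; as the commit subject says, the loop could issue prefetchw past the end of the buffer on its final iterations. If prefetch hints were ever brought back, they would need to be bounds-checked. A hedged C sketch of that idea using GCC's __builtin_prefetch (an illustration, not the kernel's code):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Only issue prefetch hints while the hinted address is still inside the
     * buffers, so a hint can never reference memory beyond the caller's block. */
    static void copy_with_bounded_prefetch(char *dst, const char *src, size_t n)
    {
        const size_t step = 32;        /* assumed cache-line-ish stride */
        size_t i;

        for (i = 0; i + step <= n; i += step) {
            if (i + 2 * step <= n) {
                __builtin_prefetch(src + i + step, 0);    /* next read */
                __builtin_prefetch(dst + i + step, 1);    /* next write */
            }
            memcpy(dst + i, src + i, step);
        }
        memcpy(dst + i, src + i, n - i);    /* copy the tail */
    }

    int main(void)
    {
        char src[100], dst[100];

        for (int i = 0; i < 100; i++)
            src[i] = (char)i;
        copy_with_bounded_prefetch(dst, src, sizeof(dst));
        printf("%d\n", dst[99]);
        return 0;
    }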
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
 	bool "ARC HS Development Kit SOC"
 	depends on ISA_ARCV2
 	select ARC_HAS_ACCL_REGS
+	select ARC_IRQ_NO_AUTOSAVE
 	select CLK_HSDK
 	select RESET_HSDK
 	select HAVE_PCI