Commit f3935926 authored by Linus Torvalds

Merge tag 'arc-5.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:

 - Assorted fixes

* tag 'arc-5.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: remove redundant READ_ONCE() in cmpxchg loop
  ARC: atomic: cleanup atomic-llsc definitions
  arc: drop definitions of pgd_index() and pgd_offset{, _k}() entirely
  ARC: dts: align SPI NOR node name with dtschema
  ARC: Remove a redundant memset()
  ARC: fix typos in comments
  ARC: entry: fix syscall_trace_exit argument
parents 6fc2586d c6ed4d84
@@ -275,7 +275,7 @@ spi0: spi@20000 {
 	cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
 		   <&creg_gpio 1 GPIO_ACTIVE_LOW>;

-	spi-flash@0 {
+	flash@0 {
 		compatible = "sst26wf016b", "jedec,spi-nor";
 		reg = <0>;
 		#address-cells = <1>;
...
@@ -5,7 +5,7 @@
 #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

-#define ATOMIC_OP(op, c_op, asm_op)					\
+#define ATOMIC_OP(op, asm_op)						\
 static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	unsigned int val;						\
@@ -21,7 +21,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v)		\
 	: "cc");							\
 }									\

-#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+#define ATOMIC_OP_RETURN(op, asm_op)					\
 static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	unsigned int val;						\
@@ -42,7 +42,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 #define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
 #define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed

-#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
+#define ATOMIC_FETCH_OP(op, asm_op)					\
 static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 {									\
 	unsigned int val, orig;						\
@@ -69,23 +69,23 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 #define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
 #define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

-#define ATOMIC_OPS(op, c_op, asm_op)					\
-	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN(op, asm_op)					\
+	ATOMIC_FETCH_OP(op, asm_op)

-ATOMIC_OPS(add, +=, add)
-ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)

 #undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op)					\
-	ATOMIC_OP(op, c_op, asm_op)					\
-	ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_FETCH_OP(op, asm_op)

-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(andnot, &= ~, bic)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)

 #define arch_atomic_andnot		arch_atomic_andnot
...
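The hunk above is purely a signature cleanup: the LLSC variants never use a C-level operator, they splice the asm mnemonic (asm_op) straight into an llock/scond retry sequence, so the c_op argument was dead weight. A rough, illustrative expansion of ATOMIC_OP(add, add) after the change (the real body sits in the lines the hunk elides; layout and comments here are mine):

	static inline void arch_atomic_add(int i, atomic_t *v)
	{
		unsigned int val;

		__asm__ __volatile__(
		"1:	llock   %[val], [%[ctr]]	\n"
		"	add     %[val], %[val], %[i]	\n"	/* <-- asm_op; no C-level c_op anywhere */
		"	scond   %[val], [%[ctr]]	\n"
		"	bnz     1b			\n"	/* retry if the store-conditional failed */
		: [val] "=&r" (val)
		: [ctr] "r" (&v->counter),
		  [i]   "ir" (i)
		: "cc");
	}

The spinlock-based fallback (atomic-spinlock.h) is the variant that actually performs the operation in C, which is presumably why c_op survives there.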
@@ -98,9 +98,6 @@
 /*
  * 1st level paging: pgd
  */
-#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
-#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))
-#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

 #define pgd_ERROR(e) \
 	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
...
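Deleting these outright works because include/linux/pgtable.h supplies generic fallbacks whenever an architecture does not define its own, and the generic ones do the same job here. Paraphrased from memory rather than quoted, the generic shape is roughly:

	#ifndef pgd_index
	#define pgd_index(a)		(((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
	#endif

	#ifndef pgd_offset
	#define pgd_offset(mm, addr)	pgd_offset_pgd((mm)->pgd, (addr))
	#endif

	#ifndef pgd_offset_k
	#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))
	#endif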
@@ -366,7 +366,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
 	case op_SP:	/* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
 		/* note: we are ignoring possibility of:
 		 * ADD_S, SUB_S, PUSH_S, POP_S as these should not
-		 * cause unaliged exception anyway */
+		 * cause unaligned exception anyway */
 		state->write = BITS(state->words[0], 6, 6);
 		state->zz = BITS(state->words[0], 5, 5);
 		if (state->zz)
@@ -503,7 +503,6 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
 {
 	struct disasm_state instr;

-	memset(&instr, 0, sizeof(struct disasm_state));
 	disasm_instr(pc, &instr, 0, regs, cregs);

 	*next_pc = pc + instr.instr_len;
...
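The memset() dropped in the second hunk is redundant because disasm_instr() appears to zero the state it is handed before filling it in. A tiny self-contained illustration of the pattern, with invented names (not the kernel code):

	#include <string.h>

	struct state { int len; int fault; };

	static void decode(struct state *s)
	{
		memset(s, 0, sizeof(*s));	/* callee initializes every field */
		s->len = 4;
	}

	static int next_len(void)
	{
		struct state s;

		/* memset(&s, 0, sizeof(s)); -- would be redundant, decode() does it */
		decode(&s);
		return s.len;
	}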
@@ -196,6 +196,7 @@ tracesys_exit:
 	st  r0, [sp, PT_r0]     ; sys call return value in pt_regs

 	;POST Sys Call Ptrace Hook
+	mov r0, sp		; pt_regs needed
 	bl  @syscall_trace_exit
 	b   ret_from_exception ; NOT ret_from_system_call at is saves r0 which
 				; we'd done before calling post hook above
...
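Why the one-line addition is needed: on ARC the first C function argument is passed in r0, but the st just above reused r0 for the syscall return value. The hook invoked by "bl @syscall_trace_exit" expects the register frame; its assumed C-side prototype (simplified, from the C side of ARC's ptrace code) is:

	void syscall_trace_exit(struct pt_regs *regs);	/* first argument arrives in r0 */

Since pt_regs sits at sp at this point, "mov r0, sp" restores the pointer the hook actually wants.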
@@ -319,7 +319,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	regs->ret = (unsigned long)ksig->ka.sa.sa_handler;

 	/*
-	 * handler returns using sigreturn stub provided already by userpsace
+	 * handler returns using sigreturn stub provided already by userspace
 	 * If not, nuke the process right away
 	 */
 	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
...
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
 struct plat_smp_ops  __weak plat_smp_ops;

-/* XXX: per cpu ? Only needed once in early seconday boot */
+/* XXX: per cpu ? Only needed once in early secondary boot */
 struct task_struct *secondary_idle_tsk;

 /* Called from start_kernel */
@@ -274,7 +274,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
 	 * and read back old value
 	 */
 	do {
-		new = old = READ_ONCE(*ipi_data_ptr);
+		new = old = *ipi_data_ptr;
 		new |= 1U << msg;
 	} while (cmpxchg(ipi_data_ptr, old, new) != old);
...
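The READ_ONCE() removed above only seeded the retry loop: if the plain load races and observes a stale value, the cmpxchg() comparison fails and the next iteration re-reads the variable anyway, so the annotation bought nothing. A stand-alone C11 sketch of the same pattern (illustrative only, not the kernel code):

	#include <stdatomic.h>

	static void set_msg_bit(atomic_uint *ipi_data, unsigned int msg)
	{
		unsigned int old, new;

		do {
			/* relaxed seed read; a stale value just costs one retry */
			new = old = atomic_load_explicit(ipi_data, memory_order_relaxed);
			new |= 1U << msg;
		} while (!atomic_compare_exchange_strong(ipi_data, &old, new));
	}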
@@ -237,7 +237,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;

-	/* clear any remanants of delay slot */
+	/* clear any remnants of delay slot */
 	if (delay_mode(regs)) {
 		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
...
@@ -401,7 +401,7 @@ static inline void __before_dc_op(const int op)
 {
 	if (op == OP_FLUSH_N_INV) {
 		/* Dcache provides 2 cmd: FLUSH or INV
-		 * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
 		 * flush-n-inv is achieved by INV cmd but with IM=1
 		 * So toggle INV sub-mode depending on op request and default
 		 */
...