Commit 815a4bb1 authored by Linus Torvalds

Merge tag 'arc-fixes-for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC Fixes from Vineet Gupta:
 - Handle unaligned access in zero delay loops
 - spinlock livelock fix for SMP SystemC model
 - fix 32bit overflow in access_ok
 - better setup of clockevents

* tag 'arc-fixes-for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: Use clockevents_config_and_register over clockevents_register_device
  ARC: Workaround spinlock livelock in SMP SystemC simulation
  ARC: Fix 32-bit wrap around in access_ok()
  ARC: Handle zero-overhead-loop in unaligned access handler
parents 15c03dd4 55c2e262
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	__asm__ __volatile__(
+	"	ex  %0, [%1]	\n"
+	: "+r" (tmp)
+	: "r"(&(lock->slock))
+	: "memory");
+
 	smp_mb();
 }
...
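The hunk above changes the release side of the lock from a plain store to the atomic EX (exchange) instruction, so that both acquire and release go through an atomic exchange; per the commit title this works around a livelock seen in the SMP SystemC simulation. Below is a minimal user-space sketch of that exchange-based lock/unlock shape, using GCC's __atomic_exchange_n builtin. It is only an illustration of the idea, not the ARC assembly or the kernel's arch_spinlock_t; the toy_ names are invented for the demo.

/* Illustrative sketch only: a test-and-set spinlock where both lock and
 * unlock go through an atomic exchange, mirroring the shape of the ARC
 * change above. Uses GCC/Clang __atomic builtins; not kernel code. */
#include <stdio.h>

#define UNLOCKED 0u
#define LOCKED   1u

struct toy_spinlock { unsigned int slock; };

static void toy_spin_lock(struct toy_spinlock *lock)
{
	/* spin until the exchange returns UNLOCKED, i.e. we took the lock */
	while (__atomic_exchange_n(&lock->slock, LOCKED, __ATOMIC_ACQUIRE) != UNLOCKED)
		;
}

static void toy_spin_unlock(struct toy_spinlock *lock)
{
	/* release via an atomic exchange rather than a plain store,
	 * analogous to the EX instruction replacing "lock->slock = ..." */
	unsigned int tmp = UNLOCKED;

	tmp = __atomic_exchange_n(&lock->slock, tmp, __ATOMIC_RELEASE);
	(void)tmp;	/* previous value (LOCKED) is not needed here */
}

int main(void)
{
	struct toy_spinlock lock = { .slock = UNLOCKED };

	toy_spin_lock(&lock);
	printf("locked:   slock=%u\n", lock.slock);
	toy_spin_unlock(&lock);
	printf("unlocked: slock=%u\n", lock.slock);
	return 0;
}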
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-ngeative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
-				 (((addr)+(sz)) <= get_fs()))
+				 ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 				 likely(__user_ok((addr), (sz))))
...
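The old __user_ok() test adds @addr and @sz before comparing against the limit, so on a 32-bit target a large address plus a modest size can wrap past 2^32 and compare as "small", letting a kernel-range buffer pass the check. The rewritten form subtracts @sz from the limit instead, which cannot underflow because the first sub-expression already guarantees sz <= TASK_SIZE. A small stand-alone sketch of the arithmetic (TASK_SIZE_DEMO and the sample values are made up for illustration, not the real ARC limits):

/* Illustrative arithmetic only: shows how the old __user_ok() check can
 * wrap around in 32-bit unsigned math while the rewritten form does not. */
#include <stdio.h>
#include <stdint.h>

#define TASK_SIZE_DEMO	0x80000000u	/* hypothetical user/kernel split */

static int old_user_ok(uint32_t addr, uint32_t sz)
{
	/* (addr + sz) may wrap past 2^32 and compare as "small" */
	return (sz <= TASK_SIZE_DEMO) && ((addr + sz) <= TASK_SIZE_DEMO);
}

static int new_user_ok(uint32_t addr, uint32_t sz)
{
	/* sz <= TASK_SIZE_DEMO holds, so the subtraction cannot underflow */
	return (sz <= TASK_SIZE_DEMO) && (addr <= (TASK_SIZE_DEMO - sz));
}

int main(void)
{
	uint32_t addr = 0xFFFFF000u;	/* kernel-range address */
	uint32_t sz   = 0x2000u;	/* addr + sz wraps to 0x1000 */

	printf("old check: %d (bogus pass)\n", old_user_ok(addr, sz));
	printf("new check: %d (correct fail)\n", new_user_ok(addr, sz));
	return 0;
}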
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
-	clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-	clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
 	clk->cpumask = cpumask_of(cpu);
-	clockevents_register_device(clk);
+	clockevents_config_and_register(clk, arc_get_core_freq(),
+					0, ARC_TIMER_MAX);
 
 	/*
 	 * setup the per-cpu timer IRQ handler - for all cpus
...
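clockevents_config_and_register() takes over the mult/shift and delta setup that the removed clockevents_calc_mult_shift()/clockevent_delta2ns() calls did by hand, given only the clock frequency and the min/max deltas in ticks. The sketch below only illustrates the underlying cycles-to-nanoseconds scaling, roughly ns = (cycles * mult) >> shift; the 80 MHz frequency and the fixed shift are made-up demo numbers, and the kernel chooses the shift itself rather than hard-coding it.

/* Illustrative sketch of the mult/shift scaling that the clockevents core
 * sets up internally: converting timer cycles to nanoseconds.
 * Not kernel code; values and names are invented for the demo. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define DEMO_FREQ_HZ	80000000u	/* hypothetical 80 MHz core clock */
#define DEMO_SHIFT	26		/* fixed for the demo; kernel computes this */

static uint64_t demo_cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* mult chosen so (cycles * mult) >> shift approximates cycles / freq in ns */
	uint32_t mult = (uint32_t)((NSEC_PER_SEC << DEMO_SHIFT) / DEMO_FREQ_HZ);

	/* 80,000 cycles at 80 MHz should come out as ~1,000,000 ns (1 ms) */
	printf("mult=%u shift=%d\n", mult, DEMO_SHIFT);
	printf("80000 cycles -> %llu ns\n",
	       (unsigned long long)demo_cycles_to_ns(80000, mult, DEMO_SHIFT));
	return 0;
}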
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
+
+		/* handle zero-overhead-loop */
+		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+			regs->ret = regs->lp_start;
+			regs->lp_count--;
+		}
 	}
 
 	return 0;
...
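ARC's zero-overhead-loop hardware wraps the program counter back to LP_START and decrements LP_COUNT when execution reaches LP_END with a non-zero count. Because the unaligned-access fixup advances regs->ret past the emulated instruction by hand, it has to replicate that wrap, which is what the added lines do. A small stand-alone sketch of the same bookkeeping (the toy_regs struct and the addresses are invented for the demo; this is not the kernel's pt_regs):

/* Illustrative simulation of the zero-overhead-loop bookkeeping: when the
 * next PC lands exactly on lp_end with a non-zero lp_count, execution wraps
 * back to lp_start and the count is decremented. */
#include <stdio.h>

struct toy_regs {
	unsigned long ret;	/* next PC after the emulated instruction */
	unsigned long lp_start;
	unsigned long lp_end;
	unsigned long lp_count;
};

static void advance_past_insn(struct toy_regs *regs, unsigned int instr_len)
{
	regs->ret += instr_len;

	/* same check as the fixup: wrap to loop start while iterations remain */
	if ((regs->ret == regs->lp_end) && regs->lp_count) {
		regs->ret = regs->lp_start;
		regs->lp_count--;
	}
}

int main(void)
{
	/* hypothetical loop body 0x100..0x108, last instruction 4 bytes long */
	struct toy_regs regs = {
		.ret = 0x104, .lp_start = 0x100, .lp_end = 0x108, .lp_count = 3,
	};

	for (int i = 0; i < 3; i++) {
		advance_past_insn(&regs, 4);
		printf("next pc=0x%lx lp_count=%lu\n", regs.ret, regs.lp_count);
	}
	return 0;
}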