Commit 4f97c9b2 authored by Linus Torvalds

Merge branch 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull Tile bugfixes from Chris Metcalf:
 "This fixes some serious issues with PREEMPT support, and a couple of
  smaller corner-case issues fixed in the last couple of weeks"

* 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch: tile: re-use kbasename() helper
  tile: use a more conservative __my_cpu_offset in CONFIG_PREEMPT
  tile: ensure interrupts disabled for preempt_schedule_irq()
  tile: change lock initalization in hardwall
  tile: include: asm: use 'long long' instead of 'u64' for atomic64_t and its related functions
parents 162bdafa 0cc96a74
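A note on the last item in the shortlog above: the generic atomic64 interface (and most other architectures) declares these operations with signed 'long long', so the tile-only 'u64' signatures were the odd ones out. As a hedged sketch (not code from any of these patches), one practical consequence is that a caller written against the generic, signed API can go wrong when the read returns an unsigned type:

    #include <linux/types.h>
    #include <linux/atomic.h>

    /* Hypothetical caller, for illustration only. */
    static bool demo_went_negative(atomic64_t *v)
    {
            atomic64_add(-1LL, v);          /* signed delta, per the generic API */
            return atomic64_read(v) < 0;    /* can never be true if the return type is u64 */
    }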
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
 	return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+					  long long n)
 {
 	return cmpxchg64(&v->counter, o, n);
 }
......
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					    long long u)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
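The comment above atomic64_set() is worth a concrete illustration: on 32-bit tile the 64-bit atomics are software read-modify-write sequences (see _atomic64_xchg_add() and friends below), so a plain store could land between the load and store halves of a concurrent operation and be lost. A hedged interleaving sketch, not taken from the patch:

    /*
     * Hypothetical interleaving if atomic64_set() were a raw store:
     *
     *   CPU0: _atomic64_xchg_add(&v->counter, i)     CPU1: v->counter = n (raw store)
     *   old = v->counter;
     *                                                v->counter = n;
     *   v->counter = old + i;    <-- overwrites n; the set is silently lost
     *
     * Implementing atomic64_set() via _atomic64_xchg() routes the store
     * through the same per-address lock (__atomic_setup()) as the other
     * software atomics, so it serializes with them instead of racing.
     */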
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+				    long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+				     long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					    int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
......
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n) \
 ({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	if (sizeof(*(ptr)) != 4) \
 		__cmpxchg_called_with_bad_pointer(); \
 	smp_mb(); \
-	(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+	(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+					(int)n); \
 })
 
 #define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	if (sizeof(*(ptr)) != 8) \
 		__xchg_called_with_bad_pointer(); \
 	smp_mb(); \
-	(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+	(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+				       (long long)(n)); \
 })
 
 #define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	if (sizeof(*(ptr)) != 8) \
 		__cmpxchg_called_with_bad_pointer(); \
 	smp_mb(); \
-	(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+	(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
+					  (long long)o, (long long)n); \
 })
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) { \
 	case 4: \
 		__x = (typeof(__x))(unsigned long) \
-			__insn_exch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_exch4((ptr), \
+				     (u32)(unsigned long)(n)); \
 		break; \
 	case 8: \
 		__x = (typeof(__x)) \
 			__insn_exch((ptr), (unsigned long)(n)); \
 		break; \
 	default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) { \
 	case 4: \
 		__x = (typeof(__x))(unsigned long) \
-			__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_cmpexch4((ptr), \
+					(u32)(unsigned long)(n)); \
 		break; \
 	case 8: \
-		__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+		__x = (typeof(__x))__insn_cmpexch((ptr), \
+						  (long long)(n)); \
 		break; \
 	default: \
 		__cmpxchg_called_with_bad_pointer(); \
......
@@ -15,9 +15,37 @@
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long tp;
+	register unsigned long *sp asm("sp");
+	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+	return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
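To see why the PREEMPT case needs the hazard, note that preempt_disable() and preempt_enable() both contain barrier(), and between two critical sections the task may migrate to another CPU, so anything derived from "tp" must be recomputed. A hedged usage sketch (the per-cpu variable and function are invented for illustration):

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(int, demo_hits);    /* hypothetical per-cpu counter */

    static void demo(void)
    {
            preempt_disable();
            __this_cpu_inc(demo_hits);    /* address derived from __my_cpu_offset */
            preempt_enable();             /* barrier(); the task may migrate here */

            preempt_disable();
            /*
             * __my_cpu_offset must be re-read here; a value cached from before
             * preempt_enable() could point at another cpu's copy of demo_hits.
             */
            __this_cpu_inc(demo_hits);
            preempt_enable();
    }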
......
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
 		0,
 		"udn",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
 		NULL
 	},
 #ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
 		1,  /* disabled pending hypervisor support */
 		"idn",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
 		NULL
 	},
 	{  /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
 		0,
 		"ipi",
 		LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
-		__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+		__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
 		NULL
 	},
 #endif
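On the hardwall hunks: __SPIN_LOCK_UNLOCKED() is the public static initializer for a spinlock_t (it is what DEFINE_SPINLOCK() itself uses), whereas __SPIN_LOCK_INITIALIZER() is an internal helper one level down. Roughly, from <linux/spinlock_types.h> (simplified sketch, not a verbatim copy; the exact fields depend on debug and lockdep options):

    #define __SPIN_LOCK_UNLOCKED(lockname) \
            (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)

    #define DEFINE_SPINLOCK(x)      spinlock_t x = __SPIN_LOCK_UNLOCKED(x)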
......
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
 	}
 	bzt     r28, 1f
 	bnz     r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
......
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
 	}
 	beqzt   r28, 1f
 	bnez    r29, 1f
+	/* Disable interrupts explicitly for preemption. */
+	IRQ_DISABLE(r20,r21)
+	TRACE_IRQS_OFF
 	jal     preempt_schedule_irq
 	FEEDBACK_REENTER(interrupt_return)
 1:
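Both the 32-bit and 64-bit interrupt-return paths get the same three lines because preempt_schedule_irq() must be entered with interrupts disabled; the generic scheduler code of this era enforces that with a BUG_ON. Roughly, paraphrased from kernel/sched/core.c (the exact form varies by kernel version):

    /* Paraphrased for context; not code from this merge. */
    asmlinkage void __sched preempt_schedule_irq(void)
    {
            /* Catch callers which need to be fixed */
            BUG_ON(current_thread_info()->preempt_count || !irqs_disabled());

            /* ... re-enables IRQs around __schedule(), then disables them again ... */
    }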
......
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
 #include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
 	}
 
 	if (vma->vm_file) {
-		char *s;
 		p = d_path(&vma->vm_file->f_path, buf, bufsize);
 		if (IS_ERR(p))
 			p = "?";
-		s = strrchr(p, '/');
-		if (s)
-			p = s+1;
+		name = kbasename(p);
 	} else {
-		p = "anon";
+		name = "anon";
 	}
 
 	/* Generate a string description of the vma info. */
-	namelen = strlen(p);
+	namelen = strlen(name);
 	remaining = (bufsize - 1) - namelen;
-	memmove(buf, p, namelen);
+	memmove(buf, name, namelen);
 	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
 		 vma->vm_start, vma->vm_end - vma->vm_start);
 }
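For reference, kbasename() (declared in <linux/string.h>, hence the new include above) does essentially what the removed strrchr() sequence did, returning the part of the path after the final '/':

    /* Essentially the generic helper the patch switches to: */
    static inline const char *kbasename(const char *path)
    {
            const char *tail = strrchr(path, '/');
            return tail ? tail + 1 : path;
    }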
......
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
 	return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
 	return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
 	/*
 	 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
 	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
......