Commit 15acb7ea authored by David S. Miller

Merge davem@nuts.davemloft.net:/disk1/BK/sparc-2.6

into kernel.bkbits.net:/home/davem/sparc-2.6
parents 85714da1 3085f02b
@@ -333,9 +333,8 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	current->mm->start_stack =
 		(unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
 	if (!(orig_thr_flags & _TIF_32BIT)) {
-		unsigned long pgd_cache;
-		pgd_cache = ((unsigned long)pgd_val(current->mm->pgd[0]))<<11;
+		unsigned long pgd_cache = get_pgd_cache(current->mm->pgd);
 		__asm__ __volatile__("stxa\t%0, [%1] %2\n\t"
 				     "membar #Sync"
 				     : /* no outputs */
...
@@ -440,7 +440,7 @@ void flush_thread(void)
 			pmd_t *page = pmd_alloc_one(mm, 0);
 			pud_set(pud0, page);
 		}
-		pgd_cache = ((unsigned long) pud_val(*pud0)) << 11UL;
+		pgd_cache = get_pgd_cache(pgd0);
 	}
 	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
 			     "membar #Sync"
...
@@ -894,9 +894,8 @@ static unsigned long penguins_are_doing_time;
 void smp_capture(void)
 {
-	int result = __atomic_add(1, &smp_capture_depth);
-	membar("#StoreStore | #LoadStore");
+	int result = atomic_add_ret(1, &smp_capture_depth);
 	if (result == 1) {
 		int ncpus = num_online_cpus();
...
@@ -172,18 +172,25 @@ EXPORT_SYMBOL(down_interruptible);
 EXPORT_SYMBOL(up);
 
 /* Atomic counter implementation. */
-EXPORT_SYMBOL(__atomic_add);
-EXPORT_SYMBOL(__atomic_sub);
-EXPORT_SYMBOL(__atomic64_add);
-EXPORT_SYMBOL(__atomic64_sub);
+EXPORT_SYMBOL(atomic_add);
+EXPORT_SYMBOL(atomic_add_ret);
+EXPORT_SYMBOL(atomic_sub);
+EXPORT_SYMBOL(atomic_sub_ret);
+EXPORT_SYMBOL(atomic64_add);
+EXPORT_SYMBOL(atomic64_add_ret);
+EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_ret);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(_atomic_dec_and_lock);
 #endif
 
 /* Atomic bit operations. */
-EXPORT_SYMBOL(___test_and_set_bit);
-EXPORT_SYMBOL(___test_and_clear_bit);
-EXPORT_SYMBOL(___test_and_change_bit);
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
 
 /* Bit searching */
 EXPORT_SYMBOL(find_next_bit);
...
@@ -4,73 +4,136 @@
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  */
 
+#include <linux/config.h>
 #include <asm/asi.h>
 
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad
+#define ATOMIC_POST_BARRIER	membar #StoreLoad | #StoreStore
+#else
+#define ATOMIC_PRE_BARRIER	nop
+#define ATOMIC_POST_BARRIER	nop
+#endif
+
 	.text
 
-	/* We use these stubs for the uncommon case
-	 * of contention on the atomic value. This is
-	 * so that we can keep the main fast path 8
-	 * instructions long and thus fit into a single
-	 * L2 cache line.
-	 */
-__atomic_add_membar:
-	ba,pt	%xcc, __atomic_add
-	 membar	#StoreLoad | #StoreStore
-
-__atomic_sub_membar:
-	ba,pt	%xcc, __atomic_sub
-	 membar	#StoreLoad | #StoreStore
+	/* Two versions of the atomic routines, one that
+	 * does not return a value and does not perform
+	 * memory barriers, and a second which returns
+	 * a value and does the barriers.
+	 */
+	.globl	atomic_add
+	.type	atomic_add,#function
+atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g5
+	add	%g5, %o0, %g7
+	cas	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_add, .-atomic_add
+
+	.globl	atomic_sub
+	.type	atomic_sub,#function
+atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	lduw	[%o1], %g5
+	sub	%g5, %o0, %g7
+	cas	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%icc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic_sub, .-atomic_sub
 
-	.align	64
-	.globl	__atomic_add
-	.type	__atomic_add,#function
-__atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
-	lduw	[%o1], %g5
-	add	%g5, %o0, %g7
-	cas	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_add_membar
-	 add	%g7, %o0, %g7
-	retl
-	 sra	%g7, 0, %o0
-	.size	__atomic_add, .-__atomic_add
+	.globl	atomic_add_ret
+	.type	atomic_add_ret,#function
+atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g5
+	add	%g5, %o0, %g7
+	cas	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%icc, 1b
+	 add	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 sra	%g7, 0, %o0
+	.size	atomic_add_ret, .-atomic_add_ret
 
-	.globl	__atomic_sub
-	.type	__atomic_sub,#function
-__atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
-	lduw	[%o1], %g5
-	sub	%g5, %o0, %g7
-	cas	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_sub_membar
-	 sub	%g7, %o0, %g7
-	retl
-	 sra	%g7, 0, %o0
-	.size	__atomic_sub, .-__atomic_sub
+	.globl	atomic_sub_ret
+	.type	atomic_sub_ret,#function
+atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	lduw	[%o1], %g5
+	sub	%g5, %o0, %g7
+	cas	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%icc, 1b
+	 sub	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 sra	%g7, 0, %o0
+	.size	atomic_sub_ret, .-atomic_sub_ret
+
+	.globl	atomic64_add
+	.type	atomic64_add,#function
+atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g5
+	add	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_add, .-atomic64_add
 
-	.globl	__atomic64_add
-	.type	__atomic64_add,#function
-__atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
-	ldx	[%o1], %g5
-	add	%g5, %o0, %g7
-	casx	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%xcc, __atomic64_add
-	 membar	#StoreLoad | #StoreStore
-	retl
-	 add	%g7, %o0, %o0
-	.size	__atomic64_add, .-__atomic64_add
+	.globl	atomic64_sub
+	.type	atomic64_sub,#function
+atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+1:	ldx	[%o1], %g5
+	sub	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	atomic64_sub, .-atomic64_sub
+
+	.globl	atomic64_add_ret
+	.type	atomic64_add_ret,#function
+atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g5
+	add	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, 1b
+	 add	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 mov	%g7, %o0
+	.size	atomic64_add_ret, .-atomic64_add_ret
 
-	.globl	__atomic64_sub
-	.type	__atomic64_sub,#function
-__atomic64_sub: /* %o0 = increment, %o1 = atomic_ptr */
-	ldx	[%o1], %g5
-	sub	%g5, %o0, %g7
-	casx	[%o1], %g5, %g7
-	cmp	%g5, %g7
-	bne,pn	%xcc, __atomic64_sub
-	 membar	#StoreLoad | #StoreStore
-	retl
-	 sub	%g7, %o0, %o0
-	.size	__atomic64_sub, .-__atomic64_sub
+	.globl	atomic64_sub_ret
+	.type	atomic64_sub_ret,#function
+atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+	ATOMIC_PRE_BARRIER
+1:	ldx	[%o1], %g5
+	sub	%g5, %o0, %g7
+	casx	[%o1], %g5, %g7
+	cmp	%g5, %g7
+	bne,pn	%xcc, 1b
+	 sub	%g7, %o0, %g7
+	ATOMIC_POST_BARRIER
+	retl
+	 mov	%g7, %o0
+	.size	atomic64_sub_ret, .-atomic64_sub_ret
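For readers who do not follow SPARC assembly, the cas-based retry loop that the new atomic_add_ret routine implements corresponds roughly to the standalone C sketch below (C11 atomics, hypothetical function name, not kernel code). The non-_ret variants are the same loop minus the return value and the ATOMIC_PRE/POST_BARRIER macros.

```c
#include <stdatomic.h>
#include <stdio.h>

/* atomic_add_ret_sketch() is a made-up name used only for illustration. */
static int atomic_add_ret_sketch(int increment, _Atomic int *counter)
{
	int old, new;

	do {
		old = atomic_load(counter);	/* lduw  [%o1], %g5     */
		new = old + increment;		/* add   %g5, %o0, %g7  */
	} while (!atomic_compare_exchange_weak(counter, &old, new));	/* cas + retry */

	return new;	/* the *_ret variants hand back the new value */
}

int main(void)
{
	_Atomic int v = 40;

	printf("%d\n", atomic_add_ret_sketch(2, &v));	/* prints 42 */
	return 0;
}
```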
@@ -4,69 +4,142 @@
  * Copyright (C) 2000 David S. Miller (davem@redhat.com)
  */
 
+#include <linux/config.h>
 #include <asm/asi.h>
 
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+#define BITOP_PRE_BARRIER	membar #StoreLoad | #LoadLoad
+#define BITOP_POST_BARRIER	membar #StoreLoad | #StoreStore
+#else
+#define BITOP_PRE_BARRIER	nop
+#define BITOP_POST_BARRIER	nop
+#endif
+
 	.text
-	.align	64
 
-	.globl	___test_and_set_bit
-	.type	___test_and_set_bit,#function
-___test_and_set_bit:	/* %o0=nr, %o1=addr */
-	srlx	%o0, 6, %g1
-	mov	1, %g5
-	sllx	%g1, 3, %g3
-	and	%o0, 63, %g2
-	sllx	%g5, %g2, %g5
-	add	%o1, %g3, %o1
-	ldx	[%o1], %g7
-1:	andcc	%g7, %g5, %o0
-	bne,pn	%xcc, 2f
-	 xor	%g7, %g5, %g1
-	casx	[%o1], %g7, %g1
-	cmp	%g7, %g1
-	bne,a,pn %xcc, 1b
-	 ldx	[%o1], %g7
-2:	retl
-	 membar	#StoreLoad | #StoreStore
-	.size	___test_and_set_bit, .-___test_and_set_bit
+	.globl	test_and_set_bit
+	.type	test_and_set_bit,#function
+test_and_set_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1
+	mov	1, %g5
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%g5, %g2, %g5
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	or	%g7, %g5, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %g5, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0
+	.size	test_and_set_bit, .-test_and_set_bit
 
-	.globl	___test_and_clear_bit
-	.type	___test_and_clear_bit,#function
-___test_and_clear_bit:	/* %o0=nr, %o1=addr */
-	srlx	%o0, 6, %g1
-	mov	1, %g5
-	sllx	%g1, 3, %g3
-	and	%o0, 63, %g2
-	sllx	%g5, %g2, %g5
-	add	%o1, %g3, %o1
-	ldx	[%o1], %g7
-1:	andcc	%g7, %g5, %o0
-	be,pn	%xcc, 2f
-	 xor	%g7, %g5, %g1
-	casx	[%o1], %g7, %g1
-	cmp	%g7, %g1
-	bne,a,pn %xcc, 1b
-	 ldx	[%o1], %g7
-2:	retl
-	 membar	#StoreLoad | #StoreStore
-	.size	___test_and_clear_bit, .-___test_and_clear_bit
+	.globl	test_and_clear_bit
+	.type	test_and_clear_bit,#function
+test_and_clear_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1
+	mov	1, %g5
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%g5, %g2, %g5
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %g5, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %g5, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0
+	.size	test_and_clear_bit, .-test_and_clear_bit
 
-	.globl	___test_and_change_bit
-	.type	___test_and_change_bit,#function
-___test_and_change_bit:	/* %o0=nr, %o1=addr */
-	srlx	%o0, 6, %g1
-	mov	1, %g5
-	sllx	%g1, 3, %g3
-	and	%o0, 63, %g2
-	sllx	%g5, %g2, %g5
-	add	%o1, %g3, %o1
-	ldx	[%o1], %g7
-1:	and	%g7, %g5, %o0
-	xor	%g7, %g5, %g1
-	casx	[%o1], %g7, %g1
-	cmp	%g7, %g1
-	bne,a,pn %xcc, 1b
-	 ldx	[%o1], %g7
-2:	retl
-	 membar	#StoreLoad | #StoreStore
-	nop
-	.size	___test_and_change_bit, .-___test_and_change_bit
+	.globl	test_and_change_bit
+	.type	test_and_change_bit,#function
+test_and_change_bit:	/* %o0=nr, %o1=addr */
+	BITOP_PRE_BARRIER
+	srlx	%o0, 6, %g1
+	mov	1, %g5
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%g5, %g2, %g5
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %g5, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 and	%g7, %g5, %g2
+	BITOP_POST_BARRIER
+	clr	%o0
+	retl
+	 movrne	%g2, 1, %o0
+	.size	test_and_change_bit, .-test_and_change_bit
+
+	.globl	set_bit
+	.type	set_bit,#function
+set_bit:	/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %g5
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%g5, %g2, %g5
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	or	%g7, %g5, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	set_bit, .-set_bit
+
+	.globl	clear_bit
+	.type	clear_bit,#function
+clear_bit:	/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %g5
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%g5, %g2, %g5
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	andn	%g7, %g5, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	clear_bit, .-clear_bit
+
+	.globl	change_bit
+	.type	change_bit,#function
+change_bit:	/* %o0=nr, %o1=addr */
+	srlx	%o0, 6, %g1
+	mov	1, %g5
+	sllx	%g1, 3, %g3
+	and	%o0, 63, %g2
+	sllx	%g5, %g2, %g5
+	add	%o1, %g3, %o1
+1:	ldx	[%o1], %g7
+	xor	%g7, %g5, %g1
+	casx	[%o1], %g7, %g1
+	cmp	%g7, %g1
+	bne,pn	%xcc, 1b
+	 nop
+	retl
+	 nop
+	.size	change_bit, .-change_bit
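In rough C terms, the new test_and_set_bit computes the word and mask for bit nr and retries a compare-and-swap until it sticks, as in the standalone sketch below (C11 atomics, hypothetical function name; the kernel routine additionally issues BITOP_PRE/POST_BARRIER on SMP).

```c
#include <stdatomic.h>
#include <stdio.h>

static int test_and_set_bit_sketch(unsigned long nr, _Atomic unsigned long *addr)
{
	_Atomic unsigned long *word = addr + (nr >> 6);	/* srlx %o0, 6 / sllx %g1, 3 */
	unsigned long mask = 1UL << (nr & 63);		/* mov 1 / and / sllx        */
	unsigned long old;

	do {
		old = atomic_load(word);		/* ldx [%o1], %g7            */
	} while (!atomic_compare_exchange_weak(word, &old, old | mask));	/* casx + retry */

	return (old & mask) != 0;			/* movrne %g2, 1, %o0        */
}

int main(void)
{
	_Atomic unsigned long bitmap[2] = { 0, 0 };

	printf("%d\n", test_and_set_bit_sketch(65, bitmap));	/* 0: bit was clear  */
	printf("%d\n", test_and_set_bit_sketch(65, bitmap));	/* 1: already set    */
	return 0;
}
```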
@@ -469,9 +469,9 @@ static void cg14_init_one(struct sbus_dev *sdev, int node, int parent_node)
 	int is_8mb, linebytes, i;
 
 	if (!sdev) {
-		prom_getproperty(node, "address",
-				 (char *) &bases[0], sizeof(bases));
-		if (!bases[0]) {
+		if (prom_getproperty(node, "address",
+				     (char *) &bases[0], sizeof(bases)) <= 0
+		    || !bases[0]) {
 			printk(KERN_ERR "cg14: Device is not mapped.\n");
 			return;
 		}
...
@@ -8,6 +8,7 @@
 #ifndef __ARCH_SPARC64_ATOMIC__
 #define __ARCH_SPARC64_ATOMIC__
 
+#include <linux/config.h>
 #include <linux/types.h>
 
 typedef struct { volatile int counter; } atomic_t;
@@ -22,29 +23,27 @@ typedef struct { volatile __s64 counter; } atomic64_t;
 #define atomic_set(v, i)	(((v)->counter) = i)
 #define atomic64_set(v, i)	(((v)->counter) = i)
 
-extern int __atomic_add(int, atomic_t *);
-extern int __atomic64_add(__s64, atomic64_t *);
-
-extern int __atomic_sub(int, atomic_t *);
-extern int __atomic64_sub(__s64, atomic64_t *);
-
-#define atomic_add(i, v) ((void)__atomic_add(i, v))
-#define atomic64_add(i, v) ((void)__atomic64_add(i, v))
-
-#define atomic_sub(i, v) ((void)__atomic_sub(i, v))
-#define atomic64_sub(i, v) ((void)__atomic64_sub(i, v))
-
-#define atomic_dec_return(v) __atomic_sub(1, v)
-#define atomic64_dec_return(v) __atomic64_sub(1, v)
-
-#define atomic_inc_return(v) __atomic_add(1, v)
-#define atomic64_inc_return(v) __atomic64_add(1, v)
-
-#define atomic_sub_return(i, v) __atomic_sub(i, v)
-#define atomic64_sub_return(i, v) __atomic64_sub(i, v)
-
-#define atomic_add_return(i, v) __atomic_add(i, v)
-#define atomic64_add_return(i, v) __atomic64_add(i, v)
+extern void atomic_add(int, atomic_t *);
+extern void atomic64_add(int, atomic64_t *);
+extern void atomic_sub(int, atomic_t *);
+extern void atomic64_sub(int, atomic64_t *);
+
+extern int atomic_add_ret(int, atomic_t *);
+extern int atomic64_add_ret(int, atomic64_t *);
+extern int atomic_sub_ret(int, atomic_t *);
+extern int atomic64_sub_ret(int, atomic64_t *);
+
+#define atomic_dec_return(v) atomic_sub_ret(1, v)
+#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+
+#define atomic_inc_return(v) atomic_add_ret(1, v)
+#define atomic64_inc_return(v) atomic64_add_ret(1, v)
+
+#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+
+#define atomic_add_return(i, v) atomic_add_ret(i, v)
+#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
 
 /*
  * atomic_inc_and_test - increment and test
@@ -56,25 +55,32 @@ extern int __atomic64_sub(__s64, atomic64_t *);
  */
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 
-#define atomic_sub_and_test(i, v) (__atomic_sub(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (__atomic64_sub(i, v) == 0)
-
-#define atomic_dec_and_test(v) (__atomic_sub(1, v) == 0)
-#define atomic64_dec_and_test(v) (__atomic64_sub(1, v) == 0)
-
-#define atomic_inc(v) ((void)__atomic_add(1, v))
-#define atomic64_inc(v) ((void)__atomic64_add(1, v))
-
-#define atomic_dec(v) ((void)__atomic_sub(1, v))
-#define atomic64_dec(v) ((void)__atomic64_sub(1, v))
-
-#define atomic_add_negative(i, v) (__atomic_add(i, v) < 0)
-#define atomic64_add_negative(i, v) (__atomic64_add(i, v) < 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
+
+#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic64_inc(v) atomic64_add(1, v)
+
+#define atomic_dec(v) atomic_sub(1, v)
+#define atomic64_dec(v) atomic64_sub(1, v)
+
+#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
 /* Atomic operations are already serializing */
+#ifdef CONFIG_SMP
+#define smp_mb__before_atomic_dec()	membar("#StoreLoad | #LoadLoad")
+#define smp_mb__after_atomic_dec()	membar("#StoreLoad | #StoreStore")
+#define smp_mb__before_atomic_inc()	membar("#StoreLoad | #LoadLoad")
+#define smp_mb__after_atomic_inc()	membar("#StoreLoad | #StoreStore")
+#else
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
+#endif
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
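The header now splits the interface into void-returning ops and *_ret ops that return the new value, with the helper macros built on top of the latter. A minimal userspace mock of that split, using GCC's __sync builtins as a stand-in for the real SPARC routines (not the kernel implementation), shows how the two groups are meant to be used:

```c
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

/* void-returning op: caller does not need the result */
static void atomic_add(int i, atomic_t *v)    { __sync_fetch_and_add(&v->counter, i); }
/* *_ret ops: return the new value (and, in the kernel, imply memory barriers) */
static int atomic_add_ret(int i, atomic_t *v) { return __sync_add_and_fetch(&v->counter, i); }
static int atomic_sub_ret(int i, atomic_t *v) { return __sync_sub_and_fetch(&v->counter, i); }

#define atomic_inc(v)          atomic_add(1, v)
#define atomic_inc_return(v)   atomic_add_ret(1, v)
#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)

int main(void)
{
	atomic_t refcount = { 1 };

	atomic_inc(&refcount);				/* 2, no return value wanted */
	printf("%d\n", atomic_inc_return(&refcount));	/* 3                         */
	atomic_sub_ret(2, &refcount);			/* 1                         */
	if (atomic_dec_and_test(&refcount))		/* hits 0, returns true      */
		printf("last reference dropped\n");
	return 0;
}
```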
@@ -7,19 +7,16 @@
 #ifndef _SPARC64_BITOPS_H
 #define _SPARC64_BITOPS_H
 
+#include <linux/config.h>
 #include <linux/compiler.h>
 #include <asm/byteorder.h>
 
-extern long ___test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
-extern long ___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
-extern long ___test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
-
-#define test_and_set_bit(nr,addr)	({___test_and_set_bit(nr,addr)!=0;})
-#define test_and_clear_bit(nr,addr)	({___test_and_clear_bit(nr,addr)!=0;})
-#define test_and_change_bit(nr,addr)	({___test_and_change_bit(nr,addr)!=0;})
-#define set_bit(nr,addr)		((void)___test_and_set_bit(nr,addr))
-#define clear_bit(nr,addr)		((void)___test_and_clear_bit(nr,addr))
-#define change_bit(nr,addr)		((void)___test_and_change_bit(nr,addr))
+extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
+extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
+extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
+extern void set_bit(unsigned long nr, volatile unsigned long *addr);
+extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
+extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 /* "non-atomic" versions... */
@@ -74,8 +71,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
 	return ((old & mask) != 0);
 }
 
-#define smp_mb__before_clear_bit()	do { } while(0)
-#define smp_mb__after_clear_bit()	do { } while(0)
+#ifdef CONFIG_SMP
+#define smp_mb__before_clear_bit()	membar("#StoreLoad | #LoadLoad")
+#define smp_mb__after_clear_bit()	membar("#StoreLoad | #StoreStore")
+#else
+#define smp_mb__before_clear_bit()	barrier()
+#define smp_mb__after_clear_bit()	barrier()
+#endif
 
 static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
 {
@@ -230,9 +232,9 @@ extern unsigned long find_next_zero_bit(const unsigned long *,
 	find_next_zero_bit((addr), (size), 0)
 
 #define test_and_set_le_bit(nr,addr)	\
-	({ ___test_and_set_bit((nr) ^ 0x38, (addr)) != 0; })
+	test_and_set_bit((nr) ^ 0x38, (addr))
 #define test_and_clear_le_bit(nr,addr)	\
-	({ ___test_and_clear_bit((nr) ^ 0x38, (addr)) != 0; })
+	test_and_clear_bit((nr) ^ 0x38, (addr))
 
 static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
 {
@@ -251,12 +253,21 @@ extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsig
 #ifdef __KERNEL__
 
+#define __set_le_bit(nr, addr) \
+	__set_bit((nr) ^ 0x38, (addr))
+#define __clear_le_bit(nr, addr) \
+	__clear_bit((nr) ^ 0x38, (addr))
+#define __test_and_clear_le_bit(nr, addr) \
+	__test_and_clear_bit((nr) ^ 0x38, (addr))
+#define __test_and_set_le_bit(nr, addr) \
+	__test_and_set_bit((nr) ^ 0x38, (addr))
+
 #define ext2_set_bit(nr,addr)	\
-	test_and_set_le_bit((nr),(unsigned long *)(addr))
+	__test_and_set_le_bit((nr),(unsigned long *)(addr))
 #define ext2_set_bit_atomic(lock,nr,addr)	\
 	test_and_set_le_bit((nr),(unsigned long *)(addr))
 #define ext2_clear_bit(nr,addr)	\
-	test_and_clear_le_bit((nr),(unsigned long *)(addr))
+	__test_and_clear_le_bit((nr),(unsigned long *)(addr))
 #define ext2_clear_bit_atomic(lock,nr,addr)	\
 	test_and_clear_le_bit((nr),(unsigned long *)(addr))
 #define ext2_test_bit(nr,addr)	\
...
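The `(nr) ^ 0x38` in these little-endian helpers mirrors the byte index inside each 64-bit word: sparc64 is big-endian while the ext2 bitmap layout is little-endian, so byte b of the word maps to byte 7 - b, and the bit-within-byte and word index are unchanged. A small standalone demonstration of the mapping:

```c
#include <stdio.h>

int main(void)
{
	/* The byte index is bits 3..5 of the bit number; XOR with 0x38 (0b111000)
	 * flips exactly those bits, turning byte b into byte 7 - b.
	 */
	for (unsigned int nr = 0; nr < 64; nr += 8)
		printf("le bit %2u -> native bit %2u\n", nr, nr ^ 0x38);
	return 0;
}
```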
@@ -83,8 +83,7 @@ do { \
 	paddr = __pa((__mm)->pgd); \
 	pgd_cache = 0UL; \
 	if ((__tsk)->thread_info->flags & _TIF_32BIT) \
-		pgd_cache = \
-			((unsigned long)pgd_val((__mm)->pgd[0])) << 11UL; \
+		pgd_cache = get_pgd_cache((__mm)->pgd); \
 	__asm__ __volatile__("wrpr	%%g0, 0x494, %%pstate\n\t" \
 			     "mov	%3, %%g4\n\t" \
 			     "mov	%0, %%g7\n\t" \
...
@@ -312,6 +312,11 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
+/* extract the pgd cache used for optimizing the tlb miss
+ * slow path when executing 32-bit compat processes
+ */
+#define get_pgd_cache(pgd)	((unsigned long) pgd_val(*pgd) << 11)
+
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pudp, address)	\
 	((pmd_t *) pud_page(*(pudp)) + \
...
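The new get_pgd_cache() helper simply centralizes the `<< 11` shift that load_aout32_binary(), flush_thread() and the context-switch macro earlier in this diff used to open-code. A standalone illustration of the arithmetic with a mock pgd_t (not the kernel's types):

```c
#include <stdio.h>

/* mock pgd type, only to make the shift visible outside the kernel */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)		((x).pgd)
#define get_pgd_cache(pgd)	((unsigned long) pgd_val(*pgd) << 11)

int main(void)
{
	pgd_t pgd[1] = { { 0x1234UL } };

	/* 0x1234 << 11 == 0x91a000 */
	printf("pgd_cache = %#lx\n", get_pgd_cache(pgd));
	return 0;
}
```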