Commit 37cfc3f6 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next:
  sparc32: remove unused file: include/asm/pgtsun4.h
  sparc32: fix PAGE_SIZE definition
  sparc32: enable different preemption models
  sparc32: support atomic64_t
  apbuart: fix section mismatch warning
  sparc32: drop useless preprocessor conditional in atomic_32.h
  sparc32: drop unused atomic24 support
parents 38e5781b 6e4d177d
@@ -31,6 +31,7 @@ config SPARC
 
 config SPARC32
 	def_bool !64BIT
+	select GENERIC_ATOMIC64
 
 config SPARC64
 	def_bool 64BIT
@@ -383,9 +384,7 @@ config SCHED_MC
 	  making when dealing with multi-core CPU chips at a cost of slightly
 	  increased overhead in some places. If unsure say N here.
 
-if SPARC64
 source "kernel/Kconfig.preempt"
-endif
 
 config CMDLINE_BOOL
 	bool "Default bootloader kernel arguments"
......
@@ -13,7 +13,7 @@
 #include <linux/types.h>
-#ifdef __KERNEL__
+#include <asm-generic/atomic64.h>
 #include <asm/system.h>
@@ -52,112 +52,10 @@ extern void atomic_set(atomic_t *, int);
 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
-/* This is the old 24-bit implementation. It's still used internally
- * by some sparc-specific code, notably the semaphore implementation.
- */
-typedef struct { volatile int counter; } atomic24_t;
-
-#ifndef CONFIG_SMP
-
-#define ATOMIC24_INIT(i)	{ (i) }
-#define atomic24_read(v)	((v)->counter)
-#define atomic24_set(v, i)	(((v)->counter) = i)
-
-#else
-/* We do the bulk of the actual work out of line in two common
- * routines in assembler, see arch/sparc/lib/atomic.S for the
- * "fun" details.
- *
- * For SMP the trick is you embed the spin lock byte within
- * the word, use the low byte so signedness is easily retained
- * via a quick arithmetic shift. It looks like this:
- *
- *	----------------------------------------
- *	| signed 24-bit counter value |  lock  |  atomic_t
- *	----------------------------------------
- *	 31                          8 7      0
- */
-
-#define ATOMIC24_INIT(i)	{ ((i) << 8) }
-
-static inline int atomic24_read(const atomic24_t *v)
-{
-	int ret = v->counter;
-
-	while (ret & 0xff)
-		ret = v->counter;
-
-	return ret >> 8;
-}
-
-#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
-#endif
-
-static inline int __atomic24_add(int i, atomic24_t *v)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g7");
-
-	ptr = &v->counter;
-	increment = i;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_add\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
-	: "0" (increment), "r" (ptr)
-	: "memory", "cc");
-
-	return increment;
-}
-
-static inline int __atomic24_sub(int i, atomic24_t *v)
-{
-	register volatile int *ptr asm("g1");
-	register int increment asm("g2");
-	register int tmp1 asm("g3");
-	register int tmp2 asm("g4");
-	register int tmp3 asm("g7");
-
-	ptr = &v->counter;
-	increment = i;
-
-	__asm__ __volatile__(
-	"mov	%%o7, %%g4\n\t"
-	"call	___atomic24_sub\n\t"
-	" add	%%o7, 8, %%o7\n"
-	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
-	: "0" (increment), "r" (ptr)
-	: "memory", "cc");
-
-	return increment;
-}
-
-#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
-#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
-
-#define atomic24_dec_return(v) __atomic24_sub(1, (v))
-#define atomic24_inc_return(v) __atomic24_add(1, (v))
-
-#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
-#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
-
-#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
-#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
-
-#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
-
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
-#endif /* !(__KERNEL__) */
-
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
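
With GENERIC_ATOMIC64 selected and <asm-generic/atomic64.h> included, sparc32 gets its 64-bit atomics from the generic library fallback (lib/atomic64.c), which serializes each operation with a lock instead of relying on 64-bit atomic instructions the CPU does not have. A minimal userspace sketch of that idea, with made-up names and a pthread mutex standing in for the kernel's hashed spinlocks:

#include <pthread.h>
#include <stdint.h>

/* Hypothetical illustration of the generic-atomic64 approach: a 64-bit
 * counter whose operations become atomic by taking a lock around them. */
typedef struct {
	int64_t counter;
	pthread_mutex_t lock;	/* the kernel uses a small hash of spinlocks */
} atomic64_emul_t;

#define ATOMIC64_EMUL_INIT(i)	{ (i), PTHREAD_MUTEX_INITIALIZER }

static int64_t atomic64_emul_add_return(int64_t i, atomic64_emul_t *v)
{
	int64_t ret;

	pthread_mutex_lock(&v->lock);
	v->counter += i;
	ret = v->counter;
	pthread_mutex_unlock(&v->lock);
	return ret;
}

static int64_t atomic64_emul_read(atomic64_emul_t *v)
{
	int64_t ret;

	pthread_mutex_lock(&v->lock);	/* a bare 64-bit load could tear on 32-bit */
	ret = v->counter;
	pthread_mutex_unlock(&v->lock);
	return ret;
}

The in-kernel generic implementation builds cmpxchg, xchg, and the other atomic64 operations the same way, so no per-architecture assembly is needed.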
@@ -8,14 +8,10 @@
 #ifndef _SPARC_PAGE_H
 #define _SPARC_PAGE_H
-#define PAGE_SHIFT	12
-#ifndef __ASSEMBLY__
-/* I have my suspicions... -DaveM */
-#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-#else
-#define PAGE_SIZE	(1 << PAGE_SHIFT)
-#endif
+#include <linux/const.h>
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 #include <asm/btfixup.h>
......
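
The new PAGE_SIZE definition works in both C and assembler because of the _AC() macro from include/linux/const.h: in C it pastes the UL suffix onto the constant, while for assembly (where "1UL" is meaningless) the suffix is dropped. Roughly, and simplified from that header:

/* Simplified sketch of the _AC() helper in include/linux/const.h. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: drop the suffix     */
#else
#define __AC(X, Y)	(X##Y)		/* C: paste "1" and "UL" -> (1UL) */
#define _AC(X, Y)	__AC(X, Y)
#endif

/* So PAGE_SIZE expands to (1UL << 12) in C code and to (1 << 12) in .S
 * files, replacing the old pair of #ifdef __ASSEMBLY__ definitions. */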
deleted file: include/asm/pgtsun4.h
-/*
- * pgtsun4.h: Sun4 specific pgtable.h defines and code.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-#ifndef _SPARC_PGTSUN4C_H
-#define _SPARC_PGTSUN4C_H
-
-#include <asm/contregs.h>
-
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define SUN4C_PMD_SHIFT		23
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define SUN4C_PGDIR_SHIFT	23
-#define SUN4C_PGDIR_SIZE	(1UL << SUN4C_PGDIR_SHIFT)
-#define SUN4C_PGDIR_MASK	(~(SUN4C_PGDIR_SIZE-1))
-#define SUN4C_PGDIR_ALIGN(addr)	(((addr)+SUN4C_PGDIR_SIZE-1)&SUN4C_PGDIR_MASK)
-
-/* To represent how the sun4c mmu really lays things out. */
-#define SUN4C_REAL_PGDIR_SHIFT	18
-#define SUN4C_REAL_PGDIR_SIZE	(1UL << SUN4C_REAL_PGDIR_SHIFT)
-#define SUN4C_REAL_PGDIR_MASK	(~(SUN4C_REAL_PGDIR_SIZE-1))
-#define SUN4C_REAL_PGDIR_ALIGN(addr)	(((addr)+SUN4C_REAL_PGDIR_SIZE-1)&SUN4C_REAL_PGDIR_MASK)
-
-/* 19 bit PFN on sun4 */
-#define SUN4C_PFN_MASK	0x7ffff
-
-/* Don't increase these unless the structures in sun4c.c are fixed */
-#define SUN4C_MAX_SEGMAPS	256
-#define SUN4C_MAX_CONTEXTS	16
-
-/*
- * To be efficient, and not have to worry about allocating such
- * a huge pgd, we make the kernel sun4c tables each hold 1024
- * entries and the pgd similarly just like the i386 tables.
- */
-#define SUN4C_PTRS_PER_PTE	1024
-#define SUN4C_PTRS_PER_PMD	1
-#define SUN4C_PTRS_PER_PGD	1024
-
-/*
- * Sparc SUN4C pte fields.
- */
-#define _SUN4C_PAGE_VALID		0x80000000
-#define _SUN4C_PAGE_SILENT_READ		0x80000000	/* synonym */
-#define _SUN4C_PAGE_DIRTY		0x40000000
-#define _SUN4C_PAGE_SILENT_WRITE	0x40000000	/* synonym */
-#define _SUN4C_PAGE_PRIV		0x20000000	/* privileged page */
-#define _SUN4C_PAGE_NOCACHE		0x10000000	/* non-cacheable page */
-#define _SUN4C_PAGE_PRESENT		0x08000000	/* implemented in software */
-#define _SUN4C_PAGE_IO			0x04000000	/* I/O page */
-#define _SUN4C_PAGE_FILE		0x02000000	/* implemented in software */
-#define _SUN4C_PAGE_READ		0x00800000	/* implemented in software */
-#define _SUN4C_PAGE_WRITE		0x00400000	/* implemented in software */
-#define _SUN4C_PAGE_ACCESSED		0x00200000	/* implemented in software */
-#define _SUN4C_PAGE_MODIFIED		0x00100000	/* implemented in software */
-
-#define _SUN4C_READABLE		(_SUN4C_PAGE_READ|_SUN4C_PAGE_SILENT_READ|\
-				 _SUN4C_PAGE_ACCESSED)
-#define _SUN4C_WRITEABLE	(_SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE|\
-				 _SUN4C_PAGE_MODIFIED)
-
-#define _SUN4C_PAGE_CHG_MASK	(0xffff|_SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_MODIFIED)
-
-#define SUN4C_PAGE_NONE		__pgprot(_SUN4C_PAGE_PRESENT)
-#define SUN4C_PAGE_SHARED	__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE|\
-					 _SUN4C_PAGE_WRITE)
-#define SUN4C_PAGE_COPY		__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_READONLY	__pgprot(_SUN4C_PAGE_PRESENT|_SUN4C_READABLE)
-#define SUN4C_PAGE_KERNEL	__pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\
-					 _SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV)
-
-/* SUN4C swap entry encoding
- *
- * We use 5 bits for the type and 19 for the offset. This gives us
- * 32 swapfiles of 4GB each. Encoding looks like:
- *
- *   RRRRRRRRooooooooooooooooooottttt
- *   fedcba9876543210fedcba9876543210
- *
- * The top 8 bits are reserved for protection and status bits, especially
- * FILE and PRESENT.
- */
-#define SUN4C_SWP_TYPE_MASK	0x1f
-#define SUN4C_SWP_OFF_MASK	0x7ffff
-#define SUN4C_SWP_OFF_SHIFT	5
-
-#ifndef __ASSEMBLY__
-
-static inline unsigned long sun4c_get_synchronous_error(void)
-{
-	unsigned long sync_err;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (sync_err) :
-			     "r" (AC_SYNC_ERR), "i" (ASI_CONTROL));
-	return sync_err;
-}
-
-static inline unsigned long sun4c_get_synchronous_address(void)
-{
-	unsigned long sync_addr;
-
-	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
-			     "=r" (sync_addr) :
-			     "r" (AC_SYNC_VA), "i" (ASI_CONTROL));
-	return sync_addr;
-}
-
-/* SUN4 pte, segmap, and context manipulation */
-static inline unsigned long sun4c_get_segmap(unsigned long addr)
-{
-	register unsigned long entry;
-
-	__asm__ __volatile__("\n\tlduha [%1] %2, %0\n\t" :
-			     "=r" (entry) :
-			     "r" (addr), "i" (ASI_SEGMAP));
-	return entry;
-}
-
-static inline void sun4c_put_segmap(unsigned long addr, unsigned long entry)
-{
-	__asm__ __volatile__("\n\tstha %1, [%0] %2; nop; nop; nop;\n\t" : :
-			     "r" (addr), "r" (entry),
-			     "i" (ASI_SEGMAP)
-			     : "memory");
-}
-
-static inline unsigned long sun4c_get_pte(unsigned long addr)
-{
-	register unsigned long entry;
-
-	__asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
-			     "=r" (entry) :
-			     "r" (addr), "i" (ASI_PTE));
-	return entry;
-}
-
-static inline void sun4c_put_pte(unsigned long addr, unsigned long entry)
-{
-	__asm__ __volatile__("\n\tsta %1, [%0] %2; nop; nop; nop;\n\t" : :
-			     "r" (addr),
-			     "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE)
-			     : "memory");
-}
-
-static inline int sun4c_get_context(void)
-{
-	register int ctx;
-
-	__asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
-			     "=r" (ctx) :
-			     "r" (AC_CONTEXT), "i" (ASI_CONTROL));
-	return ctx;
-}
-
-static inline int sun4c_set_context(int ctx)
-{
-	__asm__ __volatile__("\n\tstba %0, [%1] %2; nop; nop; nop;\n\t" : :
-			     "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL)
-			     : "memory");
-	return ctx;
-}
-
-#endif /* !(__ASSEMBLY__) */
-#endif /* !(_SPARC_PGTSUN4_H) */
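
The swap-entry layout the removed header describes (type in the low 5 bits, a 19-bit offset above it, top 8 bits reserved for protection/status) can be illustrated with a small encode/decode pair. The helper names below are made up for illustration; they are not the kernel's macros:

#include <assert.h>

#define SUN4C_SWP_TYPE_MASK	0x1f
#define SUN4C_SWP_OFF_MASK	0x7ffff
#define SUN4C_SWP_OFF_SHIFT	5

/* Hypothetical helpers showing the removed encoding. */
static unsigned long sun4c_swp_entry(unsigned long type, unsigned long off)
{
	return (type & SUN4C_SWP_TYPE_MASK) |
	       ((off & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT);
}

static unsigned long sun4c_swp_type(unsigned long entry)
{
	return entry & SUN4C_SWP_TYPE_MASK;
}

static unsigned long sun4c_swp_offset(unsigned long entry)
{
	return (entry >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK;
}

int main(void)
{
	unsigned long e = sun4c_swp_entry(3, 0x12345);

	assert(sun4c_swp_type(e) == 3);	/* 5 type bits -> up to 32 swapfiles */
	assert(sun4c_swp_offset(e) == 0x12345);
	return 0;
}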
@@ -95,7 +95,7 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
  * Observe the order of get_free_pages() in alloc_thread_info_node().
  * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
  */
-#define THREAD_SIZE		8192
+#define THREAD_SIZE		(2 * PAGE_SIZE)
 
 /*
  * Offsets in thread_info structure, used in assembly code
......
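
The stack size itself does not change here: with PAGE_SHIFT of 12, (2 * PAGE_SIZE) is still 8192 bytes; the definition now simply follows PAGE_SIZE instead of hard-coding the byte count. A trivial stand-alone check of that arithmetic (not part of the patch):

/* Hypothetical compile-time check mirroring the sparc32 constants. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define THREAD_SIZE	(2 * PAGE_SIZE)

_Static_assert(THREAD_SIZE == 8192, "sparc32 kernel stacks are expected to stay at 8K");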
@@ -40,60 +40,5 @@ ___xchg32_sun4md:
 	mov	%g4, %o7
 #endif
 
-	/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
-	 * Really, some things here for SMP are overly clever, go read the header.
-	 */
-	.globl	___atomic24_add
-___atomic24_add:
-	rd	%psr, %g3		! Keep the code small, old way was stupid
-	nop; nop; nop;			! Let the bits set
-	or	%g3, PSR_PIL, %g7	! Disable interrupts
-	wr	%g7, 0x0, %psr		! Set %psr
-	nop; nop; nop;			! Let the bits set
-#ifdef CONFIG_SMP
-1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	1b			! Nope...
-	 ld	[%g1], %g7		! Load locked atomic24_t
-	sra	%g7, 8, %g7		! Get signed 24-bit integer
-	add	%g7, %g2, %g2		! Add in argument
-	sll	%g2, 8, %g7		! Transpose back to atomic24_t
-	st	%g7, [%g1]		! Clever: This releases the lock as well.
-#else
-	ld	[%g1], %g7		! Load locked atomic24_t
-	add	%g7, %g2, %g2		! Add in argument
-	st	%g2, [%g1]		! Store it back
-#endif
-	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
-	nop; nop; nop;			! Let the bits set
-	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
-	 mov	%g4, %o7		! Restore %o7
-
-	.globl	___atomic24_sub
-___atomic24_sub:
-	rd	%psr, %g3		! Keep the code small, old way was stupid
-	nop; nop; nop;			! Let the bits set
-	or	%g3, PSR_PIL, %g7	! Disable interrupts
-	wr	%g7, 0x0, %psr		! Set %psr
-	nop; nop; nop;			! Let the bits set
-#ifdef CONFIG_SMP
-1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
-	orcc	%g7, 0x0, %g0		! Did we get it?
-	bne	1b			! Nope...
-	 ld	[%g1], %g7		! Load locked atomic24_t
-	sra	%g7, 8, %g7		! Get signed 24-bit integer
-	sub	%g7, %g2, %g2		! Subtract argument
-	sll	%g2, 8, %g7		! Transpose back to atomic24_t
-	st	%g7, [%g1]		! Clever: This releases the lock as well
-#else
-	ld	[%g1], %g7		! Load locked atomic24_t
-	sub	%g7, %g2, %g2		! Subtract argument
-	st	%g2, [%g1]		! Store it back
-#endif
-	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
-	nop; nop; nop;			! Let the bits set
-	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
-	 mov	%g4, %o7		! Restore %o7
-
 	.globl	__atomic_end
 __atomic_end:
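
The routines deleted above implemented the scheme the old atomic_32.h comment described: the low byte of the counter word doubles as a spin lock (grabbed with ldstub), the signed 24-bit value lives in bits 8..31 and is recovered with an arithmetic right shift, and the final full-word store releases the lock. A rough C rendering of that idea, purely illustrative with hypothetical names, using GCC's __sync_lock_test_and_set in place of ldstub and ignoring the interrupt-level handling the assembly also did:

#include <stdint.h>

typedef struct { volatile int32_t counter; } atomic24_emul_t;

/* Illustrative only.  Assumes a big-endian layout like sparc's, where
 * byte offset 3 of the word is its least significant byte. */
static int32_t atomic24_emul_add(int32_t i, atomic24_emul_t *v)
{
	volatile uint8_t *lock = (volatile uint8_t *)&v->counter + 3;
	int32_t ret;

	/* Spin until the lock byte was previously 0 (stand-in for ldstub). */
	while (__sync_lock_test_and_set(lock, 0xff))
		;

	ret = (v->counter >> 8) + i;		/* arithmetic shift keeps the sign */
	v->counter = (int32_t)((uint32_t)ret << 8);	/* low byte 0: lock released */

	return ret;
}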
@@ -62,8 +62,6 @@ extern void ___rw_read_enter(void);
 extern void ___rw_read_try(void);
 extern void ___rw_read_exit(void);
 extern void ___rw_write_enter(void);
-extern void ___atomic24_add(void);
-extern void ___atomic24_sub(void);
 
 /* Alias functions whose names begin with "." and export the aliases.
  * The module references will be fixed up by module_frob_arch_sections.
@@ -97,10 +95,6 @@ EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
 
-/* Atomic operations. */
-EXPORT_SYMBOL(___atomic24_add);
-EXPORT_SYMBOL(___atomic24_sub);
-
 EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__lshrdi3);
......
@@ -577,7 +577,7 @@ static int __devinit apbuart_probe(struct platform_device *op)
 	return 0;
 }
 
-static struct of_device_id __initdata apbuart_match[] = {
+static struct of_device_id apbuart_match[] = {
 	{
 		.name = "GAISLER_APBUART",
 	},
@@ -597,7 +597,7 @@ static struct platform_driver grlib_apbuart_of_driver = {
 };
 
-static int grlib_apbuart_configure(void)
+static int __init grlib_apbuart_configure(void)
 {
 	struct device_node *np;
 	int line = 0;
......
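
The warning being fixed follows the usual section-mismatch pattern: apbuart_match was tagged __initdata, yet it appears to be referenced by the persistent grlib_apbuart_of_driver (its OF match table), so modpost complains that freed init data would still be reachable after boot. The fix drops __initdata from the table and instead marks the genuinely init-only grlib_apbuart_configure() as __init. A condensed, hypothetical illustration of the pattern, compiled as plain C only to show the shape:

/* Stand-ins for the kernel annotations, for illustration only. */
#define __initdata	__attribute__((section(".init.data")))
#define __init		__attribute__((section(".init.text")))

struct of_device_id { const char *name; };

/* Problem: the table lives in .init.data ... */
static const struct of_device_id match_table[] __initdata = {
	{ .name = "GAISLER_APBUART" },
	{ }
};

/* ... but something that outlives init keeps pointing at it, so the
 * reference would dangle once init memory is freed; modpost flags this
 * as a section mismatch.  Dropping __initdata from the table (and
 * marking only the truly init-time code __init) resolves it. */
static const struct of_device_id *persistent_user = match_table;

static int __init init_only_setup(void)
{
	return persistent_user != 0;
}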