Commit 89eda163 authored by Keith M. Wesolowski

[SPARC32] Uninline atomic_t functions to save space.

parent 7d202bd0
arch/sparc/kernel/smp.c
@@ -56,9 +56,6 @@ int smp_activated = 0;
 volatile int __cpu_number_map[NR_CPUS];
 volatile int __cpu_logical_map[NR_CPUS];
 cycles_t cacheflush_time = 0; /* XXX */
-spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
-	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
-};
 
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
...
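The comment retained in this hunk is the heart of the design: 'ldstub' (load-store unsigned byte) is the only atomic instruction guaranteed on every 32-bit Sparc, and it is precisely a one-byte test-and-set, so everything else, atomics included, must be built from byte-wide spinlocks. As an illustration only (not part of the patch, and assuming GCC/Clang __atomic builtins), the same pattern expressed in portable C:

	/* Illustration only, not from the patch: the ldstub pattern in
	 * portable C. ldstub atomically loads a byte and stores 0xff
	 * into it; a spinlock falls out directly. */
	typedef unsigned char byte_lock_t;

	static void byte_lock(byte_lock_t *lock)
	{
		/* Atomically fetch the old byte while storing all-ones,
		 * which is what 'ldstub [%reg + immediate], %dest_reg'
		 * does. The lock was free iff the old value was zero. */
		while (__atomic_exchange_n(lock, 0xff, __ATOMIC_ACQUIRE) != 0)
			;	/* spin */
	}

	static void byte_unlock(byte_lock_t *lock)
	{
		__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
	}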
arch/sparc/lib/Makefile
@@ -7,5 +7,5 @@ EXTRA_AFLAGS := -ansi -DST_DIV0=0x02
 lib-y := mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
 	 strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
 	 strncpy_from_user.o divdi3.o udivdi3.o strlen_user.o \
-	 copy_user.o locks.o atomic.o bitops.o debuglocks.o lshrdi3.o \
-	 ashldi3.o rwsem.o muldi3.o bitext.o
+	 copy_user.o locks.o atomic.o atomic32.o bitops.o debuglocks.o \
+	 lshrdi3.o ashldi3.o rwsem.o muldi3.o bitext.o
arch/sparc/lib/atomic32.c (new file)

/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
};

#else /* SMP */

#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)		0

#endif /* SMP */

int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = (v->counter += i);
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);

	return ret;
}

void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}

EXPORT_SYMBOL(__atomic_add_return);
EXPORT_SYMBOL(atomic_set);
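Why a hash of four locks rather than one global lock? Funneling every atomic operation in the system through a single spinlock would serialize unrelated CPUs; hashing on bits 8-9 of the atomic_t's address lets operations on objects in different 256-byte regions proceed in parallel, while any two operations on the *same* object always pick the same lock. A standalone sketch (hypothetical demo code, not from the patch) of where sample addresses land:

	/* Standalone demo (hypothetical, not kernel code): which of the
	 * four locks sample addresses hash to. Objects within the same
	 * 256-byte span always share a lock; distant ones usually don't. */
	#include <stdio.h>

	#define ATOMIC_HASH_SIZE 4
	#define ATOMIC_HASH_IDX(a) \
		((((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1))

	int main(void)
	{
		static int counters[256];	/* stand-ins for atomic_t objects */
		int idx[] = { 0, 1, 63, 64, 128, 192 };	/* 4-byte elements */

		for (unsigned i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
			printf("&counters[%3d] (byte offset %4d) -> lock %lu\n",
			       idx[i], idx[i] * 4,
			       ATOMIC_HASH_IDX(&counters[idx[i]]));
		return 0;
	}

counters[0], counters[1], and counters[63] fall in one 256-byte span and share a lock; counters[64] onward land on other locks (absolute indices depend on where the array happens to be placed).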
include/asm-sparc/atomic.h
@@ -11,49 +11,16 @@
 #define __ARCH_SPARC_ATOMIC__
 
 #include <linux/config.h>
-#include <linux/spinlock.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_SMP
-#define ATOMIC_HASH_SIZE	4
-#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
-
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
-
-#else /* SMP */
-
-#define ATOMIC_HASH_SIZE	1
-#define ATOMIC_HASH(a)		0
-
-#endif /* SMP */
-
-static inline int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
-	ret = (v->counter += i);
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
-
-	return ret;
-}
-
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
-	v->counter = i;
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
-}
-
 #define ATOMIC_INIT(i)  { (i) }
+
+extern int __atomic_add_return(int, atomic_t *);
+extern void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)          ((v)->counter)
 
 #define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
...
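The space saving comes from the call sites: each inlined atomic_add() used to expand into a full irq-save lock/hash/unlock sequence, whereas after this patch every call site is a single call instruction and the sequence exists exactly once, in atomic32.c. Only __atomic_add_return needs to live out of line; the visible atomic_add() macro suggests how the remaining arithmetic operations can stay as free wrappers around it. A sketch under that assumption (not the header's verbatim contents):

	/* Assumed wrappers in the spirit of the atomic_add() line above;
	 * not verbatim from asm-sparc/atomic.h. */
	#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
	#define atomic_inc(v)		((void)__atomic_add_return(1, (v)))
	#define atomic_dec(v)		((void)__atomic_add_return(-1, (v)))
	#define atomic_dec_and_test(v)	(__atomic_add_return(-1, (v)) == 0)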