Commit 73c1377d authored by David S. Miller

sparc32: Kill btfixup for xchg()'s 'swap' instruction.

We always have this instruction available, so no need to use
btfixup for it any more.

This also eradicates the whole of atomic_32.S and thus the
__atomic_begin and __atomic_end symbols completely.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0f031b3f
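Background, not part of the commit message: every SPARC V8 chip that sparc32 supports implements the `swap` instruction, which atomically exchanges a register with a 32-bit word in memory. That is what lets xchg_u32() below use `swap` unconditionally instead of going through a boot-time-patched helper on uniprocessor kernels. The stand-alone sketch below mirrors the inline-asm pattern the patched function uses; the wrapper name and test harness are mine, and it builds only for a 32-bit sparc target.

#include <stdio.h>

/* Same constraint pattern as the patched xchg_u32(): %0 is val
 * (read and written in place), %2 holds the word's address. */
static inline unsigned long swap_xchg(volatile unsigned long *m,
                                      unsigned long val)
{
        __asm__ __volatile__("swap [%2], %0"
                             : "=&r" (val)
                             : "0" (val), "r" (m)
                             : "memory");
        return val;
}

int main(void)
{
        volatile unsigned long word = 1;
        unsigned long old = swap_xchg(&word, 2);

        printf("old=%lu new=%lu\n", old, (unsigned long)word); /* old=1 new=2 */
        return 0;
}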
arch/sparc/include/asm/cmpxchg_32.h
@@ -13,38 +13,13 @@
 
 #include <asm/btfixup.h>
 
-/* This has special calling conventions */
-#ifndef CONFIG_SMP
-BTFIXUPDEF_CALL(void, ___xchg32, void)
-#endif
-
 static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
 {
-#ifdef CONFIG_SMP
         __asm__ __volatile__("swap [%2], %0"
                              : "=&r" (val)
                              : "0" (val), "r" (m)
                              : "memory");
         return val;
-#else
-        register unsigned long *ptr asm("g1");
-        register unsigned long ret asm("g2");
-
-        ptr = (unsigned long *) m;
-        ret = val;
-
-        /* Note: this is magic and the nop there is
-           really needed. */
-        __asm__ __volatile__(
-        "mov    %%o7, %%g4\n\t"
-        "call   ___f____xchg32\n\t"
-        " nop\n\t"
-         : "=&r" (ret)
-         : "0" (ret), "r" (ptr)
-         : "g3", "g4", "g7", "memory", "cc");
-
-        return ret;
-#endif
 }
 
 extern void __xchg_called_with_bad_pointer(void);
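For context, xchg_u32() sits behind the generic xchg() macro, which dispatches on operand size and routes unsupported sizes to the deliberately undefined __xchg_called_with_bad_pointer() so misuse fails at link time. A sketch of that dispatch pattern follows; it is a plausible reading, not a copy of the real header, which may differ in detail.

static inline unsigned long __xchg(unsigned long x, __volatile__ void *ptr,
                                   int size)
{
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);        /* the only supported width */
        }
        __xchg_called_with_bad_pointer();       /* undefined: link-time error */
        return x;
}

#define xchg(ptr, x)                                                    \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),          \
                                    sizeof(*(ptr))))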
arch/sparc/kernel/sparc_ksyms_32.c
@@ -29,9 +29,7 @@ EXPORT_SYMBOL(__ret_efault);
 EXPORT_SYMBOL(empty_zero_page);
 
 /* Defined using magic */
-#ifndef CONFIG_SMP
-EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
-#else
+#ifdef CONFIG_SMP
 EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
 #endif
 EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
arch/sparc/kernel/time_32.c
@@ -68,7 +68,6 @@ static int set_rtc_mmss(unsigned long);
 unsigned long profile_pc(struct pt_regs *regs)
 {
         extern char __copy_user_begin[], __copy_user_end[];
-        extern char __atomic_begin[], __atomic_end[];
         extern char __bzero_begin[], __bzero_end[];
 
         unsigned long pc = regs->pc;
@@ -76,8 +75,6 @@ unsigned long profile_pc(struct pt_regs *regs)
         if (in_lock_functions(pc) ||
             (pc >= (unsigned long) __copy_user_begin &&
              pc < (unsigned long) __copy_user_end) ||
-            (pc >= (unsigned long) __atomic_begin &&
-             pc < (unsigned long) __atomic_end) ||
             (pc >= (unsigned long) __bzero_begin &&
              pc < (unsigned long) __bzero_end))
                 pc = regs->u_regs[UREG_RETPC];
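profile_pc() exists because a profiling tick can land inside hand-written leaf routines that run without a normal stack frame; such samples are credited to the caller through the saved return address in regs->u_regs[UREG_RETPC]. With atomic_32.S gone, the __atomic_begin/__atomic_end window no longer needs checking. An illustrative helper for the range test, mine rather than the kernel's:

/* True if the sampled PC lies inside the half-open window [begin, end). */
static int pc_in_region(unsigned long pc, char *begin, char *end)
{
        return pc >= (unsigned long)begin && pc < (unsigned long)end;
}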
arch/sparc/lib/Makefile
@@ -13,7 +13,7 @@ lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
 lib-y                 += strncpy_from_user_$(BITS).o strlen_user_$(BITS).o
 lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
-lib-y                 += atomic_$(BITS).o
+lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
arch/sparc/lib/atomic_32.S (deleted file)
-/* atomic.S: Move this stuff here for better ICACHE hit rates.
- *
- * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
- */
-
-#include <asm/ptrace.h>
-#include <asm/psr.h>
-
-        .text
-        .align  4
-
-        .globl  __atomic_begin
-__atomic_begin:
-
-#ifndef CONFIG_SMP
-        .globl  ___xchg32_sun4md
-___xchg32_sun4md:
-        swap    [%g1], %g2
-        jmpl    %o7 + 8, %g0
-         mov    %g4, %o7
-#endif
-
-        .globl  __atomic_end
-__atomic_end:
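The deleted helper used a private calling convention rather than the normal ABI: the word address arrived in %g1, the value in %g2, and the caller first copied its return address into %g4 (the `mov %%o7, %%g4` in the removed C stub) because the `call` was about to clobber %o7 without the compiler knowing. An annotated reading of the three instructions; the comments are my interpretation, not from the file:

/*
 *  ___xchg32_sun4md:
 *          swap    [%g1], %g2      ! %g1 = word address, %g2 = value in/out
 *          jmpl    %o7 + 8, %g0    ! ordinary leaf-function return (retl)
 *           mov    %g4, %o7        ! delay slot: restore the clobbered %o7
 */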
arch/sparc/mm/srmmu.c
@@ -1980,13 +1980,8 @@ void __init load_mmu(void)
 {
         extern void ld_mmu_iommu(void);
         extern void ld_mmu_iounit(void);
-        extern void ___xchg32_sun4md(void);
 
         /* Functions */
-#ifndef CONFIG_SMP
-        BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
-#endif
         BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
 
         get_srmmu_type();
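BTFIXUPSET_CALL() registers a boot-time fixup that rewrites each BTFIXUP call site once the CPU/MMU type is known. The BTFIXUPCALL_SWAPG1G2 argument allowed the fixup pass to overwrite the call instruction with a literal `swap [%g1], %g2` instead of a call to ___xchg32_sun4md, so even the old UP fast path was a single patched instruction. A conceptual sketch of that style of patching follows; the names and the encoding constant are mine (computed from the SPARC V8 instruction format, not copied from btfixup.h), and this is not the btfixup implementation.

#include <stdint.h>

/* My computation of the V8 encoding of "swap [%g1], %g2":
 * op=3, rd=%g2, op3=0x0f, rs1=%g1, i=0, rs2=%g0. */
#define SWAP_G1_G2_INSN 0xc4784000u

static void patch_call_site(uint32_t *call_insn)
{
        *call_insn = SWAP_G1_G2_INSN;   /* replace the 'call' with one insn */
        /* a real fixup pass would also flush the I-cache for this address */
}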