Commit 8e206637 authored by Russell King

[ARM] Fixups for GCC 3.x:

 - Provide an ARM architecture major version number so we can use the
   preprocessor more effectively to select features that ARMv4 and
   above contain.
 - Fix GCC 3.1 multi-line __asm__ build warnings
parent 3fd970f0
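For context on the second fix: GCC 3.x deprecates literal newlines inside string constants, which the old multi-line __asm__ blocks relied on. Each converted line in the hunks below now ends with \n and continues the string constant with a trailing backslash. A minimal sketch of the resulting style (read_cpsr_flags is a hypothetical helper, not part of this commit):

/* Hypothetical illustration of the GCC 3.x-safe string style: every
 * assembly line ends with \n and the C string is continued with a
 * backslash, so no raw newline appears inside the constant. */
static inline unsigned long read_cpsr_flags(void)
{
	unsigned long flags;
	__asm__ __volatile__(
		"mrs %0, cpsr @ read status register \n\
		and %0, %0, #0xf0000000 @ keep N, Z, C, V"
		: "=r" (flags));
	return flags;
}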
@@ -26,11 +26,14 @@ apcs-$(CONFIG_CPU_32) :=-mapcs-32
 apcs-$(CONFIG_CPU_26) :=-mapcs-26 -mcpu=arm3 -Os
 
 # This selects which instruction set is used.
+# Note that GCC is lame - it doesn't numerically define an
+# architecture version macro, but instead defines a whole
+# series of macros.
 arch-y :=
-arch-$(CONFIG_CPU_32v3) :=-march=armv3
-arch-$(CONFIG_CPU_32v4) :=-march=armv4
-arch-$(CONFIG_CPU_32v5) :=-march=armv5
-arch-$(CONFIG_CPU_XSCALE) :=-march=armv4 -Wa,-mxscale #-march=armv5te
+arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
+arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
+arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 -march=armv5
+arch-$(CONFIG_CPU_XSCALE) :=-D__LINUX_ARM_ARCH__=5 -march=armv4 -Wa,-mxscale #-march=armv5te
 
 # This selects how we optimise for the processor.
 tune-y :=
......
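With __LINUX_ARM_ARCH__ defined numerically on the command line, architecture checks become a single comparison instead of a chain of GCC's per-version macros; the hunks below convert #ifdef __ARM_ARCH_4__ tests to exactly this form. An illustrative sketch (HAVE_HALFWORD_ACCESS is a hypothetical name, not from this commit):

/* Sketch: one numeric test replaces testing __ARM_ARCH_3__,
 * __ARM_ARCH_4__, ... individually. */
#if __LINUX_ARM_ARCH__ >= 4
#define HAVE_HALFWORD_ACCESS 1	/* ldrh/strh exist on ARMv4 and later */
#else
#define HAVE_HALFWORD_ACCESS 0	/* ARMv3: must synthesize halfword accesses */
#endif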
@@ -386,16 +386,16 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 pid_t __ret;
 __asm__ __volatile__(
-"orr r0, %1, %2 @ kernel_thread sys_clone
-mov r1, #0
-"__syscall(clone)"
-movs %0, r0 @ if we are the child
-bne 1f
-mov fp, #0 @ ensure that fp is zero
-mov r0, %4
-mov lr, pc
-mov pc, %3
-b sys_exit
+"orr r0, %1, %2 @ kernel_thread sys_clone \n\
+mov r1, #0 \n\
+"__syscall(clone)" \n\
+movs %0, r0 @ if we are the child \n\
+bne 1f \n\
+mov fp, #0 @ ensure that fp is zero \n\
+mov r0, %4 \n\
+mov lr, pc \n\
+mov pc, %3 \n\
+b sys_exit \n\
 1: "
 : "=r" (__ret)
 : "Ir" (flags), "I" (CLONE_VM), "r" (fn), "r" (arg)
......
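What the rewritten kernel_thread() assembly does, as a hedged C-level sketch; clone_syscall() and clear_frame_pointer() are hypothetical stand-ins for the raw swi issued by __syscall(clone) and for the "mov fp, #0":

#include <sys/types.h>				/* pid_t */

#define CLONE_VM 0x00000100			/* value from <linux/sched.h> */

extern pid_t clone_syscall(unsigned long flags);	/* hypothetical */
extern void clear_frame_pointer(void);			/* hypothetical */
extern void sys_exit(int code);		/* simplified prototype; the asm tail-calls it */

static pid_t kernel_thread_sketch(int (*fn)(void *), void *arg,
				  unsigned long flags)
{
	pid_t ret = clone_syscall(flags | CLONE_VM);

	if (ret == 0) {			/* child */
		clear_frame_pointer();	/* mov fp, #0: backtraces stop here */
		sys_exit(fn(arg));	/* call fn, then b sys_exit with its result */
	}
	return ret;			/* parent: child pid, or error */
}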
@@ -178,76 +178,76 @@ int __down_trylock(struct semaphore * sem)
  * value in some cases..
  */
 #ifdef CONFIG_CPU_26
-asm(" .align 5
-.globl __down_failed
-__down_failed:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __down
-ldmfd sp!, {r0 - r3, pc}^
-
-.align 5
-.globl __down_interruptible_failed
-__down_interruptible_failed:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __down_interruptible
-mov ip, r0
-ldmfd sp!, {r0 - r3, pc}^
-
-.align 5
-.globl __down_trylock_failed
-__down_trylock_failed:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __down_trylock
-mov ip, r0
-ldmfd sp!, {r0 - r3, pc}^
-
-.align 5
-.globl __up_wakeup
-__up_wakeup:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __up
-ldmfd sp!, {r0 - r3, pc}^
+asm(" .align 5 \n\
+.globl __down_failed \n\
+__down_failed: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __down \n\
+ldmfd sp!, {r0 - r3, pc}^ \n\
+\n\
+.align 5 \n\
+.globl __down_interruptible_failed \n\
+__down_interruptible_failed: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __down_interruptible \n\
+mov ip, r0 \n\
+ldmfd sp!, {r0 - r3, pc}^ \n\
+\n\
+.align 5 \n\
+.globl __down_trylock_failed \n\
+__down_trylock_failed: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __down_trylock \n\
+mov ip, r0 \n\
+ldmfd sp!, {r0 - r3, pc}^ \n\
+\n\
+.align 5 \n\
+.globl __up_wakeup \n\
+__up_wakeup: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __up \n\
+ldmfd sp!, {r0 - r3, pc}^ \n\
 ");
 #else
 /* 32 bit version */
-asm(" .align 5
-.globl __down_failed
-__down_failed:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __down
-ldmfd sp!, {r0 - r3, pc}
-
-.align 5
-.globl __down_interruptible_failed
-__down_interruptible_failed:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __down_interruptible
-mov ip, r0
-ldmfd sp!, {r0 - r3, pc}
-
-.align 5
-.globl __down_trylock_failed
-__down_trylock_failed:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __down_trylock
-mov ip, r0
-ldmfd sp!, {r0 - r3, pc}
-
-.align 5
-.globl __up_wakeup
-__up_wakeup:
-stmfd sp!, {r0 - r3, lr}
-mov r0, ip
-bl __up
-ldmfd sp!, {r0 - r3, pc}
+asm(" .align 5 \n\
+.globl __down_failed \n\
+__down_failed: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __down \n\
+ldmfd sp!, {r0 - r3, pc} \n\
+\n\
+.align 5 \n\
+.globl __down_interruptible_failed \n\
+__down_interruptible_failed: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __down_interruptible \n\
+mov ip, r0 \n\
+ldmfd sp!, {r0 - r3, pc} \n\
+\n\
+.align 5 \n\
+.globl __down_trylock_failed \n\
+__down_trylock_failed: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __down_trylock \n\
+mov ip, r0 \n\
+ldmfd sp!, {r0 - r3, pc} \n\
+\n\
+.align 5 \n\
+.globl __up_wakeup \n\
+__up_wakeup: \n\
+stmfd sp!, {r0 - r3, lr} \n\
+mov r0, ip \n\
+bl __up \n\
+ldmfd sp!, {r0 - r3, pc} \n\
 ");
 #endif
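Both variants obey the same calling convention (an assumption drawn from the ARM semaphore fastpath, which this diff does not show): the fastpath branches here with the struct semaphore pointer in ip rather than r0, and expects r0-r3 to survive, hence the stmfd/ldmfd around each call; the two helpers that return a result pass it back in ip, since r0 is restored by the ldmfd. The only difference in the 26-bit version is the trailing ^ on the ldmfd, which also restores the PSR bits held in the 26-bit pc. The C slow-path entry points, with signatures as in this file:

struct semaphore;			/* opaque here */

void __down(struct semaphore *sem);
int __down_interruptible(struct semaphore *sem);	/* 0, or -EINTR */
int __down_trylock(struct semaphore *sem);		/* 0 on success */
void __up(struct semaphore *sem);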
@@ -49,7 +49,7 @@ td3 .req lr
 /* we are now half-word aligned */
 .less8_wordlp:
-#ifdef __ARM_ARCH_4__
+#if __LINUX_ARM_ARCH__ >= 4
 ldrh td0, [buf], #2
 sub len, len, #2
 #else
@@ -83,7 +83,7 @@ td3 .req lr
 adcnes sum, sum, td0, lsl #byte(1) @ update checksum
 tst buf, #2 @ 32-bit aligned?
-#ifdef __ARM_ARCH_4__
+#if __LINUX_ARM_ARCH__ >= 4
 ldrneh td0, [buf], #2 @ make 32-bit aligned
 subne len, len, #2
 #else
......
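The elided #else branches handle pre-v4 cores, which have no ldrh. As an illustration of the kind of fallback involved (an assumption about the elided code, recast here as C inline assembly in the commit's own string style):

/* Illustration only: load a little-endian halfword on ARMv3 by
 * merging two byte loads. */
static inline unsigned int load_le16_v3(const unsigned char *p)
{
	unsigned int val, tmp;
	__asm__ ("ldrb %0, [%2, #0] \n\
		ldrb %1, [%2, #1] \n\
		orr %0, %0, %1, lsl #8"
		: "=&r" (val), "=&r" (tmp)
		: "r" (p));
	return val;
}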
@@ -75,7 +75,7 @@
 #if defined (__arm__)
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-__asm__ ("adds %1, %4, %5
+__asm__ ("adds %1, %4, %5 \n\
 adc %0, %2, %3" \
 : "=r" ((USItype) (sh)), \
 "=&r" ((USItype) (sl)) \
@@ -84,7 +84,7 @@
 "%r" ((USItype) (al)), \
 "rI" ((USItype) (bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-__asm__ ("subs %1, %4, %5
+__asm__ ("subs %1, %4, %5 \n\
 sbc %0, %2, %3" \
 : "=r" ((USItype) (sh)), \
 "=&r" ((USItype) (sl)) \
@@ -94,18 +94,18 @@
 "rI" ((USItype) (bl)))
 #define umul_ppmm(xh, xl, a, b) \
 {register USItype __t0, __t1, __t2; \
-__asm__ ("%@ Inlined umul_ppmm
-mov %2, %5, lsr #16
-mov %0, %6, lsr #16
-bic %3, %5, %2, lsl #16
-bic %4, %6, %0, lsl #16
-mul %1, %3, %4
-mul %4, %2, %4
-mul %3, %0, %3
-mul %0, %2, %0
-adds %3, %4, %3
-addcs %0, %0, #65536
-adds %1, %1, %3, lsl #16
+__asm__ ("%@ Inlined umul_ppmm \n\
+mov %2, %5, lsr #16 \n\
+mov %0, %6, lsr #16 \n\
+bic %3, %5, %2, lsl #16 \n\
+bic %4, %6, %0, lsl #16 \n\
+mul %1, %3, %4 \n\
+mul %4, %2, %4 \n\
+mul %3, %0, %3 \n\
+mul %0, %2, %0 \n\
+adds %3, %4, %3 \n\
+addcs %0, %0, #65536 \n\
+adds %1, %1, %3, lsl #16 \n\
 adc %0, %0, %3, lsr #16" \
 : "=&r" ((USItype) (xh)), \
 "=r" ((USItype) (xl)), \
......
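For reference, what the add_ssaaaa and sub_ddmmss macros above compute, as a portable C sketch (the _c name is hypothetical):

#include <stdint.h>

/* add_ssaaaa: (*sh,*sl) = (ah,al) + (bh,bl), a 64-bit add built from
 * a 32-bit add plus add-with-carry -- the adds/adc pair in the asm;
 * sub_ddmmss is the same shape with subs/sbc and a borrow. */
static void add_ssaaaa_c(uint32_t *sh, uint32_t *sl,
			 uint32_t ah, uint32_t al,
			 uint32_t bh, uint32_t bl)
{
	uint32_t lo = al + bl;

	*sh = ah + bh + (lo < al);	/* (lo < al) is the carry out */
	*sl = lo;
}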
@@ -33,18 +33,18 @@ Boston, MA 02111-1307, USA. */
 #define umul_ppmm(xh, xl, a, b) \
 {register USItype __t0, __t1, __t2; \
-__asm__ ("%@ Inlined umul_ppmm
-mov %2, %5, lsr #16
-mov %0, %6, lsr #16
-bic %3, %5, %2, lsl #16
-bic %4, %6, %0, lsl #16
-mul %1, %3, %4
-mul %4, %2, %4
-mul %3, %0, %3
-mul %0, %2, %0
-adds %3, %4, %3
-addcs %0, %0, #65536
-adds %1, %1, %3, lsl #16
+__asm__ ("%@ Inlined umul_ppmm \n\
+mov %2, %5, lsr #16 \n\
+mov %0, %6, lsr #16 \n\
+bic %3, %5, %2, lsl #16 \n\
+bic %4, %6, %0, lsl #16 \n\
+mul %1, %3, %4 \n\
+mul %4, %2, %4 \n\
+mul %3, %0, %3 \n\
+mul %0, %2, %0 \n\
+adds %3, %4, %3 \n\
+addcs %0, %0, #65536 \n\
+adds %1, %1, %3, lsl #16 \n\
 adc %0, %0, %3, lsr #16" \
 : "=&r" ((USItype) (xh)), \
 "=r" ((USItype) (xl)), \
......
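The umul_ppmm macro, identical in both files above, builds a 32x32->64 multiply from four 16x16->32 products, since pre-v4 ARM has no umull. The same algorithm as a portable C sketch (the _c name is hypothetical):

#include <stdint.h>

/* umul_ppmm: (*xh,*xl) = a * b. The sum of the two cross products can
 * carry out of 32 bits; that carry is worth 2^48, i.e. 0x10000 in the
 * high word -- the "addcs %0, %0, #65536" in the asm. */
static void umul_ppmm_c(uint32_t *xh, uint32_t *xl, uint32_t a, uint32_t b)
{
	uint32_t ah = a >> 16, al = a & 0xffff;
	uint32_t bh = b >> 16, bl = b & 0xffff;
	uint32_t lo = al * bl;
	uint32_t mid = al * bh + ah * bl;		/* may wrap */
	uint32_t mid_carry = (mid < al * bh) ? 0x10000u : 0;

	*xl = lo + (mid << 16);
	*xh = ah * bh + (mid >> 16) + mid_carry + (*xl < lo);
}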
@@ -184,9 +184,9 @@ INLINE float32 packFloat32( flag zSign, int16 zExp, bits32 zSig )
 {
 #if 0
 float32 f;
-__asm__("@ packFloat32;
-mov %0, %1, asl #31;
-orr %0, %2, asl #23;
+__asm__("@ packFloat32; \n\
+mov %0, %1, asl #31; \n\
+orr %0, %2, asl #23; \n\
 orr %0, %3"
 : /* no outputs */
 : "g" (f), "g" (zSign), "g" (zExp), "g" (zSig)
......
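The disabled (#if 0) asm above merely assembles the three IEEE-754 single-precision fields; the equivalent C packing is (a sketch, the function name is hypothetical):

#include <stdint.h>

/* IEEE-754 single precision: sign in bit 31, 8-bit exponent at
 * bit 23, 23-bit significand in the low bits. */
static inline uint32_t pack_float32_bits(uint32_t sign, uint32_t exp,
					 uint32_t sig)
{
	return (sign << 31) | (exp << 23) | sig;
}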
@@ -36,7 +36,7 @@
 extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
 extern unsigned long cr_alignment; /* defined in entry-armv.S */
 
-#ifdef __ARM_ARCH_4__
+#if __LINUX_ARM_ARCH__ >= 4
 #define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
 #else
 #define vectors_base() (0)
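The V bit (CR_V) of the control register exists only on ARMv4 and later; when set, the CPU fetches its exception vectors from 0xffff0000 instead of 0, which is why the test now keys off the numeric architecture macro. A hypothetical usage sketch (install_vectors and its parameters are illustrative):

#include <string.h>

/* Illustrative only: copy the exception-vector stubs to wherever the
 * CPU will fetch them, as reported by the vectors_base() macro above. */
static void install_vectors(const void *stubs, size_t size)
{
	memcpy((void *)vectors_base(), stubs, size);
}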
@@ -192,7 +192,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 : "memory");
 break;
 #endif
-default: __bad_xchg(ptr, size);
+default: __bad_xchg(ptr, size), ret = 0;
 }
 return ret;
......
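The final hunk is a warning fix rather than a behaviour change: __bad_xchg() is deliberately left undefined so that an unsupported size fails at link time, but GCC 3.x cannot see that and warns that ret may be used uninitialized. A minimal sketch of the pattern (demo_xchg is hypothetical; the real cases contain swp/swpb assembly):

extern void __bad_xchg(volatile void *ptr, int size);	/* never defined */

static inline unsigned long demo_xchg(unsigned long x,
				      volatile void *ptr, int size)
{
	unsigned long ret;

	switch (size) {
	case 1:
	case 4:
		ret = x;	/* stand-in for the swp-based exchange */
		break;
	default:
		/* unreachable at runtime: the undefined reference breaks
		 * the link first; ret = 0 just silences the warning */
		__bad_xchg(ptr, size), ret = 0;
	}
	return ret;
}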