Commit 0b1a47c2 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390: inline assemblies.

Optimize s390 inline assemblies.
parent d5cb012f
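
The recurring change in this patch: instead of materializing an operand address inside the asm body with "la" into a hardcoded register (which then has to be listed as a clobber), the address is handed to gcc through an "a" (address register) input constraint, so the compiler picks the register and the asm bodies shrink. A minimal standalone sketch of the before/after shape, for s390; the load_word helpers are illustrative, not part of the patch:

	/* Before: the asm body computes the address itself and burns r1. */
	static inline unsigned int load_word_old(unsigned int *p)
	{
		unsigned int val;
		__asm__ __volatile__(
			"	la	1,%1\n"		/* address of *p into r1 */
			"	l	%0,0(1)"	/* load the word through r1 */
			: "=d" (val) : "m" (*p) : "1");
		return val;
	}

	/* After: gcc supplies the address; no fixed register, no clobber. */
	static inline unsigned int load_word_new(unsigned int *p)
	{
		unsigned int val;
		__asm__ __volatile__(
			"	l	%0,0(%1)"	/* load through the register gcc chose */
			: "=d" (val) : "a" (p), "m" (*p));
		return val;
	}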
@@ -513,10 +513,9 @@ int __cpu_up(unsigned int cpu)
 	cpu_lowcore->kernel_stack = (unsigned long)
 		idle->thread_info + (THREAD_SIZE);
 	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
-	__asm__ __volatile__("la    1,%0\n\t"
-			     "stam  0,15,0(1)"
-			     : "=m" (cpu_lowcore->access_regs_save_area[0])
-			     : : "1", "memory");
+	__asm__ __volatile__("stam  0,15,0(%0)"
+			     : : "a" (&cpu_lowcore->access_regs_save_area)
+			     : "memory");
 	eieio();
 	signal_processor(cpu,sigp_restart);
......
@@ -25,12 +25,9 @@ extern struct task_struct *resume(void *, void *);
 #ifdef __s390x__
 #define __FLAG_SHIFT 56
-extern void __misaligned_u16(void);
-extern void __misaligned_u32(void);
-extern void __misaligned_u64(void);
-#else /* __s390x__ */
+#else /* ! __s390x__ */
 #define __FLAG_SHIFT 24
-#endif /* __s390x__ */
+#endif /* ! __s390x__ */
 
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
@@ -301,56 +298,52 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #define __ctl_load(array, low, high) ({ \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n" \
-		"   bras  2,0f\n" \
-		"   lctlg 0,0,0(1)\n" \
-		"0: ex    %1,0(2)" \
-		: : "m" (array), "a" (((low)<<4)+(high)) : "1", "2" ); \
+		"   bras  1,0f\n" \
+		"   lctlg 0,0,0(%0)\n" \
+		"0: ex    %1,0(1)" \
+		: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
 	})
 
 #define __ctl_store(array, low, high) ({ \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n" \
-		"   bras  2,0f\n" \
-		"   stctg 0,0,0(1)\n" \
-		"0: ex    %1,0(2)" \
-		: "=m" (array) : "a" (((low)<<4)+(high)) : "1", "2" ); \
+		"   bras  1,0f\n" \
+		"   stctg 0,0,0(%1)\n" \
+		"0: ex    %2,0(1)" \
+		: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
 	})
 
 #define __ctl_set_bit(cr, bit) ({ \
 	__u8 __dummy[24]; \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n"       /* align to 8 byte */ \
-		"   aghi  1,7\n" \
-		"   nill  1,0xfff8\n" \
-		"   bras  2,0f\n"       /* skip indirect insns */ \
-		"   stctg 0,0,0(1)\n" \
-		"   lctlg 0,0,0(1)\n" \
-		"0: ex    %1,0(2)\n"    /* execute stctl */ \
-		"   lg    0,0(1)\n" \
-		"   ogr   0,%2\n"       /* set the bit */ \
-		"   stg   0,0(1)\n" \
-		"1: ex    %1,6(2)"      /* execute lctl */ \
-		: "=m" (__dummy) : "a" (cr*17), "a" (1L<<(bit)) \
-		: "cc", "0", "1", "2" ); \
+		"   bras  1,0f\n"       /* skip indirect insns */ \
+		"   stctg 0,0,0(%1)\n" \
+		"   lctlg 0,0,0(%1)\n" \
+		"0: ex    %2,0(1)\n"    /* execute stctl */ \
+		"   lg    0,0(%1)\n" \
+		"   ogr   0,%3\n"       /* set the bit */ \
+		"   stg   0,0(%1)\n" \
+		"1: ex    %2,6(1)"      /* execute lctl */ \
+		: "=m" (__dummy) \
+		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
+		  "a" (cr*17), "a" (1L<<(bit)) \
+		: "cc", "0", "1" ); \
 	})
 #define __ctl_clear_bit(cr, bit) ({ \
-	__u8 __dummy[24]; \
+	__u8 __dummy[16]; \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n"       /* align to 8 byte */ \
-		"   aghi  1,7\n" \
-		"   nill  1,0xfff8\n" \
-		"   bras  2,0f\n"       /* skip indirect insns */ \
-		"   stctg 0,0,0(1)\n" \
-		"   lctlg 0,0,0(1)\n" \
-		"0: ex    %1,0(2)\n"    /* execute stctl */ \
-		"   lg    0,0(1)\n" \
-		"   ngr   0,%2\n"       /* set the bit */ \
-		"   stg   0,0(1)\n" \
-		"1: ex    %1,6(2)"      /* execute lctl */ \
-		: "=m" (__dummy) : "a" (cr*17), "a" (~(1L<<(bit))) \
-		: "cc", "0", "1", "2" ); \
+		"   bras  1,0f\n"       /* skip indirect insns */ \
+		"   stctg 0,0,0(%1)\n" \
+		"   lctlg 0,0,0(%1)\n" \
+		"0: ex    %2,0(1)\n"    /* execute stctl */ \
+		"   lg    0,0(%1)\n" \
+		"   ngr   0,%3\n"       /* clear the bit */ \
+		"   stg   0,0(%1)\n" \
+		"1: ex    %2,6(1)"      /* execute lctl */ \
+		: "=m" (__dummy) \
+		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
+		  "a" (cr*17), "a" (~(1L<<(bit))) \
+		: "cc", "0", "1" ); \
 	})
 
 #else /* __s390x__ */
@@ -360,58 +353,52 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 #define __ctl_load(array, low, high) ({ \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n" \
-		"   bras  2,0f\n" \
-		"   lctl  0,0,0(1)\n" \
-		"0: ex    %1,0(2)" \
-		: : "m" (array), "a" (((low)<<4)+(high)) : "1", "2" ); \
+		"   bras  1,0f\n" \
+		"   lctl  0,0,0(%0)\n" \
+		"0: ex    %1,0(1)" \
+		: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
 	})
 
 #define __ctl_store(array, low, high) ({ \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n" \
-		"   bras  2,0f\n" \
-		"   stctl 0,0,0(1)\n" \
-		"0: ex    %1,0(2)" \
-		: "=m" (array) : "a" (((low)<<4)+(high)): "1", "2" ); \
+		"   bras  1,0f\n" \
+		"   stctl 0,0,0(%1)\n" \
+		"0: ex    %2,0(1)" \
+		: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)): "1" ); \
 	})
 
 #define __ctl_set_bit(cr, bit) ({ \
 	__u8 __dummy[16]; \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n"       /* align to 8 byte */ \
-		"   ahi   1,7\n" \
-		"   srl   1,3\n" \
-		"   sll   1,3\n" \
-		"   bras  2,0f\n"       /* skip indirect insns */ \
-		"   stctl 0,0,0(1)\n" \
-		"   lctl  0,0,0(1)\n" \
-		"0: ex    %1,0(2)\n"    /* execute stctl */ \
-		"   l     0,0(1)\n" \
-		"   or    0,%2\n"       /* set the bit */ \
-		"   st    0,0(1)\n" \
-		"1: ex    %1,4(2)"      /* execute lctl */ \
-		: "=m" (__dummy) : "a" (cr*17), "a" (1<<(bit)) \
-		: "cc", "0", "1", "2" ); \
+		"   bras  1,0f\n"       /* skip indirect insns */ \
+		"   stctl 0,0,0(%1)\n" \
+		"   lctl  0,0,0(%1)\n" \
+		"0: ex    %2,0(1)\n"    /* execute stctl */ \
+		"   l     0,0(%1)\n" \
+		"   or    0,%3\n"       /* set the bit */ \
+		"   st    0,0(%1)\n" \
+		"1: ex    %2,4(1)"      /* execute lctl */ \
+		: "=m" (__dummy) \
+		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
+		  "a" (cr*17), "a" (1<<(bit)) \
+		: "cc", "0", "1" ); \
 	})
 #define __ctl_clear_bit(cr, bit) ({ \
 	__u8 __dummy[16]; \
 	__asm__ __volatile__ ( \
-		"   la    1,%0\n"       /* align to 8 byte */ \
-		"   ahi   1,7\n" \
-		"   srl   1,3\n" \
-		"   sll   1,3\n" \
-		"   bras  2,0f\n"       /* skip indirect insns */ \
-		"   stctl 0,0,0(1)\n" \
-		"   lctl  0,0,0(1)\n" \
-		"0: ex    %1,0(2)\n"    /* execute stctl */ \
-		"   l     0,0(1)\n" \
-		"   nr    0,%2\n"       /* set the bit */ \
-		"   st    0,0(1)\n" \
-		"1: ex    %1,4(2)"      /* execute lctl */ \
-		: "=m" (__dummy) : "a" (cr*17), "a" (~(1<<(bit))) \
-		: "cc", "0", "1", "2" ); \
+		"   bras  1,0f\n"       /* skip indirect insns */ \
+		"   stctl 0,0,0(%1)\n" \
+		"   lctl  0,0,0(%1)\n" \
+		"0: ex    %2,0(1)\n"    /* execute stctl */ \
+		"   l     0,0(%1)\n" \
+		"   nr    0,%3\n"       /* clear the bit */ \
+		"   st    0,0(%1)\n" \
+		"1: ex    %2,4(1)"      /* execute lctl */ \
+		: "=m" (__dummy) \
+		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
+		  "a" (cr*17), "a" (~(1<<(bit))) \
+		: "cc", "0", "1" ); \
 	})
 
 #endif /* __s390x__ */
......
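
An aside on the constants these macros use, since they look magic: lctl/stctl (and their 64-bit g forms) carry the first and last control register as two 4-bit fields, and "ex" ORs the low byte of its register operand into bits 8-15 of the executed instruction. So ((low)<<4)+(high) patches in the range low..high, and cr*17 (= cr*16 + cr) sets both fields to the same cr, i.e. exactly one control register is stored and reloaded. A host-runnable check of just this arithmetic (illustrative sketch, no s390 needed):

	#include <assert.h>
	#include <stdio.h>

	/* The byte that "ex" ORs into bits 8-15 of lctl/stctl: high nibble
	 * is the first control register (R1 field), low nibble the last
	 * (R3 field). */
	static unsigned int ctl_range(unsigned int low, unsigned int high)
	{
		return (low << 4) + high;
	}

	int main(void)
	{
		unsigned int cr;

		/* __ctl_load(array, 0, 15) patches in the full range cr0..cr15. */
		assert(ctl_range(0, 15) == 0x0f);
		/* __ctl_set_bit(cr, bit) passes cr*17: both nibbles equal cr. */
		for (cr = 0; cr < 16; cr++)
			assert(cr * 17 == ctl_range(cr, cr));
		printf("register-range encodings check out\n");
		return 0;
	}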
@@ -76,13 +76,16 @@ static inline void global_flush_tlb(void)
 	}
 #endif /* __s390x__ */
 	{
-		long dummy = 0;
+		register unsigned long addr asm("4");
+		long dummy;
 
+		dummy = 0;
+		addr = ((unsigned long) &dummy) + 1;
 		__asm__ __volatile__ (
-			"    la   4,1(%0)\n"
 			"    slr  2,2\n"
 			"    slr  3,3\n"
-			"    csp  2,4"
-			: : "a" (&dummy) : "cc", "2", "3", "4" );
+			"    csp  2,%0"
+			: : "a" (addr) : "cc", "2", "3" );
 	}
 }
......
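
For reference, csp is compare-and-swap-and-purge: on a successful compare it also purges the TLB, and the purge request rides in the low-order bit of the operand address, which as I read it is why both the old and new code pass the dummy's address plus one (check the Principles of Operation for the authoritative wording). A host-runnable sketch of only that address arithmetic, purely illustrative:

	#include <assert.h>

	int main(void)
	{
		long dummy = 0;
		/* global_flush_tlb() hands csp the dummy's address with the
		 * low-order bit set; the remaining bits still address the
		 * dummy word itself. */
		unsigned long addr = ((unsigned long) &dummy) + 1;
		assert((addr & 1UL) == 1UL);
		assert((addr & ~1UL) == (unsigned long) &dummy);
		return 0;
	}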
@@ -113,82 +113,83 @@ struct exception_table_entry
 #define __put_user_asm_8(x, ptr, err) \
 ({ \
+	register __typeof__(x) const * __from asm("2"); \
+	register __typeof__(*(ptr)) * __to asm("4"); \
+	__from = &(x); \
+	__to = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    2,%2\n" \
-		"   la    4,%1\n" \
 		"   sacf  512\n" \
-		"0: mvc   0(8,4),0(2)\n" \
+		"0: mvc   0(8,%1),0(%2)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err) \
-		: "m" (*(__u64*)(ptr)), "m" (x), "K" (-EFAULT) \
-		: "cc", "2", "4" ); \
+		: "a" (__to),"a" (__from),"K" (-EFAULT),"0" (0) \
+		: "cc" ); \
 })
 #else /* __s390x__ */
 
 #define __put_user_asm_8(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%1\n" \
 		"   sacf  512\n" \
-		"0: stg   %2,0(4)\n" \
+		"0: stg   %2,0(%1)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err) \
-		: "m" (*(__u64*)(ptr)), "d" (x), "K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "d" (x), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 
 #endif /* __s390x__ */
 #define __put_user_asm_4(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%1\n" \
 		"   sacf  512\n" \
-		"0: st    %2,0(4)\n" \
+		"0: st    %2,0(%1)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err) \
-		: "m" (*(__u32*)(ptr)), "d" (x), "K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "d" (x), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 #define __put_user_asm_2(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%1\n" \
 		"   sacf  512\n" \
-		"0: sth   %2,0(4)\n" \
+		"0: sth   %2,0(%1)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err) \
-		: "m" (*(__u16*)(ptr)), "d" (x), "K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "d" (x), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 #define __put_user_asm_1(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%1\n" \
 		"   sacf  512\n" \
-		"0: stc   %2,0(4)\n" \
+		"0: stc   %2,0(%1)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err) \
-		: "m" (*(__u8*)(ptr)), "d" (x), "K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "d" (x), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 
 #define __put_user(x, ptr) \
@@ -223,35 +224,36 @@ extern int __put_user_bad(void);
 #define __get_user_asm_8(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) const * __from asm("2"); \
+	register __typeof__(x) * __to asm("4"); \
+	__from = (ptr); \
+	__to = &(x); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    2,%1\n" \
-		"   la    4,%2\n" \
 		"   sacf  512\n" \
-		"0: mvc   0(8,2),0(4)\n" \
+		"0: mvc   0(8,%1),0(%2)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err), "=m" (x) \
-		: "m" (*(const __u64*)(ptr)),"K" (-EFAULT) \
-		: "cc", "2", "4" ); \
+		: "a" (__to),"a" (__from),"K" (-EFAULT),"0" (0) \
+		: "cc" ); \
})
 #else /* __s390x__ */
 
 #define __get_user_asm_8(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) const * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%2\n" \
 		"   sacf  512\n" \
-		"0: lg    %1,0(4)\n" \
+		"0: lg    %1,0(%2)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err), "=d" (x) \
-		: "m" (*(const __u64*)(ptr)),"K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 
 #endif /* __s390x__ */
@@ -259,48 +261,48 @@ extern int __put_user_bad(void);
 #define __get_user_asm_4(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) const * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%2\n" \
 		"   sacf  512\n" \
-		"0: l     %1,0(4)\n" \
+		"0: l     %1,0(%2)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err), "=d" (x) \
-		: "m" (*(const __u32*)(ptr)),"K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 #define __get_user_asm_2(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) const * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%2\n" \
 		"   sacf  512\n" \
-		"0: lh    %1,0(4)\n" \
+		"0: lh    %1,0(%2)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err), "=d" (x) \
-		: "m" (*(const __u16*)(ptr)),"K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 #define __get_user_asm_1(x, ptr, err) \
 ({ \
+	register __typeof__(*(ptr)) const * __ptr asm("4"); \
+	__ptr = (ptr); \
 	__asm__ __volatile__ ( \
-		"   sr    %0,%0\n" \
-		"   la    4,%2\n" \
 		"   sr    %1,%1\n" \
 		"   sacf  512\n" \
-		"0: ic    %1,0(4)\n" \
+		"0: ic    %1,0(%2)\n" \
 		"   sacf  0\n" \
 		"1:\n" \
 		__uaccess_fixup \
 		: "=&d" (err), "=d" (x) \
-		: "m" (*(const __u8*)(ptr)),"K" (-EFAULT) \
-		: "cc", "4" ); \
+		: "a" (__ptr), "K" (-EFAULT), "0" (0) \
+		: "cc" ); \
 })
 
 #define __get_user(x, ptr) \
......
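
Two details worth calling out in the rewritten uaccess macros: "0" (0) is a matching constraint that makes gcc preload the err output register with zero, replacing the old in-asm "sr %0,%0", and the user pointer is pinned to r4 (r2/r4 for the mvc pair) with register ... asm("4"), presumably so the fixup path keeps working with known registers. A stripped-down sketch of the new shape, assuming s390 kernel context (EFAULT available) and omitting the __uaccess_fixup body; the name is illustrative:

	#define put_user_word_sketch(x, ptr, err)			\
	({								\
		register unsigned int *__ptr asm("4") = (ptr);		\
		__asm__ __volatile__(					\
			"	sacf	512\n"	/* secondary-space mode */ \
			"0:	st	%2,0(%1)\n" /* store to user space */ \
			"	sacf	0\n"	/* back to primary mode */ \
			"1:\n"			/* fixup would branch here */ \
			: "=&d" (err)					\
			: "a" (__ptr), "d" (x), "K" (-EFAULT), "0" (0)	\
			: "cc");					\
	})

The real macros additionally emit __uaccess_fixup after label 1 plus an exception-table entry for label 0, which is what turns a faulting store into err = -EFAULT; until then err simply keeps the zero preset by the matching constraint.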