Commit 94c12cc7 authored by Martin Schwidefsky

[S390] Inline assembly cleanup.

Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used. That results in slightly better code.

Thanks to Christian Borntraeger for proofreading the changes.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 25d83cbf
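
Before the diff, it may help to spell out the two idioms the commit message refers to. A hand-rolled ".section __ex_table" blob is replaced by the EX_TABLE macro, and explicit register-loading instructions are replaced by register asm variables that pin a C value to a fixed hardware register. A minimal sketch of the resulting shape follows; diag_example is a hypothetical helper invented here for illustration (it is not code from this patch), while EX_TABLE, register asm variables and the diagnose instruction are real s390 kernel facilities:

	/* Sketch of the target style, not taken from this commit.
	 * EX_TABLE(0b,1b) emits one exception-table entry: if the
	 * instruction at label 0 faults, execution resumes at label 1. */
	static inline int diag_example(unsigned long addr)
	{
		register unsigned long reg2 asm("2") = addr; /* pin addr to %r2 */
		int rc = -EFAULT;	/* preset error; cleared on success */

		asm volatile(
			"0:	diag	%1,0,0x10\n"	/* may raise a program check */
			"	la	%0,0\n"		/* success path: rc = 0 */
			"1:\n"				/* fixup lands here on a fault */
			EX_TABLE(0b,1b)
			: "+d" (rc) : "d" (reg2) : "cc", "memory");
		return rc;
	}

Because the register assignment is visible to the compiler, the lr/lgr shuffling that many of the removed blocks did by hand simply disappears.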
@@ -104,63 +104,6 @@ struct crypt_s390_query_status {
 	u64 low;
 };
-/*
- * Standard fixup and ex_table sections for crypt_s390 inline functions.
- * label 0: the s390 crypto operation
- * label 1: just after 1 to catch illegal operation exception
- *          (unsupported model)
- * label 6: the return point after fixup
- * label 7: set error value if exception _in_ crypto operation
- * label 8: set error value if illegal operation exception
- * [ret] is the variable to receive the error code
- * [ERR] is the error code value
- */
-#ifndef CONFIG_64BIT
-#define __crypt_s390_fixup \
-	".section .fixup,\"ax\" \n" \
-	"7: lhi %0,%h[e1] \n" \
-	"   bras 1,9f \n" \
-	"   .long 6b \n" \
-	"8: lhi %0,%h[e2] \n" \
-	"   bras 1,9f \n" \
-	"   .long 6b \n" \
-	"9: l 1,0(1) \n" \
-	"   br 1 \n" \
-	".previous \n" \
-	".section __ex_table,\"a\" \n" \
-	"   .align 4 \n" \
-	"   .long 0b,7b \n" \
-	"   .long 1b,8b \n" \
-	".previous"
-#else /* CONFIG_64BIT */
-#define __crypt_s390_fixup \
-	".section .fixup,\"ax\" \n" \
-	"7: lhi %0,%h[e1] \n" \
-	"   jg 6b \n" \
-	"8: lhi %0,%h[e2] \n" \
-	"   jg 6b \n" \
-	".previous\n" \
-	".section __ex_table,\"a\" \n" \
-	"   .align 8 \n" \
-	"   .quad 0b,7b \n" \
-	"   .quad 1b,8b \n" \
-	".previous"
-#endif /* CONFIG_64BIT */
-/*
- * Standard code for setting the result of s390 crypto instructions.
- * %0: the register which will receive the result
- * [result]: the register containing the result (e.g. second operand length
- * to compute number of processed bytes].
- */
-#ifndef CONFIG_64BIT
-#define __crypt_s390_set_result \
-	"   lr %0,%[result] \n"
-#else /* CONFIG_64BIT */
-#define __crypt_s390_set_result \
-	"   lgr %0,%[result] \n"
-#endif
 /*
  * Executes the KM (CIPHER MESSAGE) operation of the CPU.
  * @param func: the function code passed to KM; see crypt_s390_km_func
@@ -176,28 +119,24 @@ crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
 {
 	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
 	register void* __param asm("1") = param;
-	register u8* __dest asm("4") = dest;
 	register const u8* __src asm("2") = src;
 	register long __src_len asm("3") = src_len;
+	register u8* __dest asm("4") = dest;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
-		"1: brc 1,0b \n" /* handle partial completion */
-		__crypt_s390_set_result
-		"6: \n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__dest), "+a" (__src),
-		  [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
+		"1: brc 1,0b \n" /* handle partial completion */
+		"   ahi %0,%h7\n"
+		"2: ahi %0,%h8\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 /*
@@ -215,28 +154,24 @@ crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
 {
 	register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
 	register void* __param asm("1") = param;
-	register u8* __dest asm("4") = dest;
 	register const u8* __src asm("2") = src;
 	register long __src_len asm("3") = src_len;
+	register u8* __dest asm("4") = dest;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
-		"1: brc 1,0b \n" /* handle partial completion */
-		__crypt_s390_set_result
-		"6: \n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__dest), "+a" (__src),
-		  [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
+		"1: brc 1,0b \n" /* handle partial completion */
+		"   ahi %0,%h7\n"
+		"2: ahi %0,%h8\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 /*
@@ -258,22 +193,19 @@ crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
 	register long __src_len asm("3") = src_len;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */
-		"1: brc 1,0b \n" /* handle partical completion */
-		__crypt_s390_set_result
-		"6: \n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
+		"1: brc 1,0b \n" /* handle partial completion */
+		"   ahi %0,%h6\n"
+		"2: ahi %0,%h7\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 /*
@@ -294,22 +226,19 @@ crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
 	register long __src_len asm("3") = src_len;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */
-		"1: brc 1,0b \n" /* handle partical completion */
-		__crypt_s390_set_result
-		"6: \n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
+		"1: brc 1,0b \n" /* handle partial completion */
+		"   ahi %0,%h6\n"
+		"2: ahi %0,%h7\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 /*
@@ -331,22 +260,19 @@ crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
 	register long __src_len asm("3") = src_len;
 	int ret;
 
-	ret = 0;
-	__asm__ __volatile__ (
-		"0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */
-		"1: brc 1,0b \n" /* handle partical completion */
-		__crypt_s390_set_result
-		"6: \n"
-		__crypt_s390_fixup
-		: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
-		: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
-		  "a" (__param)
-		: "cc", "memory"
-	);
-	if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
-		ret = src_len - ret;
-	}
-	return ret;
+	asm volatile(
+		"0: .insn rre,0xb91e0000,%1,%1 \n" /* KMAC opcode */
+		"1: brc 1,0b \n" /* handle partial completion */
+		"   ahi %0,%h6\n"
+		"2: ahi %0,%h7\n"
+		"3:\n"
+		EX_TABLE(0b,3b) EX_TABLE(1b,2b)
+		: "=d" (ret), "+a" (__src), "+d" (__src_len)
+		: "d" (__func), "a" (__param), "0" (-EFAULT),
+		  "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
+	if (ret < 0)
+		return ret;
+	return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
 }
 /**
...
@@ -333,21 +333,13 @@ static int diag204(unsigned long subcode, unsigned long size, void *addr)
 	register unsigned long _subcode asm("0") = subcode;
 	register unsigned long _size asm("1") = size;
 
-	asm volatile ("   diag %2,%0,0x204\n"
-		"0: \n"
-		".section __ex_table,\"a\"\n"
-#ifndef __s390x__
-		"   .align 4\n"
-		"   .long 0b,0b\n"
-#else
-		"   .align 8\n"
-		"   .quad 0b,0b\n"
-#endif
-		".previous":"+d" (_subcode), "+d"(_size)
-		:"d"(addr)
-		:"memory");
+	asm volatile(
+		" diag %2,%0,0x204\n"
+		"0:\n"
+		EX_TABLE(0b,0b)
+		: "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory");
 	if (_subcode)
 		return -1;
-	else
-		return _size;
+	return _size;
 }
@@ -491,8 +483,7 @@ static void *diag204_store(void)
 static void diag224(void *ptr)
 {
-	asm volatile("  diag %0,%1,0x224\n"
-		       : :"d" (0), "d"(ptr) : "memory");
+	asm volatile("diag %0,%1,0x224" : :"d" (0), "d"(ptr) : "memory");
 }
 
 static int diag224_get_name_table(void)
...
@@ -544,10 +544,7 @@ sys32_execve(struct pt_regs regs)
 		current->ptrace &= ~PT_DTRACE;
 		task_unlock(current);
 		current->thread.fp_regs.fpc=0;
-		__asm__ __volatile__
-			("sr 0,0\n\t"
-			 "sfpc 0,0\n\t"
-			 : : :"0");
+		asm volatile("sfpc %0,0" : : "d" (0));
 	}
 	putname(filename);
 out:
...
@@ -25,11 +25,8 @@ static char cpcmd_buf[241];
  */
 int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 {
-	const int mask = 0x40000000L;
-	unsigned long flags;
-	int return_code;
-	int return_len;
-	int cmdlen;
+	unsigned long flags, cmdlen;
+	int return_code, return_len;
 
 	spin_lock_irqsave(&cpcmd_lock, flags);
 	cmdlen = strlen(cmd);
@@ -38,64 +35,44 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 	ASCEBC(cpcmd_buf, cmdlen);
 
 	if (response != NULL && rlen > 0) {
+		register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
+		register unsigned long reg3 asm ("3") = (addr_t) response;
+		register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
+		register unsigned long reg5 asm ("5") = rlen;
+
 		memset(response, 0, rlen);
+		asm volatile(
 #ifndef CONFIG_64BIT
-		asm volatile (	"lra 2,0(%2)\n"
-				"lr 4,%3\n"
-				"o 4,%6\n"
-				"lra 3,0(%4)\n"
-				"lr 5,%5\n"
-				"diag 2,4,0x8\n"
-				"brc 8, 1f\n"
-				"ar 5, %5\n"
-				"1: \n"
-				"lr %0,4\n"
-				"lr %1,5\n"
-				: "=d" (return_code), "=d" (return_len)
-				: "a" (cpcmd_buf), "d" (cmdlen),
-				  "a" (response), "d" (rlen), "m" (mask)
-				: "cc", "2", "3", "4", "5" );
+			" diag %2,%0,0x8\n"
+			" brc 8,1f\n"
+			" ar %1,%4\n"
 #else /* CONFIG_64BIT */
-		asm volatile (	"lrag 2,0(%2)\n"
-				"lgr 4,%3\n"
-				"o 4,%6\n"
-				"lrag 3,0(%4)\n"
-				"lgr 5,%5\n"
-				"sam31\n"
-				"diag 2,4,0x8\n"
-				"sam64\n"
-				"brc 8, 1f\n"
-				"agr 5, %5\n"
-				"1: \n"
-				"lgr %0,4\n"
-				"lgr %1,5\n"
-				: "=d" (return_code), "=d" (return_len)
-				: "a" (cpcmd_buf), "d" (cmdlen),
-				  "a" (response), "d" (rlen), "m" (mask)
-				: "cc", "2", "3", "4", "5" );
+			" sam31\n"
+			" diag %2,%0,0x8\n"
+			" sam64\n"
+			" brc 8,1f\n"
+			" agr %1,%4\n"
 #endif /* CONFIG_64BIT */
+			"1:\n"
+			: "+d" (reg4), "+d" (reg5)
+			: "d" (reg2), "d" (reg3), "d" (rlen) : "cc");
+		return_code = (int) reg4;
+		return_len = (int) reg5;
 		EBCASC(response, rlen);
 	} else {
+		register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
+		register unsigned long reg3 asm ("3") = cmdlen;
+
 		return_len = 0;
+		asm volatile(
#ifndef CONFIG_64BIT
-		asm volatile (	"lra 2,0(%1)\n"
-				"lr 3,%2\n"
-				"diag 2,3,0x8\n"
-				"lr %0,3\n"
-				: "=d" (return_code)
-				: "a" (cpcmd_buf), "d" (cmdlen)
-				: "2", "3" );
+			" diag %1,%0,0x8\n"
 #else /* CONFIG_64BIT */
-		asm volatile (	"lrag 2,0(%1)\n"
-				"lgr 3,%2\n"
-				"sam31\n"
-				"diag 2,3,0x8\n"
-				"sam64\n"
-				"lgr %0,3\n"
-				: "=d" (return_code)
-				: "a" (cpcmd_buf), "d" (cmdlen)
-				: "2", "3" );
+			" sam31\n"
+			" diag %1,%0,0x8\n"
+			" sam64\n"
 #endif /* CONFIG_64BIT */
+			: "+d" (reg3) : "d" (reg2) : "cc");
+		return_code = (int) reg3;
 	}
 	spin_unlock_irqrestore(&cpcmd_lock, flags);
 	if (response_code != NULL)
...
@@ -120,24 +120,15 @@ static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
 static int diag308(unsigned long subcode, void *addr)
 {
-	register unsigned long _addr asm("0") = (unsigned long)addr;
+	register unsigned long _addr asm("0") = (unsigned long) addr;
 	register unsigned long _rc asm("1") = 0;
 
-	asm volatile (
+	asm volatile(
 		" diag %0,%2,0x308\n"
-		"0: \n"
-		".section __ex_table,\"a\"\n"
-#ifdef CONFIG_64BIT
-		" .align 8\n"
-		" .quad 0b, 0b\n"
-#else
-		" .align 4\n"
-		" .long 0b, 0b\n"
-#endif
-		".previous\n"
+		"0:\n"
+		EX_TABLE(0b,0b)
 		: "+d" (_addr), "+d" (_rc)
-		: "d" (subcode) : "cc", "memory" );
+		: "d" (subcode) : "cc", "memory");
 	return _rc;
 }
...
@@ -45,7 +45,7 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 
-asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 
 /*
  * Return saved PC of a blocked thread. used in kernel/sched.
@@ -177,7 +177,8 @@ void show_regs(struct pt_regs *regs)
 extern void kernel_thread_starter(void);
 
-__asm__(".align 4\n"
+asm(
+	".align 4\n"
 	"kernel_thread_starter:\n"
 	"     la    2,0(10)\n"
 	"     basr  14,9\n"
...
@@ -26,17 +26,17 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
 {
 	int old_val, new_val;
 
-	__asm__ __volatile__("   l   %0,0(%3)\n"
+	asm volatile(
+		"   l   %0,0(%3)\n"
 		"0: ltr %1,%0\n"
 		"   jhe 1f\n"
 		"   lhi %1,0\n"
 		"1: ar  %1,%4\n"
 		"   cs  %0,%1,0(%3)\n"
 		"   jl  0b\n"
-		: "=&d" (old_val), "=&d" (new_val),
-		  "=m" (sem->count)
+		: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
 		: "a" (&sem->count), "d" (incr), "m" (sem->count)
-		: "cc" );
+		: "cc");
 	return old_val;
 }
...
@@ -101,7 +101,7 @@ void __devinit cpu_init (void)
 	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
-	asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
+	asm volatile("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
 	S390_lowcore.cpu_data.cpu_addr = addr;
 
 	/*
...
@@ -63,7 +63,7 @@ static void smp_ext_bitcall(int, ec_bit_sig);
 static void smp_ext_bitcall_others(ec_bit_sig);
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise 5B
+ * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
  */
 static DEFINE_SPINLOCK(call_lock);
@@ -418,59 +418,49 @@ void smp_send_reschedule(int cpu)
 /*
  * parameter area for the set/clear control bit callbacks
  */
-typedef struct
-{
-	__u16 start_ctl;
-	__u16 end_ctl;
+struct ec_creg_mask_parms {
 	unsigned long orvals[16];
 	unsigned long andvals[16];
-} ec_creg_mask_parms;
+};
 
 /*
  * callback for setting/clearing control bits
  */
 void smp_ctl_bit_callback(void *info) {
-	ec_creg_mask_parms *pp;
+	struct ec_creg_mask_parms *pp = info;
 	unsigned long cregs[16];
 	int i;
 
-	pp = (ec_creg_mask_parms *) info;
-	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
-	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
+	__ctl_store(cregs, 0, 15);
+	for (i = 0; i <= 15; i++)
 		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
-	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
+	__ctl_load(cregs, 0, 15);
 }
 
 /*
  * Set a bit in a control register of all cpus
 */
-void smp_ctl_set_bit(int cr, int bit) {
-	ec_creg_mask_parms parms;
+void smp_ctl_set_bit(int cr, int bit)
+{
+	struct ec_creg_mask_parms parms;
 
-	parms.start_ctl = cr;
-	parms.end_ctl = cr;
+	memset(&parms.orvals, 0, sizeof(parms.orvals));
+	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.orvals[cr] = 1 << bit;
-	parms.andvals[cr] = -1L;
-	preempt_disable();
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_set_bit(cr, bit);
-	preempt_enable();
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
  * Clear a bit in a control register of all cpus
 */
-void smp_ctl_clear_bit(int cr, int bit) {
-	ec_creg_mask_parms parms;
+void smp_ctl_clear_bit(int cr, int bit)
+{
+	struct ec_creg_mask_parms parms;
 
-	parms.start_ctl = cr;
-	parms.end_ctl = cr;
-	parms.orvals[cr] = 0;
+	memset(&parms.orvals, 0, sizeof(parms.orvals));
+	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 	parms.andvals[cr] = ~(1L << bit);
-	preempt_disable();
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_clear_bit(cr, bit);
-	preempt_enable();
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
@@ -650,9 +640,9 @@ __cpu_up(unsigned int cpu)
 	sf->gprs[9] = (unsigned long) sf;
 	cpu_lowcore->save_area[15] = (unsigned long) sf;
 	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
-	__asm__ __volatile__("stam  0,15,0(%0)"
-		: : "a" (&cpu_lowcore->access_regs_save_area)
-		: "memory");
+	asm volatile(
+		" stam 0,15,0(%0)"
+		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
@@ -708,7 +698,7 @@ int
 __cpu_disable(void)
 {
 	unsigned long flags;
-	ec_creg_mask_parms cr_parms;
+	struct ec_creg_mask_parms cr_parms;
 	int cpu = smp_processor_id();
 
 	spin_lock_irqsave(&smp_reserve_lock, flags);
@@ -724,30 +714,21 @@ __cpu_disable(void)
 	pfault_fini();
 #endif
 
-	/* disable all external interrupts */
+	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
+	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
 
-	cr_parms.start_ctl = 0;
-	cr_parms.end_ctl = 0;
+	/* disable all external interrupts */
 	cr_parms.orvals[0] = 0;
 	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
 				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
-	smp_ctl_bit_callback(&cr_parms);
 
 	/* disable all I/O interrupts */
-	cr_parms.start_ctl = 6;
-	cr_parms.end_ctl = 6;
 	cr_parms.orvals[6] = 0;
 	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
 				1<<27 | 1<<26 | 1<<25 | 1<<24);
-	smp_ctl_bit_callback(&cr_parms);
 
 	/* disable most machine checks */
-	cr_parms.start_ctl = 14;
-	cr_parms.end_ctl = 14;
 	cr_parms.orvals[14] = 0;
 	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+
 	smp_ctl_bit_callback(&cr_parms);
 
 	spin_unlock_irqrestore(&smp_reserve_lock, flags);
...
@@ -351,10 +351,12 @@ void __init time_init(void)
 	int cc;
 
 	/* kick the TOD clock */
-	asm volatile ("STCK 0(%1)\n\t"
-		      "IPM %0\n\t"
-		      "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
-		      : "memory", "cc");
+	asm volatile(
+		" stck 0(%2)\n"
+		" ipm %0\n"
+		" srl %0,28"
+		: "=d" (cc), "=m" (init_timer_cc)
+		: "a" (&init_timer_cc) : "cc");
 	switch (cc) {
 	case 0: /* clock in set state: all is fine */
 		break;
...
@@ -597,8 +597,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
 	local_irq_enable();
 
 	if (MACHINE_HAS_IEEE)
-		__asm__ volatile ("stfpc %0\n\t"
-				  : "=m" (current->thread.fp_regs.fpc));
+		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
 
 #ifdef CONFIG_MATHEMU
 	else if (regs->psw.mask & PSW_MASK_PSTATE) {
...
@@ -27,9 +27,7 @@ void __delay(unsigned long loops)
  * yield the megahertz number of the cpu. The important function
  * is udelay and that is done using the tod clock. -- martin.
  */
-	__asm__ __volatile__(
-		"0: brct %0,0b"
-		: /* no outputs */ : "r" ((loops/2) + 1));
+	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
 }
 
 /*
@@ -38,13 +36,12 @@ void __delay(unsigned long loops)
  */
 void __udelay(unsigned long usecs)
 {
-	uint64_t start_cc, end_cc;
+	uint64_t start_cc;
 
 	if (usecs == 0)
 		return;
-	asm volatile ("STCK %0" : "=m" (start_cc));
+	start_cc = get_clock();
 	do {
 		cpu_relax();
-		asm volatile ("STCK %0" : "=m" (end_cc));
-	} while (((end_cc - start_cc)/4096) < usecs);
+	} while (((get_clock() - start_cc)/4096) < usecs);
 }
@@ -1566,50 +1566,50 @@ static int emu_tceb (struct pt_regs *regs, int rx, long val) {
 static inline void emu_load_regd(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* load reg from fp_regs.fprs[reg] */
+	asm volatile(		/* load reg from fp_regs.fprs[reg] */
 		" bras 1,0f\n"
 		" ld 0,0(%1)\n"
 		"0: ex %0,0(1)"
 		: /* no output */
 		: "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d)
-		: "1" );
+		: "1");
 }
 
 static inline void emu_load_rege(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* load reg from fp_regs.fprs[reg] */
+	asm volatile(		/* load reg from fp_regs.fprs[reg] */
 		" bras 1,0f\n"
 		" le 0,0(%1)\n"
 		"0: ex %0,0(1)"
 		: /* no output */
 		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
-		: "1" );
+		: "1");
 }
 
 static inline void emu_store_regd(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* store reg to fp_regs.fprs[reg] */
+	asm volatile(		/* store reg to fp_regs.fprs[reg] */
 		" bras 1,0f\n"
 		" std 0,0(%1)\n"
 		"0: ex %0,0(1)"
 		: /* no output */
 		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d)
-		: "1" );
+		: "1");
 }
 
 static inline void emu_store_rege(int reg) {
 	if ((reg&9) != 0)	/* test if reg in {0,2,4,6} */
 		return;
-	asm volatile (		/* store reg to fp_regs.fprs[reg] */
+	asm volatile(		/* store reg to fp_regs.fprs[reg] */
 		" bras 1,0f\n"
 		" ste 0,0(%1)\n"
 		"0: ex %0,0(1)"
 		: /* no output */
 		: "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f)
-		: "1" );
+		: "1");
 }
 
 int math_emu_b3(__u8 *opcode, struct pt_regs * regs) {
@@ -2089,23 +2089,22 @@ int math_emu_ldr(__u8 *opcode) {
 	if ((opc & 0x90) == 0) {	/* test if rx in {0,2,4,6} */
 		/* we got an exception therefore ry can't be in {0,2,4,6} */
-		__asm__ __volatile (	/* load rx from fp_regs.fprs[ry] */
+		asm volatile(		/* load rx from fp_regs.fprs[ry] */
 			" bras 1,0f\n"
 			" ld 0,0(%1)\n"
 			"0: ex %0,0(1)"
 			: /* no output */
-			: "a" (opc & 0xf0),
-			  "a" (&fp_regs->fprs[opc & 0xf].d)
-			: "1" );
+			: "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d)
+			: "1");
 	} else if ((opc & 0x9) == 0) {	/* test if ry in {0,2,4,6} */
-		__asm__ __volatile (	/* store ry to fp_regs.fprs[rx] */
+		asm volatile (		/* store ry to fp_regs.fprs[rx] */
 			" bras 1,0f\n"
 			" std 0,0(%1)\n"
 			"0: ex %0,0(1)"
 			: /* no output */
 			: "a" ((opc & 0xf) << 4),
 			  "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d)
-			: "1" );
+			: "1");
 	} else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
 		fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
 	return 0;
@@ -2120,23 +2119,22 @@ int math_emu_ler(__u8 *opcode) {
 	if ((opc & 0x90) == 0) {	/* test if rx in {0,2,4,6} */
 		/* we got an exception therefore ry can't be in {0,2,4,6} */
-		__asm__ __volatile (	/* load rx from fp_regs.fprs[ry] */
+		asm volatile(		/* load rx from fp_regs.fprs[ry] */
 			" bras 1,0f\n"
 			" le 0,0(%1)\n"
 			"0: ex %0,0(1)"
 			: /* no output */
-			: "a" (opc & 0xf0),
-			  "a" (&fp_regs->fprs[opc & 0xf].f)
-			: "1" );
+			: "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f)
+			: "1");
 	} else if ((opc & 0x9) == 0) {	/* test if ry in {0,2,4,6} */
-		__asm__ __volatile (	/* store ry to fp_regs.fprs[rx] */
+		asm volatile(		/* store ry to fp_regs.fprs[rx] */
 			" bras 1,0f\n"
 			" ste 0,0(%1)\n"
 			"0: ex %0,0(1)"
 			: /* no output */
 			: "a" ((opc & 0xf) << 4),
 			  "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f)
-			: "1" );
+			: "1");
 	} else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */
 		fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf];
 	return 0;
...
@@ -6,12 +6,13 @@
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \
 	unsigned int __sh = (ah); \
 	unsigned int __sl = (al); \
-	__asm__ (" alr %1,%3\n" \
+	asm volatile( \
+		" alr %1,%3\n" \
 		" brc 12,0f\n" \
 		" ahi %0,1\n" \
 		"0: alr %0,%2" \
 		: "+&d" (__sh), "+d" (__sl) \
-		: "d" (bh), "d" (bl) : "cc" ); \
+		: "d" (bh), "d" (bl) : "cc"); \
 	(sh) = __sh; \
 	(sl) = __sl; \
 })
@@ -19,12 +20,13 @@
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \
 	unsigned int __sh = (ah); \
 	unsigned int __sl = (al); \
-	__asm__ (" slr %1,%3\n" \
+	asm volatile( \
+		" slr %1,%3\n" \
 		" brc 3,0f\n" \
 		" ahi %0,-1\n" \
 		"0: slr %0,%2" \
 		: "+&d" (__sh), "+d" (__sl) \
-		: "d" (bh), "d" (bl) : "cc" ); \
+		: "d" (bh), "d" (bl) : "cc"); \
 	(sh) = __sh; \
 	(sl) = __sl; \
 })
@@ -33,7 +35,8 @@
 #define umul_ppmm(wh, wl, u, v) ({ \
 	unsigned int __wh = u; \
 	unsigned int __wl = v; \
-	__asm__ (" ltr 1,%0\n" \
+	asm volatile( \
+		" ltr 1,%0\n" \
 		" mr 0,%1\n" \
 		" jnm 0f\n" \
 		" alr 0,%1\n" \
@@ -43,7 +46,7 @@
 	"1: lr %0,0\n" \
 		" lr %1,1\n" \
 		: "+d" (__wh), "+d" (__wl) \
-		: : "0", "1", "cc" ); \
+		: : "0", "1", "cc"); \
 	wh = __wh; \
 	wl = __wl; \
 })
...
@@ -142,17 +142,17 @@ dcss_diag (__u8 func, void *parameter,
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) func;
-	__asm__ __volatile__(
+	asm volatile(
 #ifdef CONFIG_64BIT
-		" sam31\n"	// switch to 31 bit
-		" diag %0,%1,0x64\n"
-		" sam64\n"	// switch back to 64 bit
+		" sam31\n"
+		" diag %0,%1,0x64\n"
+		" sam64\n"
 #else
 		" diag %0,%1,0x64\n"
 #endif
 		" ipm %2\n"
 		" srl %2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
...
@@ -424,20 +424,13 @@ int pfault_init(void)
 	if (pfault_disable)
 		return -1;
-	__asm__ __volatile__(
+	asm volatile(
 		" diag %1,%0,0x258\n"
 		"0: j 2f\n"
 		"1: la %0,8\n"
 		"2:\n"
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-#ifndef CONFIG_64BIT
-		" .long 0b,1b\n"
-#else /* CONFIG_64BIT */
-		" .quad 0b,1b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,1b)
+		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
 	__ctl_set_bit(0, 9);
 	return rc;
 }
@@ -450,18 +443,11 @@ void pfault_fini(void)
 	if (pfault_disable)
 		return;
 	__ctl_clear_bit(0,9);
-	__asm__ __volatile__(
+	asm volatile(
 		" diag %0,0,0x258\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-#ifndef CONFIG_64BIT
-		" .long 0b,0b\n"
-#else /* CONFIG_64BIT */
-		" .quad 0b,0b\n"
-#endif /* CONFIG_64BIT */
-		".previous"
-		: : "a" (&refbk), "m" (refbk) : "cc" );
+		EX_TABLE(0b,0b)
+		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
 asmlinkage void
...
@@ -45,26 +45,17 @@ void diag10(unsigned long addr)
 {
 	if (addr >= 0x7ff00000)
 		return;
+	asm volatile(
 #ifdef CONFIG_64BIT
-	asm volatile (
 		" sam31\n"
 		" diag %0,%0,0x10\n"
 		"0: sam64\n"
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
 #else
-	asm volatile (
 		" diag %0,%0,0x10\n"
 		"0:\n"
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b, 0b\n"
-		".previous\n"
-		: : "a" (addr));
 #endif
+		EX_TABLE(0b,0b)
+		: : "a" (addr));
 }
 
 void show_mem(void)
@@ -156,11 +147,10 @@ void __init paging_init(void)
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__(" LCTL 1,1,%0\n"
-			     " LCTL 7,7,%0\n"
-			     " LCTL 13,13,%0\n"
-			     " SSM %1"
-			     : : "m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
 	return;
@@ -241,11 +231,10 @@ void __init paging_init(void)
 	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__asm__ __volatile__("lctlg 1,1,%0\n\t"
-			     "lctlg 7,7,%0\n\t"
-			     "lctlg 13,13,%0\n\t"
-			     "ssm %1"
-			     : :"m" (pgdir_k), "m" (ssm_mask));
+	__ctl_load(pgdir_k, 1, 1);
+	__ctl_load(pgdir_k, 7, 7);
+	__ctl_load(pgdir_k, 13, 13);
+	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
...
@@ -63,44 +63,26 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
  * and function code cmd.
  * In case of an exception return 3. Otherwise return result of bitwise OR of
  * resulting condition code and DIAG return code. */
-static __inline__ int
-dia250(void *iob, int cmd)
+static inline int dia250(void *iob, int cmd)
 {
+	register unsigned long reg0 asm ("0") = (unsigned long) iob;
 	typedef union {
 		struct dasd_diag_init_io init_io;
 		struct dasd_diag_rw_io rw_io;
 	} addr_type;
 	int rc;
 
-	__asm__ __volatile__(
-#ifdef CONFIG_64BIT
-		" lghi %0,3\n"
-		" lgr 0,%3\n"
-		" diag 0,%2,0x250\n"
-		"0: ipm %0\n"
-		" srl %0,28\n"
-		" or %0,1\n"
-		"1:\n"
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b,1b\n"
-		".previous\n"
-#else
-		" lhi %0,3\n"
-		" lr 0,%3\n"
+	rc = 3;
+	asm volatile(
 		" diag 0,%2,0x250\n"
 		"0: ipm %0\n"
 		" srl %0,28\n"
 		" or %0,1\n"
 		"1:\n"
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b,1b\n"
-		".previous\n"
-#endif
-		: "=&d" (rc), "=m" (*(addr_type *) iob)
-		: "d" (cmd), "d" (iob), "m" (*(addr_type *) iob)
-		: "0", "1", "cc");
+		EX_TABLE(0b,1b)
+		: "+d" (rc), "=m" (*(addr_type *) iob)
+		: "d" (cmd), "d" (reg0), "m" (*(addr_type *) iob)
+		: "1", "cc");
 	return rc;
 }
...
@@ -89,28 +89,15 @@ MODULE_LICENSE("GPL");
  */
 static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
 {
-	int cc;
+	int cc = 2;	/* return unused cc 2 if pgin traps */
 
-	__asm__ __volatile__ (
-		" lhi %0,2\n"	/* return unused cc 2 if pgin traps */
+	asm volatile(
 		" .insn rre,0xb22e0000,%1,%2\n"  /* pgin %1,%2 */
 		"0: ipm %0\n"
 		" srl %0,28\n"
 		"1:\n"
-#ifndef CONFIG_64BIT
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b,1b\n"
-		".previous"
-#else
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b,1b\n"
-		".previous"
-#endif
-		: "=&d" (cc)
-		: "a" (__pa(page_addr)), "a" (xpage_index)
-		: "cc" );
+		EX_TABLE(0b,1b)
+		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
 	if (cc == 3)
 		return -ENXIO;
 	if (cc == 2) {
@@ -137,28 +124,15 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
  */
 static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
 {
-	int cc;
+	int cc = 2;	/* return unused cc 2 if pgout traps */
 
-	__asm__ __volatile__ (
-		" lhi %0,2\n"	/* return unused cc 2 if pgout traps */
+	asm volatile(
 		" .insn rre,0xb22f0000,%1,%2\n"  /* pgout %1,%2 */
 		"0: ipm %0\n"
 		" srl %0,28\n"
 		"1:\n"
-#ifndef CONFIG_64BIT
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b,1b\n"
-		".previous"
-#else
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b,1b\n"
-		".previous"
-#endif
-		: "=&d" (cc)
-		: "a" (__pa(page_addr)), "a" (xpage_index)
-		: "cc" );
+		EX_TABLE(0b,1b)
+		: "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc");
 	if (cc == 3)
 		return -ENXIO;
 	if (cc == 2) {
...
@@ -100,13 +100,12 @@ service_call(sclp_cmdw_t command, void *sccb)
 {
 	int cc;
 
-	__asm__ __volatile__(
+	asm volatile(
 		" .insn rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
 		" ipm %0\n"
 		" srl %0,28"
-		: "=&d" (cc)
-		: "d" (command), "a" (__pa(sccb))
-		: "cc", "memory" );
+		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
+		: "cc", "memory");
 	if (cc == 3)
 		return -EIO;
 	if (cc == 2)
@@ -360,16 +359,6 @@ sclp_interrupt_handler(struct pt_regs *regs, __u16 code)
 	sclp_process_queue();
 }
 
-/* Return current Time-Of-Day clock. */
-static inline u64
-sclp_get_clock(void)
-{
-	u64 result;
-
-	asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc");
-	return result;
-}
-
 /* Convert interval in jiffies to TOD ticks. */
 static inline u64
 sclp_tod_from_jiffies(unsigned long jiffies)
@@ -382,7 +371,6 @@ sclp_tod_from_jiffies(unsigned long jiffies)
 void
 sclp_sync_wait(void)
 {
-	unsigned long psw_mask;
 	unsigned long flags;
 	unsigned long cr0, cr0_sync;
 	u64 timeout;
@@ -392,7 +380,7 @@ sclp_sync_wait(void)
 	timeout = 0;
 	if (timer_pending(&sclp_request_timer)) {
 		/* Get timeout TOD value */
-		timeout = sclp_get_clock() +
+		timeout = get_clock() +
 			  sclp_tod_from_jiffies(sclp_request_timer.expires -
 						jiffies);
 	}
@@ -406,13 +394,12 @@ sclp_sync_wait(void)
 	cr0_sync |= 0x00000200;
 	cr0_sync &= 0xFFFFF3AC;
 	__ctl_load(cr0_sync, 0, 0);
-	asm volatile ("STOSM 0(%1),0x01"
-		      : "=m" (psw_mask) : "a" (&psw_mask) : "memory");
+	__raw_local_irq_stosm(0x01);
 	/* Loop until driver state indicates finished request */
 	while (sclp_running_state != sclp_running_state_idle) {
 		/* Check for expired request timer */
 		if (timer_pending(&sclp_request_timer) &&
-		    sclp_get_clock() > timeout &&
+		    get_clock() > timeout &&
 		    del_timer(&sclp_request_timer))
 			sclp_request_timer.function(sclp_request_timer.data);
 		barrier();
...
@@ -54,48 +54,20 @@ enum vmwdt_func {
 static int __diag288(enum vmwdt_func func, unsigned int timeout,
 		     char *cmd, size_t len)
 {
-	register unsigned long __func asm("2");
-	register unsigned long __timeout asm("3");
-	register unsigned long __cmdp asm("4");
-	register unsigned long __cmdl asm("5");
+	register unsigned long __func asm("2") = func;
+	register unsigned long __timeout asm("3") = timeout;
+	register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
+	register unsigned long __cmdl asm("5") = len;
 	int err;
 
-	__func = func;
-	__timeout = timeout;
-	__cmdp = virt_to_phys(cmd);
-	__cmdl = len;
-	err = 0;
-	asm volatile (
-#ifdef CONFIG_64BIT
-		"diag %2,%4,0x288\n"
-		"1: \n"
-		".section .fixup,\"ax\"\n"
-		"2: lghi %0,%1\n"
-		"   jg 1b\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 8\n"
-		"   .quad 1b,2b\n"
-		".previous\n"
-#else
-		"diag %2,%4,0x288\n"
-		"1: \n"
-		".section .fixup,\"ax\"\n"
-		"2: lhi %0,%1\n"
-		"   bras 1,3f\n"
-		"   .long 1b\n"
-		"3: l 1,0(1)\n"
-		"   br 1\n"
-		".previous\n"
-		".section __ex_table,\"a\"\n"
-		"   .align 4\n"
-		"   .long 1b,2b\n"
-		".previous\n"
-#endif
-		: "+&d"(err)
-		: "i"(-EINVAL), "d"(__func), "d"(__timeout),
-		  "d"(__cmdp), "d"(__cmdl)
-		: "1", "cc");
+	err = -EINVAL;
+	asm volatile(
+		" diag %1,%3,0x288\n"
+		"0: la %0,0\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "=d" (err) : "d"(__func), "d"(__timeout),
+		  "d"(__cmdp), "d"(__cmdl), "0" (-EINVAL) : "1", "cc");
 	return err;
 }
...
@@ -42,18 +42,15 @@ diag210(struct diag210 * addr)
 	spin_lock_irqsave(&diag210_lock, flags);
 	diag210_tmp = *addr;
 
-	asm volatile (
+	asm volatile(
 		" lhi %0,-1\n"
 		" sam31\n"
 		" diag %1,0,0x210\n"
 		"0: ipm %0\n"
 		" srl %0,28\n"
 		"1: sam64\n"
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b,1b\n"
-		".previous"
-		: "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" );
+		EX_TABLE(0b,1b)
+		: "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory");
 
 	*addr = diag210_tmp;
 	spin_unlock_irqrestore(&diag210_lock, flags);
@@ -66,17 +63,14 @@ diag210(struct diag210 * addr)
 {
 	int ccode;
 
-	asm volatile (
+	asm volatile(
 		" lhi %0,-1\n"
 		" diag %1,0,0x210\n"
 		"0: ipm %0\n"
 		" srl %0,28\n"
 		"1:\n"
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b,1b\n"
-		".previous"
-		: "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" );
+		EX_TABLE(0b,1b)
+		: "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory");
 
 	return ccode;
 }
...
@@ -25,106 +25,74 @@ struct tpi_info {
 static inline int stsch(struct subchannel_id schid,
 			volatile struct schib *addr)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
+	asm volatile(
 		" stsch 0(%2)\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid), "a" (addr), "m" (*addr)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
 	return ccode;
 }
 
 static inline int stsch_err(struct subchannel_id schid,
 			    volatile struct schib *addr)
 {
-	int ccode;
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
 
-	__asm__ __volatile__(
-		" lhi %0,%3\n"
-		" lr 1,%1\n"
+	asm volatile(
 		" stsch 0(%2)\n"
 		"0: ipm %0\n"
 		" srl %0,28\n"
 		"1:\n"
-#ifdef CONFIG_64BIT
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b,1b\n"
-		".previous"
-#else
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b,1b\n"
-		".previous"
-#endif
-		: "=&d" (ccode)
-		: "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
-		: "cc", "1" );
+		EX_TABLE(0b,1b)
+		: "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
 	return ccode;
 }
 
 static inline int msch(struct subchannel_id schid,
 		       volatile struct schib *addr)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
+	asm volatile(
 		" msch 0(%2)\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid), "a" (addr), "m" (*addr)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
 	return ccode;
 }
 
 static inline int msch_err(struct subchannel_id schid,
 			   volatile struct schib *addr)
 {
-	int ccode;
+	register struct subchannel_id reg1 asm ("1") = schid;
+	int ccode = -EIO;
 
-	__asm__ __volatile__(
-		" lhi %0,%3\n"
-		" lr 1,%1\n"
+	asm volatile(
 		" msch 0(%2)\n"
 		"0: ipm %0\n"
 		" srl %0,28\n"
 		"1:\n"
-#ifdef CONFIG_64BIT
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b,1b\n"
-		".previous"
-#else
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b,1b\n"
-		".previous"
-#endif
-		: "=&d" (ccode)
-		: "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
-		: "cc", "1" );
+		EX_TABLE(0b,1b)
+		: "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
 	return ccode;
 }
 
 static inline int tsch(struct subchannel_id schid,
 		       volatile struct irb *addr)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
+	asm volatile(
 		" tsch 0(%2)\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid), "a" (addr), "m" (*addr)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
 	return ccode;
 }
@@ -132,89 +100,77 @@ static inline int tpi( volatile struct tpi_info *addr)
 {
 	int ccode;
 
-	__asm__ __volatile__(
+	asm volatile(
 		" tpi 0(%1)\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "a" (addr), "m" (*addr)
-		: "cc", "1" );
+		: "=d" (ccode) : "a" (addr), "m" (*addr) : "cc");
 	return ccode;
 }
 
 static inline int ssch(struct subchannel_id schid,
 		       volatile struct orb *addr)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
		" ssch 0(%2)\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid), "a" (addr), "m" (*addr)
-		: "cc", "1" );
+	asm volatile(
+		" ssch 0(%2)\n"
+		" ipm %0\n"
+		" srl %0,28"
+		: "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
 	return ccode;
 }
 
 static inline int rsch(struct subchannel_id schid)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
+	asm volatile(
 		" rsch\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1) : "cc");
 	return ccode;
 }
 
 static inline int csch(struct subchannel_id schid)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
+	asm volatile(
 		" csch\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1) : "cc");
 	return ccode;
 }
 
 static inline int hsch(struct subchannel_id schid)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
+	asm volatile(
 		" hsch\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1) : "cc");
 	return ccode;
 }
 
 static inline int xsch(struct subchannel_id schid)
 {
+	register struct subchannel_id reg1 asm ("1") = schid;
 	int ccode;
 
-	__asm__ __volatile__(
-		" lr 1,%1\n"
+	asm volatile(
 		" .insn rre,0xb2760000,%1,0\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (schid)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1) : "cc");
 	return ccode;
 }
@@ -223,41 +179,27 @@ static inline int chsc(void *chsc_area)
 	typedef struct { char _[4096]; } addr_type;
 	int cc;
 
-	__asm__ __volatile__ (
-		".insn rre,0xb25f0000,%2,0 \n\t"
-		"ipm %0 \n\t"
-		"srl %0,28 \n\t"
+	asm volatile(
+		" .insn rre,0xb25f0000,%2,0\n"
+		" ipm %0\n"
+		" srl %0,28\n"
 		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
 		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
-		: "cc" );
+		: "cc");
 	return cc;
 }
 
-static inline int iac( void)
-{
-	int ccode;
-
-	__asm__ __volatile__(
-		" iac 1\n"
-		" ipm %0\n"
-		" srl %0,28"
-		: "=d" (ccode) : : "cc", "1" );
-	return ccode;
-}
-
 static inline int rchp(int chpid)
 {
+	register unsigned int reg1 asm ("1") = chpid;
 	int ccode;
 
-	__asm__ __volatile__(
+	asm volatile(
 		" lr 1,%1\n"
 		" rchp\n"
 		" ipm %0\n"
 		" srl %0,28"
-		: "=d" (ccode)
-		: "d" (chpid)
-		: "cc", "1" );
+		: "=d" (ccode) : "d" (reg1) : "cc");
 	return ccode;
 }
...
...@@ -274,12 +274,11 @@ do_sqbs(unsigned long sch, unsigned char state, int queue, ...@@ -274,12 +274,11 @@ do_sqbs(unsigned long sch, unsigned char state, int queue,
register unsigned long _sch asm ("1") = sch; register unsigned long _sch asm ("1") = sch;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
asm volatile ( asm volatile(
" .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t" " .insn rsy,0xeb000000008A,%1,0,0(%2)"
: "+d" (_ccq), "+d" (_queuestart) : "+d" (_ccq), "+d" (_queuestart)
: "d" ((unsigned long)state), "d" (_sch) : "d" ((unsigned long)state), "d" (_sch)
: "memory", "cc" : "memory", "cc");
);
*count = _ccq & 0xff; *count = _ccq & 0xff;
*start = _queuestart & 0xff; *start = _queuestart & 0xff;
...@@ -299,12 +298,11 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue, ...@@ -299,12 +298,11 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue,
unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = 0; unsigned long _state = 0;
asm volatile ( asm volatile(
" .insn rrf,0xB99c0000,%1,%2,0,0 \n\t" " .insn rrf,0xB99c0000,%1,%2,0,0"
: "+d" (_ccq), "+d" (_queuestart), "+d" (_state) : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
: "d" (_sch) : "d" (_sch)
: "memory", "cc" : "memory", "cc" );
);
*count = _ccq & 0xff; *count = _ccq & 0xff;
*start = _queuestart & 0xff; *start = _queuestart & 0xff;
*state = _state & 0xff; *state = _state & 0xff;
...@@ -319,69 +317,35 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue, ...@@ -319,69 +317,35 @@ do_eqbs(unsigned long sch, unsigned char *state, int queue,
static inline int static inline int
do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
{ {
register unsigned long reg0 asm ("0") = 2;
register struct subchannel_id reg1 asm ("1") = schid;
register unsigned long reg2 asm ("2") = mask1;
register unsigned long reg3 asm ("3") = mask2;
int cc; int cc;
#ifndef CONFIG_64BIT asm volatile(
asm volatile ( " siga 0\n"
"lhi 0,2 \n\t" " ipm %0\n"
"lr 1,%1 \n\t" " srl %0,28\n"
"lr 2,%2 \n\t"
"lr 3,%3 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc) : "=d" (cc)
: "d" (schid), "d" (mask1), "d" (mask2) : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
: "cc", "0", "1", "2", "3"
);
#else /* CONFIG_64BIT */
asm volatile (
"lghi 0,2 \n\t"
"llgfr 1,%1 \n\t"
"llgfr 2,%2 \n\t"
"llgfr 3,%3 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (schid), "d" (mask1), "d" (mask2)
: "cc", "0", "1", "2", "3"
);
#endif /* CONFIG_64BIT */
return cc; return cc;
} }
static inline int static inline int
do_siga_input(struct subchannel_id schid, unsigned int mask) do_siga_input(struct subchannel_id schid, unsigned int mask)
{ {
register unsigned long reg0 asm ("0") = 1;
register struct subchannel_id reg1 asm ("1") = schid;
register unsigned long reg2 asm ("2") = mask;
int cc; int cc;
#ifndef CONFIG_64BIT asm volatile(
asm volatile ( " siga 0\n"
"lhi 0,1 \n\t" " ipm %0\n"
"lr 1,%1 \n\t" " srl %0,28\n"
"lr 2,%2 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc) : "=d" (cc)
: "d" (schid), "d" (mask) : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
: "cc", "0", "1", "2", "memory"
);
#else /* CONFIG_64BIT */
asm volatile (
"lghi 0,1 \n\t"
"llgfr 1,%1 \n\t"
"llgfr 2,%2 \n\t"
"siga 0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (schid), "d" (mask)
: "cc", "0", "1", "2", "memory"
);
#endif /* CONFIG_64BIT */
return cc; return cc;
} }
...@@ -389,93 +353,35 @@ static inline int ...@@ -389,93 +353,35 @@ static inline int
do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
unsigned int fc) unsigned int fc)
{ {
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
int cc; int cc;
__u32 busy_bit;
#ifndef CONFIG_64BIT
asm volatile (
"lhi 0,0 \n\t"
"lr 1,%2 \n\t"
"lr 2,%3 \n\t"
"siga 0 \n\t"
"0:"
"ipm %0 \n\t"
"srl %0,28 \n\t"
"srl 0,31 \n\t"
"lr %1,0 \n\t"
"1: \n\t"
".section .fixup,\"ax\"\n\t"
"2: \n\t"
"lhi %0,%4 \n\t"
"bras 1,3f \n\t"
".long 1b \n\t"
"3: \n\t"
"l 1,0(1) \n\t"
"br 1 \n\t"
".previous \n\t"
".section __ex_table,\"a\"\n\t"
".align 4 \n\t"
".long 0b,2b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
: "d" (schid), "d" (mask),
"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "0", "1", "2", "memory"
);
#else /* CONFIG_64BIT */
asm volatile (
"llgfr 0,%5 \n\t"
"lgr 1,%2 \n\t"
"llgfr 2,%3 \n\t"
"siga 0 \n\t"
"0:"
"ipm %0 \n\t"
"srl %0,28 \n\t"
"srl 0,31 \n\t"
"llgfr %1,0 \n\t"
"1: \n\t"
".section .fixup,\"ax\"\n\t"
"lghi %0,%4 \n\t"
"jg 1b \n\t"
".previous\n\t"
".section __ex_table,\"a\"\n\t"
".align 8 \n\t"
".quad 0b,1b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
: "d" (schid), "d" (mask),
"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
: "cc", "0", "1", "2", "memory"
);
#endif /* CONFIG_64BIT */
(*bb) = busy_bit; asm volatile(
" siga 0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
: "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "memory");
(*bb) = ((unsigned int) __fc) >> 31;
return cc; return cc;
} }
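The EX_TABLE macro above condenses the open-coded .section .fixup / .section __ex_table pairs it replaces: if the instruction at the first label faults, execution resumes at the second label, and the matching-"0" input pre-loads the error code into the result register, so it survives only on the fault path. A hedged sketch of the same pattern (hypothetical helper; assumes the EX_TABLE macro introduced elsewhere in this patch, and -14 stands in for -EFAULT):

static inline int load_word_checked(const int *addr, int *val)
{
        int rc, tmp;

        asm volatile(
                "0:     l       %1,0(%2)\n"     /* may raise an access exception */
                "       lhi     %0,0\n"         /* reached only if the load worked */
                "1:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc), "=d" (tmp)
                : "a" (addr), "0" (-14));       /* -14: hypothetical -EFAULT */
        if (rc == 0)
                *val = tmp;
        return rc;
}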
static inline unsigned long static inline unsigned long
do_clear_global_summary(void) do_clear_global_summary(void)
{ {
register unsigned long __fn asm("1") = 3;
unsigned long time; register unsigned long __tmp asm("2");
register unsigned long __time asm("3");
#ifndef CONFIG_64BIT
asm volatile ( asm volatile(
"lhi 1,3 \n\t" " .insn rre,0xb2650000,2,0"
".insn rre,0xb2650000,2,0 \n\t" : "+d" (__fn), "=d" (__tmp), "=d" (__time));
"lr %0,3 \n\t" return __time;
: "=d" (time) : : "cc", "1", "2", "3"
);
#else /* CONFIG_64BIT */
asm volatile (
"lghi 1,3 \n\t"
".insn rre,0xb2650000,2,0 \n\t"
"lgr %0,3 \n\t"
: "=d" (time) : : "cc", "1", "2", "3"
);
#endif /* CONFIG_64BIT */
return time;
} }
/* /*
......
...@@ -534,19 +534,15 @@ iucv_add_handler (handler *new) ...@@ -534,19 +534,15 @@ iucv_add_handler (handler *new)
* *
* Returns: return code from CP's IUCV call * Returns: return code from CP's IUCV call
*/ */
static __inline__ ulong static inline ulong b2f0(__u32 code, void *parm)
b2f0(__u32 code, void *parm)
{ {
register unsigned long reg0 asm ("0");
register unsigned long reg1 asm ("1");
iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param)); iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
asm volatile ( reg0 = code;
"LRA 1,0(%1)\n\t" reg1 = virt_to_phys(parm);
"LR 0,%0\n\t" asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1));
".long 0xb2f01000"
:
: "d" (code), "a" (parm)
: "0", "1"
);
iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param)); iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
...@@ -1248,6 +1244,8 @@ iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit) ...@@ -1248,6 +1244,8 @@ iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
static int static int
iucv_query_generic(int want_maxconn) iucv_query_generic(int want_maxconn)
{ {
register unsigned long reg0 asm ("0");
register unsigned long reg1 asm ("1");
iparml_purge *parm = (iparml_purge *)grab_param(); iparml_purge *parm = (iparml_purge *)grab_param();
int bufsize, maxconn; int bufsize, maxconn;
int ccode; int ccode;
...@@ -1256,18 +1254,15 @@ iucv_query_generic(int want_maxconn) ...@@ -1256,18 +1254,15 @@ iucv_query_generic(int want_maxconn)
* Call b2f0 and store R0 (max buffer size), * Call b2f0 and store R0 (max buffer size),
* R1 (max connections) and CC. * R1 (max connections) and CC.
*/ */
asm volatile ( reg0 = QUERY;
"LRA 1,0(%4)\n\t" reg1 = virt_to_phys(parm);
"LR 0,%3\n\t" asm volatile(
".long 0xb2f01000\n\t" " .long 0xb2f01000\n"
"IPM %0\n\t" " ipm %0\n"
"SRL %0,28\n\t" " srl %0,28\n"
"ST 0,%1\n\t" : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
"ST 1,%2\n\t" bufsize = reg0;
: "=d" (ccode), "=m" (bufsize), "=m" (maxconn) maxconn = reg1;
: "d" (QUERY), "a" (parm)
: "0", "1", "cc"
);
release_param(parm); release_param(parm);
if (ccode) if (ccode)
......
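The b2f0 and iucv_query_generic rewrites show the output half of the register-variable idiom: values the instruction leaves in R0/R1 come back through "+d" operands instead of being stored with ST. The shape, as a sketch (b2f0_like is a hypothetical name):

static inline int b2f0_like(unsigned long code, void *parm,
                            unsigned long *r0_out, unsigned long *r1_out)
{
        register unsigned long reg0 asm("0") = code;
        register unsigned long reg1 asm("1") = virt_to_phys(parm);
        int cc;

        asm volatile(
                "       .long   0xb2f01000\n"   /* IUCV, opcode as in the patch */
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc), "+d" (reg0), "+d" (reg1) : : "cc", "memory");
        *r0_out = reg0;         /* e.g. max buffer size after QUERY */
        *r1_out = reg1;         /* e.g. max connections after QUERY */
        return cc;
}

The virt_to_phys call replaces the removed LRA instruction, moving the address translation out of the asm body.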
...@@ -253,10 +253,11 @@ s390_revalidate_registers(struct mci *mci) ...@@ -253,10 +253,11 @@ s390_revalidate_registers(struct mci *mci)
kill_task = 1; kill_task = 1;
#ifndef CONFIG_64BIT #ifndef CONFIG_64BIT
asm volatile("ld 0,0(%0)\n" asm volatile(
"ld 2,8(%0)\n" " ld 0,0(%0)\n"
"ld 4,16(%0)\n" " ld 2,8(%0)\n"
"ld 6,24(%0)" " ld 4,16(%0)\n"
" ld 6,24(%0)"
: : "a" (&S390_lowcore.floating_pt_save_area)); : : "a" (&S390_lowcore.floating_pt_save_area));
#endif #endif
...@@ -274,36 +275,35 @@ s390_revalidate_registers(struct mci *mci) ...@@ -274,36 +275,35 @@ s390_revalidate_registers(struct mci *mci)
* Floating point control register can't be restored. * Floating point control register can't be restored.
* Task will be terminated. * Task will be terminated.
*/ */
asm volatile ("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
kill_task = 1; kill_task = 1;
} } else
else asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
asm volatile (
"lfpc 0(%0)" asm volatile(
: : "a" (fpt_creg_save_area)); " ld 0,0(%0)\n"
" ld 1,8(%0)\n"
asm volatile("ld 0,0(%0)\n" " ld 2,16(%0)\n"
"ld 1,8(%0)\n" " ld 3,24(%0)\n"
"ld 2,16(%0)\n" " ld 4,32(%0)\n"
"ld 3,24(%0)\n" " ld 5,40(%0)\n"
"ld 4,32(%0)\n" " ld 6,48(%0)\n"
"ld 5,40(%0)\n" " ld 7,56(%0)\n"
"ld 6,48(%0)\n" " ld 8,64(%0)\n"
"ld 7,56(%0)\n" " ld 9,72(%0)\n"
"ld 8,64(%0)\n" " ld 10,80(%0)\n"
"ld 9,72(%0)\n" " ld 11,88(%0)\n"
"ld 10,80(%0)\n" " ld 12,96(%0)\n"
"ld 11,88(%0)\n" " ld 13,104(%0)\n"
"ld 12,96(%0)\n" " ld 14,112(%0)\n"
"ld 13,104(%0)\n" " ld 15,120(%0)\n"
"ld 14,112(%0)\n"
"ld 15,120(%0)\n"
: : "a" (fpt_save_area)); : : "a" (fpt_save_area));
} }
/* Revalidate access registers */ /* Revalidate access registers */
asm volatile("lam 0,15,0(%0)" asm volatile(
" lam 0,15,0(%0)"
: : "a" (&S390_lowcore.access_regs_save_area)); : : "a" (&S390_lowcore.access_regs_save_area));
if (!mci->ar) if (!mci->ar)
/* /*
...@@ -321,10 +321,12 @@ s390_revalidate_registers(struct mci *mci) ...@@ -321,10 +321,12 @@ s390_revalidate_registers(struct mci *mci)
s390_handle_damage("invalid control registers."); s390_handle_damage("invalid control registers.");
else else
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
asm volatile("lctlg 0,15,0(%0)" asm volatile(
" lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area)); : : "a" (&S390_lowcore.cregs_save_area));
#else #else
asm volatile("lctl 0,15,0(%0)" asm volatile(
" lctl 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area)); : : "a" (&S390_lowcore.cregs_save_area));
#endif #endif
...@@ -339,19 +341,22 @@ s390_revalidate_registers(struct mci *mci) ...@@ -339,19 +341,22 @@ s390_revalidate_registers(struct mci *mci)
* old contents (should be zero) otherwise set it to zero. * old contents (should be zero) otherwise set it to zero.
*/ */
if (!mci->pr) if (!mci->pr)
asm volatile("sr 0,0\n" asm volatile(
"sckpf" " sr 0,0\n"
" sckpf"
: : : "0", "cc"); : : : "0", "cc");
else else
asm volatile( asm volatile(
"l 0,0(%0)\n" " l 0,0(%0)\n"
"sckpf" " sckpf"
: : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc"); : : "a" (&S390_lowcore.tod_progreg_save_area)
: "0", "cc");
#endif #endif
/* Revalidate clock comparator register */ /* Revalidate clock comparator register */
asm volatile ("stck 0(%1)\n" asm volatile(
"sckc 0(%1)" " stck 0(%1)\n"
" sckc 0(%1)"
: "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
/* Check if old PSW is valid */ /* Check if old PSW is valid */
......
...@@ -80,7 +80,7 @@ static inline int appldata_asm(struct appldata_product_id *id, ...@@ -80,7 +80,7 @@ static inline int appldata_asm(struct appldata_product_id *id,
parm_list.product_id_addr = (unsigned long) id; parm_list.product_id_addr = (unsigned long) id;
parm_list.buffer_addr = virt_to_phys(buffer); parm_list.buffer_addr = virt_to_phys(buffer);
asm volatile( asm volatile(
"diag %1,%0,0xdc" " diag %1,%0,0xdc"
: "=d" (ry) : "=d" (ry)
: "d" (&parm_list), "m" (parm_list), "m" (*id) : "d" (&parm_list), "m" (parm_list), "m" (*id)
: "cc"); : "cc");
......
...@@ -30,9 +30,29 @@ typedef struct { ...@@ -30,9 +30,29 @@ typedef struct {
#ifdef __KERNEL__ #ifdef __KERNEL__
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CS_LOOP(ptr, op_val, op_string) ({ \ #define __CS_LOOP(ptr, op_val, op_string) ({ \
typeof(ptr->counter) old_val, new_val; \ typeof(ptr->counter) old_val, new_val; \
__asm__ __volatile__(" l %0,0(%3)\n" \ asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), \
"=Q" (((atomic_t *)(ptr))->counter) \
: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
: "cc", "memory"); \
new_val; \
})
#else /* __GNUC__ */
#define __CS_LOOP(ptr, op_val, op_string) ({ \
typeof(ptr->counter) old_val, new_val; \
asm volatile( \
" l %0,0(%3)\n" \
"0: lr %1,%0\n" \ "0: lr %1,%0\n" \
op_string " %1,%4\n" \ op_string " %1,%4\n" \
" cs %0,%1,0(%3)\n" \ " cs %0,%1,0(%3)\n" \
...@@ -41,9 +61,12 @@ typedef struct { ...@@ -41,9 +61,12 @@ typedef struct {
"=m" (((atomic_t *)(ptr))->counter) \ "=m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val), \ : "a" (ptr), "d" (op_val), \
"m" (((atomic_t *)(ptr))->counter) \ "m" (((atomic_t *)(ptr))->counter) \
: "cc", "memory" ); \ : "cc", "memory"); \
new_val; \ new_val; \
}) })
#endif /* __GNUC__ */
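For readers without s390 assembly, the loop __CS_LOOP implements has a direct portable analogue: load the old value, apply the operation, and retry the compare-and-swap until no other CPU changed the word in between. A sketch using gcc __atomic builtins (any architecture, same semantics):

static inline int cs_loop_add(int *counter, int val)
{
        int old, new;

        old = __atomic_load_n(counter, __ATOMIC_RELAXED);
        do {
                new = old + val;        /* op_string, e.g. "ar", goes here */
        } while (!__atomic_compare_exchange_n(counter, &old, new, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
        return new;                     /* __CS_LOOP also yields new_val */
}

The "Q" constraint in the new variant only changes how the memory operand is addressed (base plus short displacement, no index register); the retry logic is identical in both branches.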
#define atomic_read(v) ((v)->counter) #define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i)) #define atomic_set(v,i) (((v)->counter) = (i))
...@@ -81,10 +104,19 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) ...@@ -81,10 +104,19 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
__asm__ __volatile__(" cs %0,%3,0(%2)\n" #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile(
" cs %0,%2,%1"
: "+d" (old), "=Q" (v->counter)
: "d" (new), "Q" (v->counter)
: "cc", "memory");
#else /* __GNUC__ */
asm volatile(
" cs %0,%3,0(%2)"
: "+d" (old), "=m" (v->counter) : "+d" (old), "=m" (v->counter)
: "a" (v), "d" (new), "m" (v->counter) : "a" (v), "d" (new), "m" (v->counter)
: "cc", "memory" ); : "cc", "memory");
#endif /* __GNUC__ */
return old; return old;
} }
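Because atomic_cmpxchg returns the value it actually found, callers loop until the value they computed from is still the one in memory. A usage sketch (hypothetical helper, the standard pattern):

static inline int atomic_inc_not_zero_like(atomic_t *v)
{
        int c, old;

        c = atomic_read(v);
        while (c != 0 && (old = atomic_cmpxchg(v, c, c + 1)) != c)
                c = old;        /* lost the race: retry from the fresh value */
        return c != 0;
}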
...@@ -113,9 +145,29 @@ typedef struct { ...@@ -113,9 +145,29 @@ typedef struct {
} __attribute__ ((aligned (8))) atomic64_t; } __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __CSG_LOOP(ptr, op_val, op_string) ({ \ #define __CSG_LOOP(ptr, op_val, op_string) ({ \
typeof(ptr->counter) old_val, new_val; \ typeof(ptr->counter) old_val, new_val; \
__asm__ __volatile__(" lg %0,0(%3)\n" \ asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val), \
"=Q" (((atomic_t *)(ptr))->counter) \
: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
: "cc", "memory" ); \
new_val; \
})
#else /* __GNUC__ */
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
typeof(ptr->counter) old_val, new_val; \
asm volatile( \
" lg %0,0(%3)\n" \
"0: lgr %1,%0\n" \ "0: lgr %1,%0\n" \
op_string " %1,%4\n" \ op_string " %1,%4\n" \
" csg %0,%1,0(%3)\n" \ " csg %0,%1,0(%3)\n" \
...@@ -127,6 +179,9 @@ typedef struct { ...@@ -127,6 +179,9 @@ typedef struct {
: "cc", "memory" ); \ : "cc", "memory" ); \
new_val; \ new_val; \
}) })
#endif /* __GNUC__ */
#define atomic64_read(v) ((v)->counter) #define atomic64_read(v) ((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i)) #define atomic64_set(v,i) (((v)->counter) = (i))
...@@ -163,10 +218,19 @@ static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v) ...@@ -163,10 +218,19 @@ static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
static __inline__ long long atomic64_cmpxchg(atomic64_t *v, static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
long long old, long long new) long long old, long long new)
{ {
__asm__ __volatile__(" csg %0,%3,0(%2)\n" #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile(
" csg %0,%2,%1"
: "+d" (old), "=Q" (v->counter)
: "d" (new), "Q" (v->counter)
: "cc", "memory");
#else /* __GNUC__ */
asm volatile(
" csg %0,%3,0(%2)"
: "+d" (old), "=m" (v->counter) : "+d" (old), "=m" (v->counter)
: "a" (v), "d" (new), "m" (v->counter) : "a" (v), "d" (new), "m" (v->counter)
: "cc", "memory" ); : "cc", "memory");
#endif /* __GNUC__ */
return old; return old;
} }
......
...@@ -67,8 +67,25 @@ extern const char _sb_findmap[]; ...@@ -67,8 +67,25 @@ extern const char _sb_findmap[];
#define __BITOPS_AND "nr" #define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr" #define __BITOPS_XOR "xr"
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \
" l %0,%2\n" \
"0: lr %1,%0\n" \
__op_string " %1,%3\n" \
" cs %0,%1,%2\n" \
" jl 0b" \
: "=&d" (__old), "=&d" (__new), \
"=Q" (*(unsigned long *) __addr) \
: "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc");
#else /* __GNUC__ */
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
__asm__ __volatile__(" l %0,0(%4)\n" \ asm volatile( \
" l %0,0(%4)\n" \
"0: lr %1,%0\n" \ "0: lr %1,%0\n" \
__op_string " %1,%3\n" \ __op_string " %1,%3\n" \
" cs %0,%1,0(%4)\n" \ " cs %0,%1,0(%4)\n" \
...@@ -76,7 +93,9 @@ extern const char _sb_findmap[]; ...@@ -76,7 +93,9 @@ extern const char _sb_findmap[];
: "=&d" (__old), "=&d" (__new), \ : "=&d" (__old), "=&d" (__new), \
"=m" (*(unsigned long *) __addr) \ "=m" (*(unsigned long *) __addr) \
: "d" (__val), "a" (__addr), \ : "d" (__val), "a" (__addr), \
"m" (*(unsigned long *) __addr) : "cc" ); "m" (*(unsigned long *) __addr) : "cc");
#endif /* __GNUC__ */
#else /* __s390x__ */ #else /* __s390x__ */
...@@ -86,8 +105,25 @@ extern const char _sb_findmap[]; ...@@ -86,8 +105,25 @@ extern const char _sb_findmap[];
#define __BITOPS_AND "ngr" #define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr" #define __BITOPS_XOR "xgr"
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
asm volatile( \
" lg %0,%2\n" \
"0: lgr %1,%0\n" \
__op_string " %1,%3\n" \
" csg %0,%1,%2\n" \
" jl 0b" \
: "=&d" (__old), "=&d" (__new), \
"=Q" (*(unsigned long *) __addr) \
: "d" (__val), "Q" (*(unsigned long *) __addr) \
: "cc");
#else /* __GNUC__ */
#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
__asm__ __volatile__(" lg %0,0(%4)\n" \ asm volatile( \
" lg %0,0(%4)\n" \
"0: lgr %1,%0\n" \ "0: lgr %1,%0\n" \
__op_string " %1,%3\n" \ __op_string " %1,%3\n" \
" csg %0,%1,0(%4)\n" \ " csg %0,%1,0(%4)\n" \
...@@ -95,12 +131,15 @@ extern const char _sb_findmap[]; ...@@ -95,12 +131,15 @@ extern const char _sb_findmap[];
: "=&d" (__old), "=&d" (__new), \ : "=&d" (__old), "=&d" (__new), \
"=m" (*(unsigned long *) __addr) \ "=m" (*(unsigned long *) __addr) \
: "d" (__val), "a" (__addr), \ : "d" (__val), "a" (__addr), \
"m" (*(unsigned long *) __addr) : "cc" ); "m" (*(unsigned long *) __addr) : "cc");
#endif /* __GNUC__ */
#endif /* __s390x__ */ #endif /* __s390x__ */
#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" ) #define __BITOPS_BARRIER() asm volatile("" : : : "memory")
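None of the hunks here show a consumer of __BITOPS_LOOP; simplified from the surrounding header (so treat names and details as a sketch), set_bit computes the word address and the mask and lets the loop retry the CS/CSG until the update lands:

static inline void set_bit_cs_like(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* byte address of the word containing bit nr */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* OR mask for the bit within that word */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}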
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
...@@ -217,10 +256,10 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -217,10 +256,10 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr; unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
asm volatile("oc 0(1,%1),0(%2)" asm volatile(
: "=m" (*(char *) addr) " oc 0(1,%1),0(%2)"
: "a" (addr), "a" (_oi_bitmap + (nr & 7)), : "=m" (*(char *) addr) : "a" (addr),
"m" (*(char *) addr) : "cc" ); "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
} }
static inline void static inline void
...@@ -229,40 +268,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr) ...@@ -229,40 +268,7 @@ __constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
unsigned long addr; unsigned long addr;
addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
switch (nr&7) { *(unsigned char *) addr |= 1 << (nr & 7);
case 0:
asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 1:
asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 2:
asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 3:
asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 4:
asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 5:
asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 6:
asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 7:
asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
}
} }
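The eight-way switch could collapse to one byte OR because nr is a compile-time constant on this path: gcc folds 1 << (nr & 7) into an immediate and, on s390, typically emits the very OI instruction the switch spelled out. The constant/non-constant split itself is the usual __builtin_constant_p dispatch; roughly (a sketch with a hypothetical name, since the real set_bit_simple definition is truncated by this rendering):

#define set_bit_simple_like(nr, addr)                   \
        (__builtin_constant_p(nr) ?                     \
         __constant_set_bit((nr), (addr)) :             \
         __set_bit((nr), (addr)))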
#define set_bit_simple(nr,addr) \ #define set_bit_simple(nr,addr) \
...@@ -279,10 +285,10 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -279,10 +285,10 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr; unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
asm volatile("nc 0(1,%1),0(%2)" asm volatile(
: "=m" (*(char *) addr) " nc 0(1,%1),0(%2)"
: "a" (addr), "a" (_ni_bitmap + (nr & 7)), : "=m" (*(char *) addr) : "a" (addr),
"m" (*(char *) addr) : "cc" ); "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc");
} }
static inline void static inline void
...@@ -291,40 +297,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr) ...@@ -291,40 +297,7 @@ __constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
unsigned long addr; unsigned long addr;
addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
switch (nr&7) { *(unsigned char *) addr &= ~(1 << (nr & 7));
case 0:
asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 1:
asm volatile ("ni 0(%1),0xFD": "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 2:
asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 3:
asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 4:
asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 5:
asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 6:
asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 7:
asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
}
} }
#define clear_bit_simple(nr,addr) \ #define clear_bit_simple(nr,addr) \
...@@ -340,10 +313,10 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) ...@@ -340,10 +313,10 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long addr; unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
asm volatile("xc 0(1,%1),0(%2)" asm volatile(
: "=m" (*(char *) addr) " xc 0(1,%1),0(%2)"
: "a" (addr), "a" (_oi_bitmap + (nr & 7)), : "=m" (*(char *) addr) : "a" (addr),
"m" (*(char *) addr) : "cc" ); "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" );
} }
static inline void static inline void
...@@ -352,40 +325,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) ...@@ -352,40 +325,7 @@ __constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
unsigned long addr; unsigned long addr;
addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
switch (nr&7) { *(unsigned char *) addr ^= 1 << (nr & 7);
case 0:
asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 1:
asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 2:
asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 3:
asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 4:
asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 5:
asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 6:
asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
case 7:
asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
: "a" (addr), "m" (*(char *) addr) : "cc" );
break;
}
} }
#define change_bit_simple(nr,addr) \ #define change_bit_simple(nr,addr) \
...@@ -404,10 +344,11 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) ...@@ -404,10 +344,11 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
ch = *(unsigned char *) addr; ch = *(unsigned char *) addr;
asm volatile("oc 0(1,%1),0(%2)" asm volatile(
" oc 0(1,%1),0(%2)"
: "=m" (*(char *) addr) : "=m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7)), : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
"m" (*(char *) addr) : "cc", "memory" ); "m" (*(char *) addr) : "cc", "memory");
return (ch >> (nr & 7)) & 1; return (ch >> (nr & 7)) & 1;
} }
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
...@@ -423,10 +364,11 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) ...@@ -423,10 +364,11 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
ch = *(unsigned char *) addr; ch = *(unsigned char *) addr;
asm volatile("nc 0(1,%1),0(%2)" asm volatile(
" nc 0(1,%1),0(%2)"
: "=m" (*(char *) addr) : "=m" (*(char *) addr)
: "a" (addr), "a" (_ni_bitmap + (nr & 7)), : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
"m" (*(char *) addr) : "cc", "memory" ); "m" (*(char *) addr) : "cc", "memory");
return (ch >> (nr & 7)) & 1; return (ch >> (nr & 7)) & 1;
} }
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
...@@ -442,10 +384,11 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) ...@@ -442,10 +384,11 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
ch = *(unsigned char *) addr; ch = *(unsigned char *) addr;
asm volatile("xc 0(1,%1),0(%2)" asm volatile(
" xc 0(1,%1),0(%2)"
: "=m" (*(char *) addr) : "=m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7)), : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
"m" (*(char *) addr) : "cc", "memory" ); "m" (*(char *) addr) : "cc", "memory");
return (ch >> (nr & 7)) & 1; return (ch >> (nr & 7)) & 1;
} }
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
...@@ -557,7 +500,8 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size) ...@@ -557,7 +500,8 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
if (!size) if (!size)
return 0; return 0;
__asm__(" lhi %1,-1\n" asm volatile(
" lhi %1,-1\n"
" lr %2,%3\n" " lr %2,%3\n"
" slr %0,%0\n" " slr %0,%0\n"
" ahi %2,31\n" " ahi %2,31\n"
...@@ -585,7 +529,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size) ...@@ -585,7 +529,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
"4:" "4:"
: "=&a" (res), "=&d" (cmp), "=&a" (count) : "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_zb_findmap), : "a" (size), "a" (addr), "a" (&_zb_findmap),
"m" (*(addrtype *) addr) : "cc" ); "m" (*(addrtype *) addr) : "cc");
return (res < size) ? res : size; return (res < size) ? res : size;
} }
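The asm bodies of the find-bit routines are largely elided by these hunks; their contract is the generic one. A portable sketch:

static inline unsigned long find_first_zero_bit_like(const unsigned long *addr,
                                                     unsigned long size)
{
        unsigned long bits = 8 * sizeof(unsigned long);
        unsigned long i;

        for (i = 0; i < size; i++)
                if (!(addr[i / bits] & (1UL << (i % bits))))
                        return i;
        return size;            /* every bit set: return size, as above */
}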
...@@ -598,7 +542,8 @@ find_first_bit(const unsigned long * addr, unsigned long size) ...@@ -598,7 +542,8 @@ find_first_bit(const unsigned long * addr, unsigned long size)
if (!size) if (!size)
return 0; return 0;
__asm__(" slr %1,%1\n" asm volatile(
" slr %1,%1\n"
" lr %2,%3\n" " lr %2,%3\n"
" slr %0,%0\n" " slr %0,%0\n"
" ahi %2,31\n" " ahi %2,31\n"
...@@ -626,7 +571,7 @@ find_first_bit(const unsigned long * addr, unsigned long size) ...@@ -626,7 +571,7 @@ find_first_bit(const unsigned long * addr, unsigned long size)
"4:" "4:"
: "=&a" (res), "=&d" (cmp), "=&a" (count) : "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_sb_findmap), : "a" (size), "a" (addr), "a" (&_sb_findmap),
"m" (*(addrtype *) addr) : "cc" ); "m" (*(addrtype *) addr) : "cc");
return (res < size) ? res : size; return (res < size) ? res : size;
} }
...@@ -640,7 +585,8 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size) ...@@ -640,7 +585,8 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
if (!size) if (!size)
return 0; return 0;
__asm__(" lghi %1,-1\n" asm volatile(
" lghi %1,-1\n"
" lgr %2,%3\n" " lgr %2,%3\n"
" slgr %0,%0\n" " slgr %0,%0\n"
" aghi %2,63\n" " aghi %2,63\n"
...@@ -672,7 +618,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size) ...@@ -672,7 +618,7 @@ find_first_zero_bit(const unsigned long * addr, unsigned long size)
"5:" "5:"
: "=&a" (res), "=&d" (cmp), "=&a" (count) : "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_zb_findmap), : "a" (size), "a" (addr), "a" (&_zb_findmap),
"m" (*(addrtype *) addr) : "cc" ); "m" (*(addrtype *) addr) : "cc");
return (res < size) ? res : size; return (res < size) ? res : size;
} }
...@@ -684,7 +630,8 @@ find_first_bit(const unsigned long * addr, unsigned long size) ...@@ -684,7 +630,8 @@ find_first_bit(const unsigned long * addr, unsigned long size)
if (!size) if (!size)
return 0; return 0;
__asm__(" slgr %1,%1\n" asm volatile(
" slgr %1,%1\n"
" lgr %2,%3\n" " lgr %2,%3\n"
" slgr %0,%0\n" " slgr %0,%0\n"
" aghi %2,63\n" " aghi %2,63\n"
...@@ -716,7 +663,7 @@ find_first_bit(const unsigned long * addr, unsigned long size) ...@@ -716,7 +663,7 @@ find_first_bit(const unsigned long * addr, unsigned long size)
"5:" "5:"
: "=&a" (res), "=&d" (cmp), "=&a" (count) : "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_sb_findmap), : "a" (size), "a" (addr), "a" (&_sb_findmap),
"m" (*(addrtype *) addr) : "cc" ); "m" (*(addrtype *) addr) : "cc");
return (res < size) ? res : size; return (res < size) ? res : size;
} }
...@@ -832,7 +779,8 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size) ...@@ -832,7 +779,8 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size)
if (!size) if (!size)
return 0; return 0;
__asm__(" lhi %1,-1\n" asm volatile(
" lhi %1,-1\n"
" lr %2,%3\n" " lr %2,%3\n"
" ahi %2,31\n" " ahi %2,31\n"
" srl %2,5\n" " srl %2,5\n"
...@@ -861,7 +809,7 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size) ...@@ -861,7 +809,7 @@ ext2_find_first_zero_bit(void *vaddr, unsigned int size)
"4:" "4:"
: "=&a" (res), "=&d" (cmp), "=&a" (count) : "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (vaddr), "a" (&_zb_findmap), : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
"m" (*(addrtype *) vaddr) : "cc" ); "m" (*(addrtype *) vaddr) : "cc");
return (res < size) ? res : size; return (res < size) ? res : size;
} }
...@@ -875,7 +823,8 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size) ...@@ -875,7 +823,8 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size)
if (!size) if (!size)
return 0; return 0;
__asm__(" lghi %1,-1\n" asm volatile(
" lghi %1,-1\n"
" lgr %2,%3\n" " lgr %2,%3\n"
" aghi %2,63\n" " aghi %2,63\n"
" srlg %2,%2,6\n" " srlg %2,%2,6\n"
...@@ -907,7 +856,7 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size) ...@@ -907,7 +856,7 @@ ext2_find_first_zero_bit(void *vaddr, unsigned long size)
"5:" "5:"
: "=&a" (res), "=&d" (cmp), "=&a" (count) : "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (vaddr), "a" (&_zb_findmap), : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
"m" (*(addrtype *) vaddr) : "cc" ); "m" (*(addrtype *) vaddr) : "cc");
return (res < size) ? res : size; return (res < size) ? res : size;
} }
...@@ -927,13 +876,16 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) ...@@ -927,13 +876,16 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
p = addr + offset / __BITOPS_WORDSIZE; p = addr + offset / __BITOPS_WORDSIZE;
if (bit) { if (bit) {
#ifndef __s390x__ #ifndef __s390x__
asm(" ic %0,0(%1)\n" asm volatile(
" ic %0,0(%1)\n"
" icm %0,2,1(%1)\n" " icm %0,2,1(%1)\n"
" icm %0,4,2(%1)\n" " icm %0,4,2(%1)\n"
" icm %0,8,3(%1)" " icm %0,8,3(%1)"
: "=&a" (word) : "a" (p), "m" (*p) : "cc" ); : "=&a" (word) : "a" (p), "m" (*p) : "cc");
#else #else
asm(" lrvg %0,%1" : "=a" (word) : "m" (*p) ); asm volatile(
" lrvg %0,%1"
: "=a" (word) : "m" (*p) );
#endif #endif
/* /*
* s390 version of ffz returns __BITOPS_WORDSIZE * s390 version of ffz returns __BITOPS_WORDSIZE
......
...@@ -14,60 +14,54 @@ ...@@ -14,60 +14,54 @@
#ifdef __GNUC__ #ifdef __GNUC__
#ifdef __s390x__ #ifdef __s390x__
static __inline__ __u64 ___arch__swab64p(const __u64 *x) static inline __u64 ___arch__swab64p(const __u64 *x)
{ {
__u64 result; __u64 result;
__asm__ __volatile__ ( asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
" lrvg %0,%1"
: "=d" (result) : "m" (*x) );
return result; return result;
} }
static __inline__ __u64 ___arch__swab64(__u64 x) static inline __u64 ___arch__swab64(__u64 x)
{ {
__u64 result; __u64 result;
__asm__ __volatile__ ( asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
" lrvgr %0,%1"
: "=d" (result) : "d" (x) );
return result; return result;
} }
static __inline__ void ___arch__swab64s(__u64 *x) static inline void ___arch__swab64s(__u64 *x)
{ {
*x = ___arch__swab64p(x); *x = ___arch__swab64p(x);
} }
#endif /* __s390x__ */ #endif /* __s390x__ */
static __inline__ __u32 ___arch__swab32p(const __u32 *x) static inline __u32 ___arch__swab32p(const __u32 *x)
{ {
__u32 result; __u32 result;
__asm__ __volatile__ ( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" icm %0,8,3(%1)\n" " icm %0,8,3(%1)\n"
" icm %0,4,2(%1)\n" " icm %0,4,2(%1)\n"
" icm %0,2,1(%1)\n" " icm %0,2,1(%1)\n"
" ic %0,0(%1)" " ic %0,0(%1)"
: "=&d" (result) : "a" (x), "m" (*x) : "cc" ); : "=&d" (result) : "a" (x), "m" (*x) : "cc");
#else /* __s390x__ */ #else /* __s390x__ */
" lrv %0,%1" " lrv %0,%1"
: "=d" (result) : "m" (*x) ); : "=d" (result) : "m" (*x));
#endif /* __s390x__ */ #endif /* __s390x__ */
return result; return result;
} }
static __inline__ __u32 ___arch__swab32(__u32 x) static inline __u32 ___arch__swab32(__u32 x)
{ {
#ifndef __s390x__ #ifndef __s390x__
return ___arch__swab32p(&x); return ___arch__swab32p(&x);
#else /* __s390x__ */ #else /* __s390x__ */
__u32 result; __u32 result;
__asm__ __volatile__ ( asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
" lrvr %0,%1"
: "=d" (result) : "d" (x) );
return result; return result;
#endif /* __s390x__ */ #endif /* __s390x__ */
} }
...@@ -81,14 +75,14 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x) ...@@ -81,14 +75,14 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
{ {
__u16 result; __u16 result;
__asm__ __volatile__ ( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" icm %0,2,1(%1)\n" " icm %0,2,1(%1)\n"
" ic %0,0(%1)\n" " ic %0,0(%1)\n"
: "=&d" (result) : "a" (x), "m" (*x) : "cc" ); : "=&d" (result) : "a" (x), "m" (*x) : "cc");
#else /* __s390x__ */ #else /* __s390x__ */
" lrvh %0,%1" " lrvh %0,%1"
: "=d" (result) : "m" (*x) ); : "=d" (result) : "m" (*x));
#endif /* __s390x__ */ #endif /* __s390x__ */
return result; return result;
} }
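All the lrv/lrvg/icm sequences in this file compute plain byte reversal; the portable equivalent of ___arch__swab32, for reference:

static inline __u32 swab32_like(__u32 x)
{
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
}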
......
...@@ -30,57 +30,13 @@ ...@@ -30,57 +30,13 @@
static inline unsigned int static inline unsigned int
csum_partial(const unsigned char * buff, int len, unsigned int sum) csum_partial(const unsigned char * buff, int len, unsigned int sum)
{ {
/* register unsigned long reg2 asm("2") = (unsigned long) buff;
* Experiments with ethernet and slip connections show that buf register unsigned long reg3 asm("3") = (unsigned long) len;
* is aligned on either a 2-byte or 4-byte boundary.
*/
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) buff;
rp.subreg.odd = (unsigned long) len;
__asm__ __volatile__ (
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum), "+&a" (rp) : : "cc", "memory" );
#else /* __s390x__ */
__asm__ __volatile__ (
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum)
: "d" (buff), "d" (len)
: "cc", "memory", "2", "3" );
#endif /* __s390x__ */
return sum;
}
/*
* csum_partial as an inline function
*/
static inline unsigned int
csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
{
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) buff; asm volatile(
rp.subreg.odd = (unsigned long) len;
__asm__ __volatile__ (
"0: cksm %0,%1\n" /* do checksum on longs */ "0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n" " jo 0b\n"
: "+&d" (sum), "+&a" (rp) : : "cc", "memory" ); : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
#else /* __s390x__ */
__asm__ __volatile__ (
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum)
: "d" (buff), "d" (len)
: "cc", "memory", "2", "3" );
#endif /* __s390x__ */
return sum; return sum;
} }
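CKSM consumes an even/odd register pair holding (address, length), accumulating a 32-bit one's-complement sum and updating the pair as it goes; that pairing is why the rewrite pins reg2/reg3 explicitly where the 31-bit code used register_pair. A portable sketch of the accumulation (big-endian byte placement, end-around carry; folding to 16 bits is left to csum_fold):

static inline unsigned int cksm_like(const unsigned char *buf, int len,
                                     unsigned int sum)
{
        int i;

        for (i = 0; i < len; i++) {
                unsigned int v, s;

                v = (unsigned int) buf[i] << (24 - 8 * (i & 3));
                s = sum + v;            /* 32-bit add ...            */
                sum = s + (s < sum);    /* ... with end-around carry */
        }
        return sum;
}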
...@@ -114,7 +70,7 @@ static inline unsigned int ...@@ -114,7 +70,7 @@ static inline unsigned int
csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum) csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum)
{ {
memcpy(dst,src,len); memcpy(dst,src,len);
return csum_partial_inline(dst, len, sum); return csum_partial(dst, len, sum);
} }
/* /*
...@@ -126,20 +82,20 @@ csum_fold(unsigned int sum) ...@@ -126,20 +82,20 @@ csum_fold(unsigned int sum)
#ifndef __s390x__ #ifndef __s390x__
register_pair rp; register_pair rp;
__asm__ __volatile__ ( asm volatile(
" slr %N1,%N1\n" /* %0 = H L */ " slr %N1,%N1\n" /* %0 = H L */
" lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */
" srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
" alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
" alr %0,%1\n" /* %0 = H+L+C L+H */ " alr %0,%1\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */ " srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum), "=d" (rp) : : "cc" ); : "+&d" (sum), "=d" (rp) : : "cc");
#else /* __s390x__ */ #else /* __s390x__ */
__asm__ __volatile__ ( asm volatile(
" sr 3,3\n" /* %0 = H*65536 + L */ " sr 3,3\n" /* %0 = H*65536 + L */
" lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */ " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */
" srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */ " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */
" alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */ " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */
" alr %0,2\n" /* %0 = H+L+C L+H */ " alr %0,2\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */ " srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum) : : "cc", "2", "3"); : "+&d" (sum) : : "cc", "2", "3");
...@@ -155,29 +111,7 @@ csum_fold(unsigned int sum) ...@@ -155,29 +111,7 @@ csum_fold(unsigned int sum)
static inline unsigned short static inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl) ip_fast_csum(unsigned char *iph, unsigned int ihl)
{ {
unsigned long sum; return csum_fold(csum_partial(iph, ihl*4, 0));
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) iph;
rp.subreg.odd = (unsigned long) ihl*4;
__asm__ __volatile__ (
" sr %0,%0\n" /* set sum to zero */
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "=&d" (sum), "+&a" (rp) : : "cc", "memory" );
#else /* __s390x__ */
__asm__ __volatile__ (
" slgr %0,%0\n" /* set sum to zero */
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on ints */
" jo 0b\n"
: "=&d" (sum)
: "d" (iph), "d" (ihl*4)
: "cc", "memory", "2", "3" );
#endif /* __s390x__ */
return csum_fold(sum);
} }
/* /*
...@@ -190,28 +124,28 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, ...@@ -190,28 +124,28 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned int sum) unsigned int sum)
{ {
#ifndef __s390x__ #ifndef __s390x__
__asm__ __volatile__ ( asm volatile(
" alr %0,%1\n" /* sum += saddr */ " alr %0,%1\n" /* sum += saddr */
" brc 12,0f\n" " brc 12,0f\n"
" ahi %0,1\n" /* add carry */ " ahi %0,1\n" /* add carry */
"0:" "0:"
: "+&d" (sum) : "d" (saddr) : "cc" ); : "+&d" (sum) : "d" (saddr) : "cc");
__asm__ __volatile__ ( asm volatile(
" alr %0,%1\n" /* sum += daddr */ " alr %0,%1\n" /* sum += daddr */
" brc 12,1f\n" " brc 12,1f\n"
" ahi %0,1\n" /* add carry */ " ahi %0,1\n" /* add carry */
"1:" "1:"
: "+&d" (sum) : "d" (daddr) : "cc" ); : "+&d" (sum) : "d" (daddr) : "cc");
__asm__ __volatile__ ( asm volatile(
" alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */ " alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */
" brc 12,2f\n" " brc 12,2f\n"
" ahi %0,1\n" /* add carry */ " ahi %0,1\n" /* add carry */
"2:" "2:"
: "+&d" (sum) : "+&d" (sum)
: "d" (((unsigned int) len<<16) + (unsigned int) proto) : "d" (((unsigned int) len<<16) + (unsigned int) proto)
: "cc" ); : "cc");
#else /* __s390x__ */ #else /* __s390x__ */
__asm__ __volatile__ ( asm volatile(
" lgfr %0,%0\n" " lgfr %0,%0\n"
" algr %0,%1\n" /* sum += saddr */ " algr %0,%1\n" /* sum += saddr */
" brc 12,0f\n" " brc 12,0f\n"
...@@ -230,7 +164,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, ...@@ -230,7 +164,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
: "+&d" (sum) : "+&d" (sum)
: "d" (saddr), "d" (daddr), : "d" (saddr), "d" (daddr),
"d" (((unsigned int) len<<16) + (unsigned int) proto) "d" (((unsigned int) len<<16) + (unsigned int) proto)
: "cc", "0" ); : "cc", "0");
#endif /* __s390x__ */ #endif /* __s390x__ */
return sum; return sum;
} }
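Each alr / brc 12 / ahi ...,1 triplet above is one 32-bit add with end-around carry: brc 12 branches over the increment when the add set condition code 0 or 1, i.e. produced no carry. The same step, portably:

static inline unsigned int add32_with_carry(unsigned int sum, unsigned int x)
{
        unsigned int s = sum + x;

        return s + (s < sum);   /* +1 exactly when the add carried */
}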
......
...@@ -26,7 +26,7 @@ codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr) ...@@ -26,7 +26,7 @@ codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
{ {
if (nr-- <= 0) if (nr-- <= 0)
return; return;
__asm__ __volatile__( asm volatile(
" bras 1,1f\n" " bras 1,1f\n"
" tr 0(1,%0),0(%2)\n" " tr 0(1,%0),0(%2)\n"
"0: tr 0(256,%0),0(%2)\n" "0: tr 0(256,%0),0(%2)\n"
...@@ -35,7 +35,7 @@ codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr) ...@@ -35,7 +35,7 @@ codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
" jnm 0b\n" " jnm 0b\n"
" ex %1,0(1)" " ex %1,0(1)"
: "+&a" (addr), "+&a" (nr) : "+&a" (addr), "+&a" (nr)
: "a" (codepage) : "cc", "memory", "1" ); : "a" (codepage) : "cc", "memory", "1");
} }
#define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr) #define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
......
...@@ -27,18 +27,16 @@ ...@@ -27,18 +27,16 @@
static inline unsigned long virt_to_phys(volatile void * address) static inline unsigned long virt_to_phys(volatile void * address)
{ {
unsigned long real_address; unsigned long real_address;
__asm__ ( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" lra %0,0(%1)\n" " lra %0,0(%1)\n"
" jz 0f\n"
" sr %0,%0\n"
#else /* __s390x__ */ #else /* __s390x__ */
" lrag %0,0(%1)\n" " lrag %0,0(%1)\n"
" jz 0f\n"
" slgr %0,%0\n"
#endif /* __s390x__ */ #endif /* __s390x__ */
" jz 0f\n"
" la %0,0\n"
"0:" "0:"
: "=a" (real_address) : "a" (address) : "cc" ); : "=a" (real_address) : "a" (address) : "cc");
return real_address; return real_address;
} }
......
...@@ -10,43 +10,93 @@ ...@@ -10,43 +10,93 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
/* store then or system mask. */
#define __raw_local_irq_stosm(__or) \
({ \
unsigned long __mask; \
asm volatile( \
" stosm %0,%1" \
: "=Q" (__mask) : "i" (__or) : "memory"); \
__mask; \
})
/* store then and system mask. */
#define __raw_local_irq_stnsm(__and) \
({ \
unsigned long __mask; \
asm volatile( \
" stnsm %0,%1" \
: "=Q" (__mask) : "i" (__and) : "memory"); \
__mask; \
})
/* set system mask. */
#define __raw_local_irq_ssm(__mask) \
({ \
asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \
})
#else /* __GNUC__ */
/* store then or system mask. */
#define __raw_local_irq_stosm(__or) \
({ \
unsigned long __mask; \
asm volatile( \
" stosm 0(%1),%2" \
: "=m" (__mask) \
: "a" (&__mask), "i" (__or) : "memory"); \
__mask; \
})
/* store then and system mask. */
#define __raw_local_irq_stnsm(__and) \
({ \
unsigned long __mask; \
asm volatile( \
" stnsm 0(%1),%2" \
: "=m" (__mask) \
: "a" (&__mask), "i" (__and) : "memory"); \
__mask; \
})
/* set system mask. */
#define __raw_local_irq_ssm(__mask) \
({ \
asm volatile( \
" ssm 0(%0)" \
: : "a" (&__mask), "m" (__mask) : "memory"); \
})
#endif /* __GNUC__ */
/* interrupt control.. */ /* interrupt control.. */
#define raw_local_irq_enable() ({ \ static inline unsigned long raw_local_irq_enable(void)
unsigned long __dummy; \ {
__asm__ __volatile__ ( \ return __raw_local_irq_stosm(0x03);
"stosm 0(%1),0x03" \ }
: "=m" (__dummy) : "a" (&__dummy) : "memory" ); \
})
#define raw_local_irq_disable() ({ \
unsigned long __flags; \
__asm__ __volatile__ ( \
"stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
__flags; \
})
#define raw_local_save_flags(x) \ static inline unsigned long raw_local_irq_disable(void)
do { \ {
typecheck(unsigned long, x); \ return __raw_local_irq_stnsm(0xfc);
__asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) ); \ }
} while (0)
#define raw_local_irq_restore(x) \ #define raw_local_save_flags(x) \
do { \ do { \
typecheck(unsigned long, x); \ typecheck(unsigned long, x); \
__asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory"); \ (x) = __raw_local_irq_stosm(0x00); \
} while (0) } while (0)
#define raw_irqs_disabled() \ static inline void raw_local_irq_restore(unsigned long flags)
({ \ {
unsigned long flags; \ __raw_local_irq_ssm(flags);
raw_local_save_flags(flags); \ }
!((flags >> __FLAG_SHIFT) & 3); \
})
static inline int raw_irqs_disabled_flags(unsigned long flags) static inline int raw_irqs_disabled_flags(unsigned long flags)
{ {
return !((flags >> __FLAG_SHIFT) & 3); return !(flags & (3UL << (BITS_PER_LONG - 8)));
} }
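The rewrite reduces the irq macros to three primitives: stosm (store the mask, then OR in new bits: enable), stnsm (store, then AND: disable), and ssm (restore a saved mask). Typical use of the resulting interface, as a sketch (with_irqs_off is a hypothetical helper):

static inline void with_irqs_off(void (*fn)(void *), void *data)
{
        unsigned long flags;

        flags = raw_local_irq_disable();        /* stnsm 0xfc, old mask returned */
        fn(data);
        raw_local_irq_restore(flags);           /* ssm puts the mask back */
}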
/* For spinlocks etc */ /* For spinlocks etc */
......
...@@ -359,7 +359,7 @@ extern struct _lowcore *lowcore_ptr[]; ...@@ -359,7 +359,7 @@ extern struct _lowcore *lowcore_ptr[];
static inline void set_prefix(__u32 address) static inline void set_prefix(__u32 address)
{ {
__asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" ); asm volatile("spx %0" : : "m" (address) : "memory");
} }
#define __PANIC_MAGIC 0xDEADC0DE #define __PANIC_MAGIC 0xDEADC0DE
......
...@@ -22,68 +22,27 @@ ...@@ -22,68 +22,27 @@
#include <asm/setup.h> #include <asm/setup.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#ifndef __s390x__
static inline void clear_page(void *page)
{
register_pair rp;
rp.subreg.even = (unsigned long) page;
rp.subreg.odd = (unsigned long) 4096;
asm volatile (" slr 1,1\n"
" mvcl %0,0"
: "+&a" (rp) : : "memory", "cc", "1" );
}
static inline void copy_page(void *to, void *from)
{
if (MACHINE_HAS_MVPG)
asm volatile (" sr 0,0\n"
" mvpg %0,%1"
: : "a" ((void *)(to)), "a" ((void *)(from))
: "memory", "cc", "0" );
else
asm volatile (" mvc 0(256,%0),0(%1)\n"
" mvc 256(256,%0),256(%1)\n"
" mvc 512(256,%0),512(%1)\n"
" mvc 768(256,%0),768(%1)\n"
" mvc 1024(256,%0),1024(%1)\n"
" mvc 1280(256,%0),1280(%1)\n"
" mvc 1536(256,%0),1536(%1)\n"
" mvc 1792(256,%0),1792(%1)\n"
" mvc 2048(256,%0),2048(%1)\n"
" mvc 2304(256,%0),2304(%1)\n"
" mvc 2560(256,%0),2560(%1)\n"
" mvc 2816(256,%0),2816(%1)\n"
" mvc 3072(256,%0),3072(%1)\n"
" mvc 3328(256,%0),3328(%1)\n"
" mvc 3584(256,%0),3584(%1)\n"
" mvc 3840(256,%0),3840(%1)\n"
: : "a"((void *)(to)),"a"((void *)(from))
: "memory" );
}
#else /* __s390x__ */
static inline void clear_page(void *page) static inline void clear_page(void *page)
{ {
asm volatile (" lgr 2,%0\n" register unsigned long reg1 asm ("1") = 0;
" lghi 3,4096\n" register void *reg2 asm ("2") = page;
" slgr 1,1\n" register unsigned long reg3 asm ("3") = 4096;
asm volatile(
" mvcl 2,0" " mvcl 2,0"
: : "a" ((void *) (page)) : "+d" (reg2), "+d" (reg3) : "d" (reg1) : "memory", "cc");
: "memory", "cc", "1", "2", "3" );
} }
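The MVCL setup in the new clear_page reads: destination pair R2/R3 = (page, 4096), source pair R0/R1 with length zero, so the instruction fills the whole destination with the pad byte taken from the top byte of R1, which is zero here. In portable terms (memset as in <linux/string.h>):

static inline void clear_page_like(void *page)
{
        memset(page, 0, 4096);  /* same effect as the zero-length MVCL */
}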
static inline void copy_page(void *to, void *from) static inline void copy_page(void *to, void *from)
{ {
if (MACHINE_HAS_MVPG) if (MACHINE_HAS_MVPG) {
asm volatile (" sgr 0,0\n" register unsigned long reg0 asm ("0") = 0;
asm volatile(
" mvpg %0,%1" " mvpg %0,%1"
: : "a" ((void *)(to)), "a" ((void *)(from)) : : "a" (to), "a" (from), "d" (reg0)
: "memory", "cc", "0" ); : "memory", "cc");
else } else
asm volatile (" mvc 0(256,%0),0(%1)\n" asm volatile(
" mvc 0(256,%0),0(%1)\n"
" mvc 256(256,%0),256(%1)\n" " mvc 256(256,%0),256(%1)\n"
" mvc 512(256,%0),512(%1)\n" " mvc 512(256,%0),512(%1)\n"
" mvc 768(256,%0),768(%1)\n" " mvc 768(256,%0),768(%1)\n"
...@@ -99,12 +58,9 @@ static inline void copy_page(void *to, void *from) ...@@ -99,12 +58,9 @@ static inline void copy_page(void *to, void *from)
" mvc 3328(256,%0),3328(%1)\n" " mvc 3328(256,%0),3328(%1)\n"
" mvc 3584(256,%0),3584(%1)\n" " mvc 3584(256,%0),3584(%1)\n"
" mvc 3840(256,%0),3840(%1)\n" " mvc 3840(256,%0),3840(%1)\n"
: : "a"((void *)(to)),"a"((void *)(from)) : : "a" (to), "a" (from) : "memory");
: "memory" );
} }
#endif /* __s390x__ */
#define clear_user_page(page, vaddr, pg) clear_page(page) #define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
...@@ -159,7 +115,7 @@ extern unsigned int default_storage_key; ...@@ -159,7 +115,7 @@ extern unsigned int default_storage_key;
static inline void static inline void
page_set_storage_key(unsigned long addr, unsigned int skey) page_set_storage_key(unsigned long addr, unsigned int skey)
{ {
asm volatile ( "sske %0,%1" : : "d" (skey), "a" (addr) ); asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
} }
static inline unsigned int static inline unsigned int
...@@ -167,8 +123,7 @@ page_get_storage_key(unsigned long addr) ...@@ -167,8 +123,7 @@ page_get_storage_key(unsigned long addr)
{ {
unsigned int skey; unsigned int skey;
asm volatile ( "iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0) ); asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0));
return skey; return skey;
} }
......
...@@ -554,9 +554,10 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) ...@@ -554,9 +554,10 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
/* ipte in zarch mode can do the math */ /* ipte in zarch mode can do the math */
pte_t *pto = ptep; pte_t *pto = ptep;
#endif #endif
asm volatile ("ipte %2,%3" asm volatile(
" ipte %2,%3"
: "=m" (*ptep) : "m" (*ptep), : "=m" (*ptep) : "m" (*ptep),
"a" (pto), "a" (address) ); "a" (pto), "a" (address));
} }
pte_val(*ptep) = _PAGE_TYPE_EMPTY; pte_val(*ptep) = _PAGE_TYPE_EMPTY;
} }
...@@ -612,12 +613,13 @@ ptep_establish(struct vm_area_struct *vma, ...@@ -612,12 +613,13 @@ ptep_establish(struct vm_area_struct *vma,
#define page_test_and_clear_young(page) \ #define page_test_and_clear_young(page) \
({ \ ({ \
struct page *__page = (page); \ struct page *__page = (page); \
unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);\
int __ccode; \ int __ccode; \
asm volatile ("rrbe 0,%1\n\t" \ asm volatile( \
"ipm %0\n\t" \ " rrbe 0,%1\n" \
"srl %0,28\n\t" \ " ipm %0\n" \
: "=d" (__ccode) : "a" (__physpage) : "cc" ); \ " srl %0,28\n" \
: "=d" (__ccode) : "a" (__physpage) : "cc"); \
(__ccode & 2); \ (__ccode & 2); \
}) })
......
...@@ -13,7 +13,6 @@ ...@@ -13,7 +13,6 @@
#ifndef __ASM_S390_PROCESSOR_H #ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H #define __ASM_S390_PROCESSOR_H
#include <asm/page.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#ifdef __KERNEL__ #ifdef __KERNEL__
...@@ -21,7 +20,7 @@ ...@@ -21,7 +20,7 @@
* Default implementation of macro that returns current * Default implementation of macro that returns current
* instruction pointer ("program counter"). * instruction pointer ("program counter").
*/ */
#define current_text_addr() ({ void *pc; __asm__("basr %0,0":"=a"(pc)); pc; }) #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
/* /*
* CPU type and hardware bug flags. Kept separately for each CPU. * CPU type and hardware bug flags. Kept separately for each CPU.
...@@ -202,7 +201,7 @@ unsigned long get_wchan(struct task_struct *p); ...@@ -202,7 +201,7 @@ unsigned long get_wchan(struct task_struct *p);
static inline void cpu_relax(void) static inline void cpu_relax(void)
{ {
if (MACHINE_HAS_DIAG44) if (MACHINE_HAS_DIAG44)
asm volatile ("diag 0,0,68" : : : "memory"); asm volatile("diag 0,0,68" : : : "memory");
else else
barrier(); barrier();
} }
...@@ -213,9 +212,9 @@ static inline void cpu_relax(void) ...@@ -213,9 +212,9 @@ static inline void cpu_relax(void)
static inline void __load_psw(psw_t psw) static inline void __load_psw(psw_t psw)
{ {
#ifndef __s390x__ #ifndef __s390x__
asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
#else #else
asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc");
#endif #endif
} }
...@@ -232,20 +231,20 @@ static inline void __load_psw_mask (unsigned long mask) ...@@ -232,20 +231,20 @@ static inline void __load_psw_mask (unsigned long mask)
psw.mask = mask; psw.mask = mask;
#ifndef __s390x__ #ifndef __s390x__
asm volatile ( asm volatile(
" basr %0,0\n" " basr %0,0\n"
"0: ahi %0,1f-0b\n" "0: ahi %0,1f-0b\n"
" st %0,4(%1)\n" " st %0,4(%1)\n"
" lpsw 0(%1)\n" " lpsw 0(%1)\n"
"1:" "1:"
: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
#else /* __s390x__ */ #else /* __s390x__ */
asm volatile ( asm volatile(
" larl %0,1f\n" " larl %0,1f\n"
" stg %0,8(%1)\n" " stg %0,8(%1)\n"
" lpswe 0(%1)\n" " lpswe 0(%1)\n"
"1:" "1:"
: "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc");
#endif /* __s390x__ */ #endif /* __s390x__ */
} }
...@@ -274,7 +273,8 @@ static inline void disabled_wait(unsigned long code) ...@@ -274,7 +273,8 @@ static inline void disabled_wait(unsigned long code)
* the processor is dead afterwards * the processor is dead afterwards
*/ */
#ifndef __s390x__ #ifndef __s390x__
asm volatile (" stctl 0,0,0(%2)\n" asm volatile(
" stctl 0,0,0(%2)\n"
" ni 0(%2),0xef\n" /* switch off protection */ " ni 0(%2),0xef\n" /* switch off protection */
" lctl 0,0,0(%2)\n" " lctl 0,0,0(%2)\n"
" stpt 0xd8\n" /* store timer */ " stpt 0xd8\n" /* store timer */
...@@ -290,16 +290,17 @@ static inline void disabled_wait(unsigned long code) ...@@ -290,16 +290,17 @@ static inline void disabled_wait(unsigned long code)
" oi 0x1c0,0x10\n" /* fake protection bit */ " oi 0x1c0,0x10\n" /* fake protection bit */
" lpsw 0(%1)" " lpsw 0(%1)"
: "=m" (ctl_buf) : "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
#else /* __s390x__ */ #else /* __s390x__ */
asm volatile (" stctg 0,0,0(%2)\n" asm volatile(
" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */ " ni 4(%2),0xef\n" /* switch off protection */
" lctlg 0,0,0(%2)\n" " lctlg 0,0,0(%2)\n"
" lghi 1,0x1000\n" " lghi 1,0x1000\n"
" stpt 0x328(1)\n" /* store timer */ " stpt 0x328(1)\n" /* store timer */
" stckc 0x330(1)\n" /* store clock comparator */ " stckc 0x330(1)\n" /* store clock comparator */
" stpx 0x318(1)\n" /* store prefix register */ " stpx 0x318(1)\n" /* store prefix register */
" stam 0,15,0x340(1)\n" /* store access registers */ " stam 0,15,0x340(1)\n"/* store access registers */
" stfpc 0x31c(1)\n" /* store fpu control */ " stfpc 0x31c(1)\n" /* store fpu control */
" std 0,0x200(1)\n" /* store f0 */ " std 0,0x200(1)\n" /* store f0 */
" std 1,0x208(1)\n" /* store f1 */ " std 1,0x208(1)\n" /* store f1 */
...@@ -317,13 +318,12 @@ static inline void disabled_wait(unsigned long code) ...@@ -317,13 +318,12 @@ static inline void disabled_wait(unsigned long code)
" std 13,0x268(1)\n" /* store f13 */ " std 13,0x268(1)\n" /* store f13 */
" std 14,0x270(1)\n" /* store f14 */ " std 14,0x270(1)\n" /* store f14 */
" std 15,0x278(1)\n" /* store f15 */ " std 15,0x278(1)\n" /* store f15 */
" stmg 0,15,0x280(1)\n" /* store general registers */ " stmg 0,15,0x280(1)\n"/* store general registers */
" stctg 0,15,0x380(1)\n" /* store control registers */ " stctg 0,15,0x380(1)\n"/* store control registers */
" oi 0x384(1),0x10\n" /* fake protection bit */ " oi 0x384(1),0x10\n"/* fake protection bit */
" lpswe 0(%1)" " lpswe 0(%1)"
: "=m" (ctl_buf) : "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
"m" (dw_psw) : "cc", "0", "1");
#endif /* __s390x__ */ #endif /* __s390x__ */
} }
......
...@@ -479,7 +479,7 @@ extern void show_regs(struct pt_regs * regs); ...@@ -479,7 +479,7 @@ extern void show_regs(struct pt_regs * regs);
static inline void static inline void
psw_set_key(unsigned int key) psw_set_key(unsigned int key)
{ {
asm volatile ( "spka 0(%0)" : : "d" (key) ); asm volatile("spka 0(%0)" : : "d" (key));
} }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -122,7 +122,7 @@ static inline void __down_read(struct rw_semaphore *sem) ...@@ -122,7 +122,7 @@ static inline void __down_read(struct rw_semaphore *sem)
{ {
signed long old, new; signed long old, new;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
...@@ -138,7 +138,7 @@ static inline void __down_read(struct rw_semaphore *sem) ...@@ -138,7 +138,7 @@ static inline void __down_read(struct rw_semaphore *sem)
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "a" (&sem->count), "m" (sem->count),
"i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
if (old < 0) if (old < 0)
rwsem_down_read_failed(sem); rwsem_down_read_failed(sem);
} }
...@@ -150,7 +150,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) ...@@ -150,7 +150,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
{ {
signed long old, new; signed long old, new;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: ltr %1,%0\n" "0: ltr %1,%0\n"
...@@ -170,7 +170,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) ...@@ -170,7 +170,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "a" (&sem->count), "m" (sem->count),
"i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
return old >= 0 ? 1 : 0; return old >= 0 ? 1 : 0;
} }
...@@ -182,7 +182,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) ...@@ -182,7 +182,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
signed long old, new, tmp; signed long old, new, tmp;
tmp = RWSEM_ACTIVE_WRITE_BIAS; tmp = RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
...@@ -198,7 +198,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) ...@@ -198,7 +198,7 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), "m" (tmp) : "a" (&sem->count), "m" (sem->count), "m" (tmp)
: "cc", "memory" ); : "cc", "memory");
if (old != 0) if (old != 0)
rwsem_down_write_failed(sem); rwsem_down_write_failed(sem);
} }
...@@ -215,7 +215,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) ...@@ -215,7 +215,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
{ {
signed long old; signed long old;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%2)\n" " l %0,0(%2)\n"
"0: ltr %0,%0\n" "0: ltr %0,%0\n"
...@@ -232,7 +232,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) ...@@ -232,7 +232,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
"1:" "1:"
: "=&d" (old), "=m" (sem->count) : "=&d" (old), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "a" (&sem->count), "m" (sem->count),
"d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" ); "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory");
return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0; return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
} }
...@@ -243,7 +243,7 @@ static inline void __up_read(struct rw_semaphore *sem) ...@@ -243,7 +243,7 @@ static inline void __up_read(struct rw_semaphore *sem)
{ {
signed long old, new; signed long old, new;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
...@@ -260,7 +260,7 @@ static inline void __up_read(struct rw_semaphore *sem) ...@@ -260,7 +260,7 @@ static inline void __up_read(struct rw_semaphore *sem)
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), : "a" (&sem->count), "m" (sem->count),
"i" (-RWSEM_ACTIVE_READ_BIAS) "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory" ); : "cc", "memory");
if (new < 0) if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0) if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem); rwsem_wake(sem);
...@@ -274,7 +274,7 @@ static inline void __up_write(struct rw_semaphore *sem) ...@@ -274,7 +274,7 @@ static inline void __up_write(struct rw_semaphore *sem)
signed long old, new, tmp; signed long old, new, tmp;
tmp = -RWSEM_ACTIVE_WRITE_BIAS; tmp = -RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
...@@ -290,7 +290,7 @@ static inline void __up_write(struct rw_semaphore *sem) ...@@ -290,7 +290,7 @@ static inline void __up_write(struct rw_semaphore *sem)
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), "m" (tmp) : "a" (&sem->count), "m" (sem->count), "m" (tmp)
: "cc", "memory" ); : "cc", "memory");
if (new < 0) if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0) if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem); rwsem_wake(sem);
...@@ -304,7 +304,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) ...@@ -304,7 +304,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
signed long old, new, tmp; signed long old, new, tmp;
tmp = -RWSEM_WAITING_BIAS; tmp = -RWSEM_WAITING_BIAS;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
...@@ -320,7 +320,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem) ...@@ -320,7 +320,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), "m" (tmp) : "a" (&sem->count), "m" (sem->count), "m" (tmp)
: "cc", "memory" ); : "cc", "memory");
if (new > 1) if (new > 1)
rwsem_downgrade_wake(sem); rwsem_downgrade_wake(sem);
} }
...@@ -332,7 +332,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) ...@@ -332,7 +332,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{ {
signed long old, new; signed long old, new;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
...@@ -348,7 +348,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) ...@@ -348,7 +348,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), "d" (delta) : "a" (&sem->count), "m" (sem->count), "d" (delta)
: "cc", "memory" ); : "cc", "memory");
} }
/* /*
...@@ -358,7 +358,7 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) ...@@ -358,7 +358,7 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{ {
signed long old, new; signed long old, new;
__asm__ __volatile__( asm volatile(
#ifndef __s390x__ #ifndef __s390x__
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: lr %1,%0\n" "0: lr %1,%0\n"
...@@ -374,7 +374,7 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) ...@@ -374,7 +374,7 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
#endif /* __s390x__ */ #endif /* __s390x__ */
: "=&d" (old), "=&d" (new), "=m" (sem->count) : "=&d" (old), "=&d" (new), "=m" (sem->count)
: "a" (&sem->count), "m" (sem->count), "d" (delta) : "a" (&sem->count), "m" (sem->count), "d" (delta)
: "cc", "memory" ); : "cc", "memory");
return new; return new;
} }
......
...@@ -85,7 +85,7 @@ static inline int down_trylock(struct semaphore * sem) ...@@ -85,7 +85,7 @@ static inline int down_trylock(struct semaphore * sem)
* sem->count.counter = --new_val; * sem->count.counter = --new_val;
* In the ppc code this is called atomic_dec_if_positive. * In the ppc code this is called atomic_dec_if_positive.
*/ */
__asm__ __volatile__ ( asm volatile(
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: ltr %1,%0\n" "0: ltr %1,%0\n"
" jle 1f\n" " jle 1f\n"
...@@ -95,7 +95,7 @@ static inline int down_trylock(struct semaphore * sem) ...@@ -95,7 +95,7 @@ static inline int down_trylock(struct semaphore * sem)
"1:" "1:"
: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter) : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
: "a" (&sem->count.counter), "m" (sem->count.counter) : "a" (&sem->count.counter), "m" (sem->count.counter)
: "cc", "memory" ); : "cc", "memory");
return old_val <= 0; return old_val <= 0;
} }
......
...@@ -76,7 +76,8 @@ ...@@ -76,7 +76,8 @@
unsigned int __r2 = (x2) + (y2); \ unsigned int __r2 = (x2) + (y2); \
unsigned int __r1 = (x1); \ unsigned int __r1 = (x1); \
unsigned int __r0 = (x0); \ unsigned int __r0 = (x0); \
__asm__ (" alr %2,%3\n" \ asm volatile( \
" alr %2,%3\n" \
" brc 12,0f\n" \ " brc 12,0f\n" \
" lhi 0,1\n" \ " lhi 0,1\n" \
" alr %1,0\n" \ " alr %1,0\n" \
...@@ -85,12 +86,13 @@ ...@@ -85,12 +86,13 @@
"0:" \ "0:" \
: "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
: "d" (y0), "i" (1) : "cc", "0" ); \ : "d" (y0), "i" (1) : "cc", "0" ); \
__asm__ (" alr %1,%2\n" \ asm volatile( \
" alr %1,%2\n" \
" brc 12,0f\n" \ " brc 12,0f\n" \
" ahi %0,1\n" \ " ahi %0,1\n" \
"0:" \ "0:" \
: "+&d" (__r2), "+&d" (__r1) \ : "+&d" (__r2), "+&d" (__r1) \
: "d" (y1) : "cc" ); \ : "d" (y1) : "cc"); \
(r2) = __r2; \ (r2) = __r2; \
(r1) = __r1; \ (r1) = __r1; \
(r0) = __r0; \ (r0) = __r0; \
...@@ -100,7 +102,8 @@ ...@@ -100,7 +102,8 @@
unsigned int __r2 = (x2) - (y2); \ unsigned int __r2 = (x2) - (y2); \
unsigned int __r1 = (x1); \ unsigned int __r1 = (x1); \
unsigned int __r0 = (x0); \ unsigned int __r0 = (x0); \
__asm__ (" slr %2,%3\n" \ asm volatile( \
" slr %2,%3\n" \
" brc 3,0f\n" \ " brc 3,0f\n" \
" lhi 0,1\n" \ " lhi 0,1\n" \
" slr %1,0\n" \ " slr %1,0\n" \
...@@ -108,13 +111,14 @@ ...@@ -108,13 +111,14 @@
" slr %0,0\n" \ " slr %0,0\n" \
"0:" \ "0:" \
: "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \
: "d" (y0) : "cc", "0" ); \ : "d" (y0) : "cc", "0"); \
__asm__ (" slr %1,%2\n" \ asm volatile( \
" slr %1,%2\n" \
" brc 3,0f\n" \ " brc 3,0f\n" \
" ahi %0,-1\n" \ " ahi %0,-1\n" \
"0:" \ "0:" \
: "+&d" (__r2), "+&d" (__r1) \ : "+&d" (__r2), "+&d" (__r1) \
: "d" (y1) : "cc" ); \ : "d" (y1) : "cc"); \
(r2) = __r2; \ (r2) = __r2; \
(r1) = __r1; \ (r1) = __r1; \
(r0) = __r0; \ (r0) = __r0; \
......
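
The macros above do 96-bit add/sub across three 32-bit words: alr/slr set the condition code and brc 12 (branch on cc 0 or 1, i.e. no carry) skips the propagation step. A portable sketch of the addition, low word first:

#include <stdint.h>

static inline void add_96bit(uint32_t r[3], const uint32_t x[3],
			     const uint32_t y[3])
{
	uint32_t carry = 0;
	int i;

	for (i = 0; i < 3; i++) {		/* index 0 is the low word */
		uint64_t sum = (uint64_t) x[i] + y[i] + carry;

		r[i] = (uint32_t) sum;
		carry = (uint32_t) (sum >> 32);	/* what brc 12 tests for */
	}
}
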
...@@ -70,16 +70,16 @@ typedef enum ...@@ -70,16 +70,16 @@ typedef enum
static inline sigp_ccode static inline sigp_ccode
signal_processor(__u16 cpu_addr, sigp_order_code order_code) signal_processor(__u16 cpu_addr, sigp_order_code order_code)
{ {
register unsigned long reg1 asm ("1") = 0;
sigp_ccode ccode; sigp_ccode ccode;
__asm__ __volatile__( asm volatile(
" sr 1,1\n" /* parameter=0 in gpr 1 */ " sigp %1,%2,0(%3)\n"
" sigp 1,%1,0(%2)\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (ccode) : "=d" (ccode)
: "d" (__cpu_logical_map[cpu_addr]), "a" (order_code) : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
: "cc" , "memory", "1" ); "a" (order_code) : "cc" , "memory");
return ccode; return ccode;
} }
...@@ -87,20 +87,18 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code) ...@@ -87,20 +87,18 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
* Signal processor with parameter * Signal processor with parameter
*/ */
static inline sigp_ccode static inline sigp_ccode
signal_processor_p(__u32 parameter, __u16 cpu_addr, signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
sigp_order_code order_code)
{ {
register unsigned int reg1 asm ("1") = parameter;
sigp_ccode ccode; sigp_ccode ccode;
__asm__ __volatile__( asm volatile(
" lr 1,%1\n" /* parameter in gpr 1 */ " sigp %1,%2,0(%3)\n"
" sigp 1,%2,0(%3)\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (ccode) : "=d" (ccode)
: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
"a" (order_code) "a" (order_code) : "cc" , "memory");
: "cc" , "memory", "1" );
return ccode; return ccode;
} }
...@@ -108,23 +106,20 @@ signal_processor_p(__u32 parameter, __u16 cpu_addr, ...@@ -108,23 +106,20 @@ signal_processor_p(__u32 parameter, __u16 cpu_addr,
* Signal processor with parameter and return status * Signal processor with parameter and return status
*/ */
static inline sigp_ccode static inline sigp_ccode
signal_processor_ps(__u32 *statusptr, __u32 parameter, signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
__u16 cpu_addr, sigp_order_code order_code) sigp_order_code order_code)
{ {
register unsigned int reg1 asm ("1") = parameter;
sigp_ccode ccode; sigp_ccode ccode;
__asm__ __volatile__( asm volatile(
" sr 2,2\n" /* clear status */ " sigp %1,%2,0(%3)\n"
" lr 3,%2\n" /* parameter in gpr 3 */
" sigp 2,%3,0(%4)\n"
" st 2,%1\n"
" ipm %0\n" " ipm %0\n"
" srl %0,28\n" " srl %0,28\n"
: "=d" (ccode), "=m" (*statusptr) : "=d" (ccode), "+d" (reg1)
: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
"a" (order_code) : "cc" , "memory");
: "cc" , "memory", "2" , "3" *statusptr = reg1;
);
return ccode; return ccode;
} }
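
The sigp rewrites replace the explicit sr/lr setup of gpr 1 with a register asm variable, so gcc materializes the value itself and the register no longer needs to appear in the clobber list. A minimal sketch of the technique (hypothetical helper, not part of the patch):

static inline unsigned int reg_asm_sketch(unsigned int parameter)
{
	register unsigned int reg1 asm("1") = parameter;
	unsigned int result;

	asm volatile("	lr	%0,%1"	/* the body may use gpr 1 directly */
		     : "=d" (result) : "d" (reg1));
	return result;
}
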
......
...@@ -56,7 +56,7 @@ static inline __u16 hard_smp_processor_id(void) ...@@ -56,7 +56,7 @@ static inline __u16 hard_smp_processor_id(void)
{ {
__u16 cpu_address; __u16 cpu_address;
__asm__ ("stap %0\n" : "=m" (cpu_address)); asm volatile("stap %0" : "=m" (cpu_address));
return cpu_address; return cpu_address;
} }
......
...@@ -11,17 +11,36 @@ ...@@ -11,17 +11,36 @@
#ifndef __ASM_SPINLOCK_H #ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
static inline int static inline int
_raw_compare_and_swap(volatile unsigned int *lock, _raw_compare_and_swap(volatile unsigned int *lock,
unsigned int old, unsigned int new) unsigned int old, unsigned int new)
{ {
asm volatile ("cs %0,%3,0(%4)" asm volatile(
" cs %0,%3,%1"
: "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock)
: "cc", "memory" );
return old;
}
#else /* __GNUC__ */
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
unsigned int old, unsigned int new)
{
asm volatile(
" cs %0,%3,0(%4)"
: "=d" (old), "=m" (*lock) : "=d" (old), "=m" (*lock)
: "0" (old), "d" (new), "a" (lock), "m" (*lock) : "0" (old), "d" (new), "a" (lock), "m" (*lock)
: "cc", "memory" ); : "cc", "memory" );
return old; return old;
} }
#endif /* __GNUC__ */
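
The first variant uses the "Q" constraint (a memory operand addressable without an index register), available on newer gcc, so cs can reference *lock directly; older compilers fall back to passing the address in a register. A usage sketch (hypothetical acquire loop, not from this hunk) of how a spin lock builds on _raw_compare_and_swap:

static inline void spin_lock_sketch(volatile unsigned int *lock,
				    unsigned int owner)
{
	while (_raw_compare_and_swap(lock, 0, owner) != 0)
		continue;	/* cs returned a nonzero old value: taken */
}
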
/* /*
* Simple spin lock operations. There are two variants, one clears IRQ's * Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not. * on the local processor, one does not.
......
...@@ -60,12 +60,13 @@ static inline void *memchr(const void * s, int c, size_t n) ...@@ -60,12 +60,13 @@ static inline void *memchr(const void * s, int c, size_t n)
register int r0 asm("0") = (char) c; register int r0 asm("0") = (char) c;
const void *ret = s + n; const void *ret = s + n;
asm volatile ("0: srst %0,%1\n" asm volatile(
"0: srst %0,%1\n"
" jo 0b\n" " jo 0b\n"
" jl 1f\n" " jl 1f\n"
" la %0,0\n" " la %0,0\n"
"1:" "1:"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
return (void *) ret; return (void *) ret;
} }
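
A sketch of what the srst loop computes: scan [s, s+n) for byte c, with the jl path (no match found) producing the NULL return via la %0,0:

#include <stddef.h>

static inline void *memchr_sketch(const void *s, int c, size_t n)
{
	const unsigned char *p = s;
	const unsigned char *end = p + n;

	for (; p != end; p++)
		if (*p == (unsigned char) c)
			return (void *) p;
	return NULL;	/* srst took the jl branch: no match */
}
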
...@@ -74,9 +75,10 @@ static inline void *memscan(void *s, int c, size_t n) ...@@ -74,9 +75,10 @@ static inline void *memscan(void *s, int c, size_t n)
register int r0 asm("0") = (char) c; register int r0 asm("0") = (char) c;
const void *ret = s + n; const void *ret = s + n;
asm volatile ("0: srst %0,%1\n" asm volatile(
"0: srst %0,%1\n"
" jo 0b\n" " jo 0b\n"
: "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); : "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
return (void *) ret; return (void *) ret;
} }
...@@ -86,7 +88,8 @@ static inline char *strcat(char *dst, const char *src) ...@@ -86,7 +88,8 @@ static inline char *strcat(char *dst, const char *src)
unsigned long dummy; unsigned long dummy;
char *ret = dst; char *ret = dst;
asm volatile ("0: srst %0,%1\n" asm volatile(
"0: srst %0,%1\n"
" jo 0b\n" " jo 0b\n"
"1: mvst %0,%2\n" "1: mvst %0,%2\n"
" jo 1b" " jo 1b"
...@@ -100,10 +103,11 @@ static inline char *strcpy(char *dst, const char *src) ...@@ -100,10 +103,11 @@ static inline char *strcpy(char *dst, const char *src)
register int r0 asm("0") = 0; register int r0 asm("0") = 0;
char *ret = dst; char *ret = dst;
asm volatile ("0: mvst %0,%1\n" asm volatile(
"0: mvst %0,%1\n"
" jo 0b" " jo 0b"
: "+&a" (dst), "+&a" (src) : "d" (r0) : "+&a" (dst), "+&a" (src) : "d" (r0)
: "cc", "memory" ); : "cc", "memory");
return ret; return ret;
} }
...@@ -112,9 +116,10 @@ static inline size_t strlen(const char *s) ...@@ -112,9 +116,10 @@ static inline size_t strlen(const char *s)
register unsigned long r0 asm("0") = 0; register unsigned long r0 asm("0") = 0;
const char *tmp = s; const char *tmp = s;
asm volatile ("0: srst %0,%1\n" asm volatile(
"0: srst %0,%1\n"
" jo 0b" " jo 0b"
: "+d" (r0), "+a" (tmp) : : "cc" ); : "+d" (r0), "+a" (tmp) : : "cc");
return r0 - (unsigned long) s; return r0 - (unsigned long) s;
} }
...@@ -124,9 +129,10 @@ static inline size_t strnlen(const char * s, size_t n) ...@@ -124,9 +129,10 @@ static inline size_t strnlen(const char * s, size_t n)
const char *tmp = s; const char *tmp = s;
const char *end = s + n; const char *end = s + n;
asm volatile ("0: srst %0,%1\n" asm volatile(
"0: srst %0,%1\n"
" jo 0b" " jo 0b"
: "+a" (end), "+a" (tmp) : "d" (r0) : "cc" ); : "+a" (end), "+a" (tmp) : "d" (r0) : "cc");
return end - s; return end - s;
} }
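
strnlen bounds the same srst scan with an end pointer, so the result collapses to min(strlen(s), n). A portable sketch:

#include <stddef.h>

static inline size_t strnlen_sketch(const char *s, size_t n)
{
	const char *p = s;
	const char *end = s + n;

	while (p != end && *p)
		p++;		/* srst advances until NUL or end */
	return p - s;
}
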
......
...@@ -23,20 +23,14 @@ struct task_struct; ...@@ -23,20 +23,14 @@ struct task_struct;
extern struct task_struct *__switch_to(void *, void *); extern struct task_struct *__switch_to(void *, void *);
#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */
static inline void save_fp_regs(s390_fp_regs *fpregs) static inline void save_fp_regs(s390_fp_regs *fpregs)
{ {
asm volatile ( asm volatile(
" std 0,8(%1)\n" " std 0,8(%1)\n"
" std 2,24(%1)\n" " std 2,24(%1)\n"
" std 4,40(%1)\n" " std 4,40(%1)\n"
" std 6,56(%1)" " std 6,56(%1)"
: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
if (!MACHINE_HAS_IEEE) if (!MACHINE_HAS_IEEE)
return; return;
asm volatile( asm volatile(
...@@ -53,17 +47,17 @@ static inline void save_fp_regs(s390_fp_regs *fpregs) ...@@ -53,17 +47,17 @@ static inline void save_fp_regs(s390_fp_regs *fpregs)
" std 13,112(%1)\n" " std 13,112(%1)\n"
" std 14,120(%1)\n" " std 14,120(%1)\n"
" std 15,128(%1)\n" " std 15,128(%1)\n"
: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
} }
static inline void restore_fp_regs(s390_fp_regs *fpregs) static inline void restore_fp_regs(s390_fp_regs *fpregs)
{ {
asm volatile ( asm volatile(
" ld 0,8(%0)\n" " ld 0,8(%0)\n"
" ld 2,24(%0)\n" " ld 2,24(%0)\n"
" ld 4,40(%0)\n" " ld 4,40(%0)\n"
" ld 6,56(%0)" " ld 6,56(%0)"
: : "a" (fpregs), "m" (*fpregs) ); : : "a" (fpregs), "m" (*fpregs));
if (!MACHINE_HAS_IEEE) if (!MACHINE_HAS_IEEE)
return; return;
asm volatile( asm volatile(
...@@ -80,17 +74,17 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs) ...@@ -80,17 +74,17 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
" ld 13,112(%0)\n" " ld 13,112(%0)\n"
" ld 14,120(%0)\n" " ld 14,120(%0)\n"
" ld 15,128(%0)\n" " ld 15,128(%0)\n"
: : "a" (fpregs), "m" (*fpregs) ); : : "a" (fpregs), "m" (*fpregs));
} }
static inline void save_access_regs(unsigned int *acrs) static inline void save_access_regs(unsigned int *acrs)
{ {
asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" ); asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
} }
static inline void restore_access_regs(unsigned int *acrs) static inline void restore_access_regs(unsigned int *acrs)
{ {
asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) ); asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
} }
#define switch_to(prev,next,last) do { \ #define switch_to(prev,next,last) do { \
...@@ -126,7 +120,7 @@ extern void account_system_vtime(struct task_struct *); ...@@ -126,7 +120,7 @@ extern void account_system_vtime(struct task_struct *);
account_vtime(prev); \ account_vtime(prev); \
} while (0) } while (0)
#define nop() __asm__ __volatile__ ("nop") #define nop() asm volatile("nop")
#define xchg(ptr,x) \ #define xchg(ptr,x) \
({ \ ({ \
...@@ -155,7 +149,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size) ...@@ -155,7 +149,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(int *) addr) : "=&d" (old), "=m" (*(int *) addr)
: "d" (x << shift), "d" (~(255 << shift)), "a" (addr), : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
"m" (*(int *) addr) : "memory", "cc", "0" ); "m" (*(int *) addr) : "memory", "cc", "0");
x = old >> shift; x = old >> shift;
break; break;
case 2: case 2:
...@@ -171,28 +165,28 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size) ...@@ -171,28 +165,28 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(int *) addr) : "=&d" (old), "=m" (*(int *) addr)
: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
"m" (*(int *) addr) : "memory", "cc", "0" ); "m" (*(int *) addr) : "memory", "cc", "0");
x = old >> shift; x = old >> shift;
break; break;
case 4: case 4:
asm volatile ( asm volatile(
" l %0,0(%3)\n" " l %0,0(%3)\n"
"0: cs %0,%2,0(%3)\n" "0: cs %0,%2,0(%3)\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(int *) ptr) : "=&d" (old), "=m" (*(int *) ptr)
: "d" (x), "a" (ptr), "m" (*(int *) ptr) : "d" (x), "a" (ptr), "m" (*(int *) ptr)
: "memory", "cc" ); : "memory", "cc");
x = old; x = old;
break; break;
#ifdef __s390x__ #ifdef __s390x__
case 8: case 8:
asm volatile ( asm volatile(
" lg %0,0(%3)\n" " lg %0,0(%3)\n"
"0: csg %0,%2,0(%3)\n" "0: csg %0,%2,0(%3)\n"
" jl 0b\n" " jl 0b\n"
: "=&d" (old), "=m" (*(long *) ptr) : "=&d" (old), "=m" (*(long *) ptr)
: "d" (x), "a" (ptr), "m" (*(long *) ptr) : "d" (x), "a" (ptr), "m" (*(long *) ptr)
: "memory", "cc" ); : "memory", "cc");
x = old; x = old;
break; break;
#endif /* __s390x__ */ #endif /* __s390x__ */
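
The 1- and 2-byte cases above cannot use cs directly, since the hardware compare-and-swap only works on aligned words; the value is shifted into its position inside the containing word and the whole word is swapped. A sketch of the byte case, assuming GCC __sync builtins and big-endian byte numbering:

static inline unsigned char xchg_u8_sketch(unsigned char *ptr, unsigned char x)
{
	unsigned int *word = (unsigned int *) ((unsigned long) ptr & ~3UL);
	int shift = (3 ^ ((unsigned long) ptr & 3)) << 3;  /* big endian */
	unsigned int old, new;

	do {
		old = *word;
		new = (old & ~(0xffU << shift)) | ((unsigned int) x << shift);
	} while (!__sync_bool_compare_and_swap(word, old, new));
	return (old >> shift) & 0xff;
}
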
...@@ -238,7 +232,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -238,7 +232,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
: "=&d" (prev), "=&d" (tmp) : "=&d" (prev), "=&d" (tmp)
: "d" (old << shift), "d" (new << shift), "a" (ptr), : "d" (old << shift), "d" (new << shift), "a" (ptr),
"d" (~(255 << shift)) "d" (~(255 << shift))
: "memory", "cc" ); : "memory", "cc");
return prev >> shift; return prev >> shift;
case 2: case 2:
addr = (unsigned long) ptr; addr = (unsigned long) ptr;
...@@ -259,20 +253,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -259,20 +253,20 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
: "=&d" (prev), "=&d" (tmp) : "=&d" (prev), "=&d" (tmp)
: "d" (old << shift), "d" (new << shift), "a" (ptr), : "d" (old << shift), "d" (new << shift), "a" (ptr),
"d" (~(65535 << shift)) "d" (~(65535 << shift))
: "memory", "cc" ); : "memory", "cc");
return prev >> shift; return prev >> shift;
case 4: case 4:
asm volatile ( asm volatile(
" cs %0,%2,0(%3)\n" " cs %0,%2,0(%3)\n"
: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
: "memory", "cc" ); : "memory", "cc");
return prev; return prev;
#ifdef __s390x__ #ifdef __s390x__
case 8: case 8:
asm volatile ( asm volatile(
" csg %0,%2,0(%3)\n" " csg %0,%2,0(%3)\n"
: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
: "memory", "cc" ); : "memory", "cc");
return prev; return prev;
#endif /* __s390x__ */ #endif /* __s390x__ */
} }
...@@ -289,8 +283,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -289,8 +283,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
* all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ). * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ).
*/ */
#define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" ) #define eieio() asm volatile("bcr 15,0" : : : "memory")
# define SYNC_OTHER_CORES(x) eieio() #define SYNC_OTHER_CORES(x) eieio()
#define mb() eieio() #define mb() eieio()
#define rmb() eieio() #define rmb() eieio()
#define wmb() eieio() #define wmb() eieio()
...@@ -309,114 +303,53 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -309,114 +303,53 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#define __ctl_load(array, low, high) ({ \ #define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
__asm__ __volatile__ ( \ asm volatile( \
" bras 1,0f\n" \ " lctlg %1,%2,0(%0)\n" \
" lctlg 0,0,0(%0)\n" \ : : "a" (&array), "i" (low), "i" (high), \
"0: ex %1,0(1)" \ "m" (*(addrtype *)(array))); \
: : "a" (&array), "a" (((low)<<4)+(high)), \
"m" (*(addrtype *)(array)) : "1" ); \
}) })
#define __ctl_store(array, low, high) ({ \ #define __ctl_store(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
__asm__ __volatile__ ( \ asm volatile( \
" bras 1,0f\n" \ " stctg %2,%3,0(%1)\n" \
" stctg 0,0,0(%1)\n" \
"0: ex %2,0(1)" \
: "=m" (*(addrtype *)(array)) \ : "=m" (*(addrtype *)(array)) \
: "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \ : "a" (&array), "i" (low), "i" (high)); \
})
#define __ctl_set_bit(cr, bit) ({ \
__u8 __dummy[24]; \
__asm__ __volatile__ ( \
" bras 1,0f\n" /* skip indirect insns */ \
" stctg 0,0,0(%1)\n" \
" lctlg 0,0,0(%1)\n" \
"0: ex %2,0(1)\n" /* execute stctl */ \
" lg 0,0(%1)\n" \
" ogr 0,%3\n" /* set the bit */ \
" stg 0,0(%1)\n" \
"1: ex %2,6(1)" /* execute lctl */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (1L<<(bit)) \
: "cc", "0", "1" ); \
})
#define __ctl_clear_bit(cr, bit) ({ \
__u8 __dummy[16]; \
__asm__ __volatile__ ( \
" bras 1,0f\n" /* skip indirect insns */ \
" stctg 0,0,0(%1)\n" \
" lctlg 0,0,0(%1)\n" \
"0: ex %2,0(1)\n" /* execute stctl */ \
" lg 0,0(%1)\n" \
" ngr 0,%3\n" /* set the bit */ \
" stg 0,0(%1)\n" \
"1: ex %2,6(1)" /* execute lctl */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (~(1L<<(bit))) \
: "cc", "0", "1" ); \
}) })
#else /* __s390x__ */ #else /* __s390x__ */
#define __ctl_load(array, low, high) ({ \ #define __ctl_load(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
__asm__ __volatile__ ( \ asm volatile( \
" bras 1,0f\n" \ " lctl %1,%2,0(%0)\n" \
" lctl 0,0,0(%0)\n" \ : : "a" (&array), "i" (low), "i" (high), \
"0: ex %1,0(1)" \ "m" (*(addrtype *)(array))); \
: : "a" (&array), "a" (((low)<<4)+(high)), \ })
"m" (*(addrtype *)(array)) : "1" ); \
})
#define __ctl_store(array, low, high) ({ \ #define __ctl_store(array, low, high) ({ \
typedef struct { char _[sizeof(array)]; } addrtype; \ typedef struct { char _[sizeof(array)]; } addrtype; \
__asm__ __volatile__ ( \ asm volatile( \
" bras 1,0f\n" \ " stctl %2,%3,0(%1)\n" \
" stctl 0,0,0(%1)\n" \
"0: ex %2,0(1)" \
: "=m" (*(addrtype *)(array)) \ : "=m" (*(addrtype *)(array)) \
: "a" (&array), "a" (((low)<<4)+(high)): "1" ); \ : "a" (&array), "i" (low), "i" (high)); \
}) })
#endif /* __s390x__ */
#define __ctl_set_bit(cr, bit) ({ \ #define __ctl_set_bit(cr, bit) ({ \
__u8 __dummy[16]; \ unsigned long __dummy; \
__asm__ __volatile__ ( \ __ctl_store(__dummy, cr, cr); \
" bras 1,0f\n" /* skip indirect insns */ \ __dummy |= 1UL << (bit); \
" stctl 0,0,0(%1)\n" \ __ctl_load(__dummy, cr, cr); \
" lctl 0,0,0(%1)\n" \ })
"0: ex %2,0(1)\n" /* execute stctl */ \
" l 0,0(%1)\n" \
" or 0,%3\n" /* set the bit */ \
" st 0,0(%1)\n" \
"1: ex %2,4(1)" /* execute lctl */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (1<<(bit)) \
: "cc", "0", "1" ); \
})
#define __ctl_clear_bit(cr, bit) ({ \ #define __ctl_clear_bit(cr, bit) ({ \
__u8 __dummy[16]; \ unsigned long __dummy; \
__asm__ __volatile__ ( \ __ctl_store(__dummy, cr, cr); \
" bras 1,0f\n" /* skip indirect insns */ \ __dummy &= ~(1UL << (bit)); \
" stctl 0,0,0(%1)\n" \ __ctl_load(__dummy, cr, cr); \
" lctl 0,0,0(%1)\n" \ })
"0: ex %2,0(1)\n" /* execute stctl */ \
" l 0,0(%1)\n" \
" nr 0,%3\n" /* set the bit */ \
" st 0,0(%1)\n" \
"1: ex %2,4(1)" /* execute lctl */ \
: "=m" (__dummy) \
: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
"a" (cr*17), "a" (~(1<<(bit))) \
: "cc", "0", "1" ); \
})
#endif /* __s390x__ */
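
The rewritten __ctl_set_bit/__ctl_clear_bit drop the self-modifying ex sequences in favor of a plain store-modify-load through __ctl_store/__ctl_load. A sketch of the semantics, with an ordinary variable standing in for the control register image:

static inline void ctl_set_bit_sketch(unsigned long *cr_image, int bit)
{
	unsigned long val = *cr_image;	/* __ctl_store: stct(g) to memory */

	val |= 1UL << bit;		/* flip the requested bit on */
	*cr_image = val;		/* __ctl_load: lctl(g) from memory */
}
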
#include <linux/irqflags.h> #include <linux/irqflags.h>
...@@ -427,8 +360,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) ...@@ -427,8 +360,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
static inline void static inline void
__set_psw_mask(unsigned long mask) __set_psw_mask(unsigned long mask)
{ {
local_save_flags(mask); __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
__load_psw_mask(mask);
} }
#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) #define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS)
......
...@@ -15,20 +15,21 @@ ...@@ -15,20 +15,21 @@
typedef unsigned long long cycles_t; typedef unsigned long long cycles_t;
static inline cycles_t get_cycles(void)
{
cycles_t cycles;
__asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc");
return cycles >> 2;
}
static inline unsigned long long get_clock (void) static inline unsigned long long get_clock (void)
{ {
unsigned long long clk; unsigned long long clk;
__asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile("stck %0" : "=Q" (clk) : : "cc");
#else /* __GNUC__ */
asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc");
#endif /* __GNUC__ */
return clk; return clk;
} }
static inline cycles_t get_cycles(void)
{
return (cycles_t) get_clock() >> 2;
}
#endif #endif
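
A usage sketch of the reordered accessors, assuming the standard TOD clock format in which bit 51 ticks once per microsecond:

static inline unsigned long long elapsed_usecs(unsigned long long start)
{
	return (get_clock() - start) >> 12;	/* bit 51 = 1 microsecond */
}
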
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
*/ */
#define local_flush_tlb() \ #define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0) do { asm volatile("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
...@@ -68,24 +68,24 @@ extern void smp_ptlb_all(void); ...@@ -68,24 +68,24 @@ extern void smp_ptlb_all(void);
static inline void global_flush_tlb(void) static inline void global_flush_tlb(void)
{ {
register unsigned long reg2 asm("2");
register unsigned long reg3 asm("3");
register unsigned long reg4 asm("4");
long dummy;
#ifndef __s390x__ #ifndef __s390x__
if (!MACHINE_HAS_CSP) { if (!MACHINE_HAS_CSP) {
smp_ptlb_all(); smp_ptlb_all();
return; return;
} }
#endif /* __s390x__ */ #endif /* __s390x__ */
{
register unsigned long addr asm("4");
long dummy;
dummy = 0; dummy = 0;
addr = ((unsigned long) &dummy) + 1; reg2 = reg3 = 0;
__asm__ __volatile__ ( reg4 = ((unsigned long) &dummy) + 1;
" slr 2,2\n" asm volatile(
" slr 3,3\n" " csp %0,%2"
" csp 2,%0" : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
: : "a" (addr), "m" (dummy) : "cc", "2", "3" );
}
} }
/* /*
...@@ -102,9 +102,9 @@ static inline void __flush_tlb_mm(struct mm_struct * mm) ...@@ -102,9 +102,9 @@ static inline void __flush_tlb_mm(struct mm_struct * mm)
if (unlikely(cpus_empty(mm->cpu_vm_mask))) if (unlikely(cpus_empty(mm->cpu_vm_mask)))
return; return;
if (MACHINE_HAS_IDTE) { if (MACHINE_HAS_IDTE) {
asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0" asm volatile(
: : "a" (2048), " .insn rrf,0xb98e0000,0,%0,%1,0"
"a" (__pa(mm->pgd)&PAGE_MASK) : "cc" ); : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
return; return;
} }
preempt_disable(); preempt_disable();
......
...@@ -38,25 +38,14 @@ ...@@ -38,25 +38,14 @@
#define get_ds() (KERNEL_DS) #define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment) #define get_fs() (current->thread.mm_segment)
#ifdef __s390x__
#define set_fs(x) \ #define set_fs(x) \
({ \ ({ \
unsigned long __pto; \ unsigned long __pto; \
current->thread.mm_segment = (x); \ current->thread.mm_segment = (x); \
__pto = current->thread.mm_segment.ar4 ? \ __pto = current->thread.mm_segment.ar4 ? \
S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \ __ctl_load(__pto, 7, 7); \
}) })
#else /* __s390x__ */
#define set_fs(x) \
({ \
unsigned long __pto; \
current->thread.mm_segment = (x); \
__pto = current->thread.mm_segment.ar4 ? \
S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \
})
#endif /* __s390x__ */
#define segment_eq(a,b) ((a).ar4 == (b).ar4) #define segment_eq(a,b) ((a).ar4 == (b).ar4)
......
...@@ -359,7 +359,7 @@ do { \ ...@@ -359,7 +359,7 @@ do { \
type name(void) { \ type name(void) { \
register long __svcres asm("2"); \ register long __svcres asm("2"); \
long __res; \ long __res; \
__asm__ __volatile__ ( \ asm volatile( \
" .if %1 < 256\n" \ " .if %1 < 256\n" \
" svc %b1\n" \ " svc %b1\n" \
" .else\n" \ " .else\n" \
...@@ -368,7 +368,7 @@ type name(void) { \ ...@@ -368,7 +368,7 @@ type name(void) { \
" .endif" \ " .endif" \
: "=d" (__svcres) \ : "=d" (__svcres) \
: "i" (__NR_##name) \ : "i" (__NR_##name) \
: _svc_clobber ); \ : _svc_clobber); \
__res = __svcres; \ __res = __svcres; \
__syscall_return(type,__res); \ __syscall_return(type,__res); \
} }
...@@ -378,7 +378,7 @@ type name(type1 arg1) { \ ...@@ -378,7 +378,7 @@ type name(type1 arg1) { \
register type1 __arg1 asm("2") = arg1; \ register type1 __arg1 asm("2") = arg1; \
register long __svcres asm("2"); \ register long __svcres asm("2"); \
long __res; \ long __res; \
__asm__ __volatile__ ( \ asm volatile( \
" .if %1 < 256\n" \ " .if %1 < 256\n" \
" svc %b1\n" \ " svc %b1\n" \
" .else\n" \ " .else\n" \
...@@ -388,7 +388,7 @@ type name(type1 arg1) { \ ...@@ -388,7 +388,7 @@ type name(type1 arg1) { \
: "=d" (__svcres) \ : "=d" (__svcres) \
: "i" (__NR_##name), \ : "i" (__NR_##name), \
"0" (__arg1) \ "0" (__arg1) \
: _svc_clobber ); \ : _svc_clobber); \
__res = __svcres; \ __res = __svcres; \
__syscall_return(type,__res); \ __syscall_return(type,__res); \
} }
...@@ -399,7 +399,7 @@ type name(type1 arg1, type2 arg2) { \ ...@@ -399,7 +399,7 @@ type name(type1 arg1, type2 arg2) { \
register type2 __arg2 asm("3") = arg2; \ register type2 __arg2 asm("3") = arg2; \
register long __svcres asm("2"); \ register long __svcres asm("2"); \
long __res; \ long __res; \
__asm__ __volatile__ ( \ asm volatile( \
" .if %1 < 256\n" \ " .if %1 < 256\n" \
" svc %b1\n" \ " svc %b1\n" \
" .else\n" \ " .else\n" \
...@@ -415,14 +415,14 @@ type name(type1 arg1, type2 arg2) { \ ...@@ -415,14 +415,14 @@ type name(type1 arg1, type2 arg2) { \
__syscall_return(type,__res); \ __syscall_return(type,__res); \
} }
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)\ #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1, type2 arg2, type3 arg3) { \ type name(type1 arg1, type2 arg2, type3 arg3) { \
register type1 __arg1 asm("2") = arg1; \ register type1 __arg1 asm("2") = arg1; \
register type2 __arg2 asm("3") = arg2; \ register type2 __arg2 asm("3") = arg2; \
register type3 __arg3 asm("4") = arg3; \ register type3 __arg3 asm("4") = arg3; \
register long __svcres asm("2"); \ register long __svcres asm("2"); \
long __res; \ long __res; \
__asm__ __volatile__ ( \ asm volatile( \
" .if %1 < 256\n" \ " .if %1 < 256\n" \
" svc %b1\n" \ " svc %b1\n" \
" .else\n" \ " .else\n" \
...@@ -434,12 +434,12 @@ type name(type1 arg1, type2 arg2, type3 arg3) { \ ...@@ -434,12 +434,12 @@ type name(type1 arg1, type2 arg2, type3 arg3) { \
"0" (__arg1), \ "0" (__arg1), \
"d" (__arg2), \ "d" (__arg2), \
"d" (__arg3) \ "d" (__arg3) \
: _svc_clobber ); \ : _svc_clobber); \
__res = __svcres; \ __res = __svcres; \
__syscall_return(type,__res); \ __syscall_return(type,__res); \
} }
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,\ #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \
type4,name4) \ type4,name4) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
register type1 __arg1 asm("2") = arg1; \ register type1 __arg1 asm("2") = arg1; \
...@@ -448,7 +448,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ ...@@ -448,7 +448,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
register type4 __arg4 asm("5") = arg4; \ register type4 __arg4 asm("5") = arg4; \
register long __svcres asm("2"); \ register long __svcres asm("2"); \
long __res; \ long __res; \
__asm__ __volatile__ ( \ asm volatile( \
" .if %1 < 256\n" \ " .if %1 < 256\n" \
" svc %b1\n" \ " svc %b1\n" \
" .else\n" \ " .else\n" \
...@@ -461,12 +461,12 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ ...@@ -461,12 +461,12 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
"d" (__arg2), \ "d" (__arg2), \
"d" (__arg3), \ "d" (__arg3), \
"d" (__arg4) \ "d" (__arg4) \
: _svc_clobber ); \ : _svc_clobber); \
__res = __svcres; \ __res = __svcres; \
__syscall_return(type,__res); \ __syscall_return(type,__res); \
} }
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,\ #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \
type4,name4,type5,name5) \ type4,name4,type5,name5) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
type5 arg5) { \ type5 arg5) { \
...@@ -477,7 +477,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ ...@@ -477,7 +477,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
register type5 __arg5 asm("6") = arg5; \ register type5 __arg5 asm("6") = arg5; \
register long __svcres asm("2"); \ register long __svcres asm("2"); \
long __res; \ long __res; \
__asm__ __volatile__ ( \ asm volatile( \
" .if %1 < 256\n" \ " .if %1 < 256\n" \
" svc %b1\n" \ " svc %b1\n" \
" .else\n" \ " .else\n" \
...@@ -491,7 +491,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ ...@@ -491,7 +491,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
"d" (__arg3), \ "d" (__arg3), \
"d" (__arg4), \ "d" (__arg4), \
"d" (__arg5) \ "d" (__arg5) \
: _svc_clobber ); \ : _svc_clobber); \
__res = __svcres; \ __res = __svcres; \
__syscall_return(type,__res); \ __syscall_return(type,__res); \
} }
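
A rough userspace equivalent of what a _syscallN expansion provides, written here with glibc's syscall() for illustration (the kernel macros additionally set errno through __syscall_return):

#include <unistd.h>
#include <sys/syscall.h>

static long my_kill(int pid, int sig)
{
	return syscall(SYS_kill, pid, sig); /* ~ _syscall2(long, kill, ...) */
}
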
......