Commit e6268940 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390/s390x unification (6/7)

Merge s390x and s390 into one architecture.
parent 71000494
......@@ -13,22 +13,63 @@
#ifdef __GNUC__
#ifdef __s390x__
static __inline__ __const__ __u64 ___arch__swab64p(__u64 *x)
{
__u64 result;
__asm__ __volatile__ (
" lrvg %0,%1"
: "=d" (result) : "m" (*x) );
return result;
}
static __inline__ __const__ __u64 ___arch__swab64(__u64 x)
{
__u64 result;
__asm__ __volatile__ (
" lrvgr %0,%1"
: "=d" (result) : "d" (x) );
return result;
}
static __inline__ void ___arch__swab64s(__u64 *x)
{
*x = ___arch__swab64p(x);
}
#endif /* __s390x__ */
static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
{
__u32 result;
__asm__ __volatile__ (
#ifndef __s390x__
" icm %0,8,3(%1)\n"
" icm %0,4,2(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)"
: "=&d" (result) : "a" (x) : "cc" );
#else /* __s390x__ */
" lrv %0,%1"
: "=d" (result) : "m" (*x) );
#endif /* __s390x__ */
return result;
}
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
#ifndef __s390x__
return ___arch__swab32p(&x);
#else /* __s390x__ */
__u32 result;
__asm__ __volatile__ (
" lrvr %0,%1"
: "=d" (result) : "d" (x) );
return result;
#endif /* __s390x__ */
}
static __inline__ void ___arch__swab32s(__u32 *x)
......@@ -41,9 +82,14 @@ static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
__u16 result;
__asm__ __volatile__ (
#ifndef __s390x__
" icm %0,2,1(%1)\n"
" ic %0,0(%1)\n"
: "=&d" (result) : "a" (x) : "cc" );
#else /* __s390x__ */
" lrvh %0,%1"
: "=d" (result) : "m" (*x) );
#endif /* __s390x__ */
return result;
}
......@@ -57,6 +103,11 @@ static __inline__ void ___arch__swab16s(__u16 *x)
*x = ___arch__swab16p(x);
}
#ifdef __s390x__
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab64p(x) ___arch__swab64p(x)
#define __arch__swab64s(x) ___arch__swab64s(x)
#endif /* __s390x__ */
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#define __arch__swab32p(x) ___arch__swab32p(x)
......@@ -64,10 +115,14 @@ static __inline__ void ___arch__swab16s(__u16 *x)
#define __arch__swab32s(x) ___arch__swab32s(x)
#define __arch__swab16s(x) ___arch__swab16s(x)
#ifndef __s390x__
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif
#else /* __s390x__ */
#define __BYTEORDER_HAS_U64__
#endif /* __s390x__ */
#endif /* __GNUC__ */
......
......@@ -157,6 +157,16 @@ extern int ccw_device_set_options(struct ccw_device *, unsigned long);
*/
extern int ccw_device_start(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, unsigned long);
/*
* ccw_device_start_timeout()
*
* This function notifies the device driver if the channel program has not
* completed during the specified time. If a timeout occurs, the channel
* program is terminated via xsch(), hsch() or csch().
*/
extern int ccw_device_start_timeout(struct ccw_device *, struct ccw1 *,
unsigned long, __u8, unsigned long, int);
extern int ccw_device_resume(struct ccw_device *);
extern int ccw_device_halt(struct ccw_device *, unsigned long);
extern int ccw_device_clear(struct ccw_device *, unsigned long);
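A hedged usage sketch of the new ccw_device_start_timeout() entry point (not part of this patch). The command code, interruption parameter, path mask and flag values are illustrative assumptions, and the locking normally required around the call is omitted.

/*
 * Hypothetical driver fragment, illustration only: start a single-CCW
 * channel program and let common I/O terminate it via xsch()/hsch()/csch()
 * if it has not completed within 30 seconds.
 */
static int example_read_page(struct ccw_device *cdev, void *buf)
{
	struct ccw1 ccw;

	ccw.cmd_code = 0x02;			/* assumed READ command */
	ccw.flags    = 0;			/* single CCW, no chaining */
	ccw.count    = 4096;			/* transfer one page */
	ccw.cda      = (__u32) __pa(buf);	/* data address */

	/* intparm 0, default path mask, no flags, 30 second expiry */
	return ccw_device_start_timeout(cdev, &ccw, 0, 0, 0, 30 * HZ);
}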
......
......@@ -30,17 +30,29 @@
static inline unsigned int
csum_partial(const unsigned char * buff, int len, unsigned int sum)
{
register_pair rp;
/*
* Experiments with ethernet and slip connections show that buf
* is aligned on either a 2-byte or 4-byte boundary.
*/
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) buff;
rp.subreg.odd = (unsigned long) len;
__asm__ __volatile__ (
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum), "+&a" (rp) : : "cc" );
#else /* __s390x__ */
__asm__ __volatile__ (
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum)
: "d" (buff), "d" (len)
: "cc", "2", "3" );
#endif /* __s390x__ */
return sum;
}
......@@ -50,6 +62,7 @@ csum_partial(const unsigned char * buff, int len, unsigned int sum)
static inline unsigned int
csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
{
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) buff;
......@@ -58,6 +71,16 @@ csum_partial_inline(const unsigned char * buff, int len, unsigned int sum)
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum), "+&a" (rp) : : "cc" );
#else /* __s390x__ */
__asm__ __volatile__ (
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on longs */
" jo 0b\n"
: "+&d" (sum)
: "d" (buff), "d" (len)
: "cc", "2", "3" );
#endif /* __s390x__ */
return sum;
}
......@@ -100,6 +123,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum
static inline unsigned short
csum_fold(unsigned int sum)
{
#ifndef __s390x__
register_pair rp;
__asm__ __volatile__ (
......@@ -110,6 +134,16 @@ csum_fold(unsigned int sum)
" alr %0,%1\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum), "=d" (rp) : : "cc" );
#else /* __s390x__ */
__asm__ __volatile__ (
" sr 3,3\n" /* %0 = H*65536 + L */
" lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */
" srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */
" alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */
" alr %0,2\n" /* %0 = H+L+C L+H */
" srl %0,16\n" /* %0 = H+L+C */
: "+&d" (sum) : : "cc", "2", "3");
#endif /* __s390x__ */
return ((unsigned short) ~sum);
}
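For reference, a portable C sketch (not from this patch) of what both the 31-bit and 64-bit variants of csum_fold() compute: the 32-bit one's-complement accumulator is folded to 16 bits with end-around carry and the result is inverted.

/*
 * Illustration only, equivalent in effect to the inline assembly above:
 * fold the 32-bit checksum accumulator to 16 bits, absorbing carries,
 * then return its one's complement.
 */
static inline unsigned short ref_csum_fold(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high and low halves */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (unsigned short) ~sum;
}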
......@@ -121,8 +155,9 @@ csum_fold(unsigned int sum)
static inline unsigned short
ip_fast_csum(unsigned char *iph, unsigned int ihl)
{
register_pair rp;
unsigned long sum;
#ifndef __s390x__
register_pair rp;
rp.subreg.even = (unsigned long) iph;
rp.subreg.odd = (unsigned long) ihl*4;
......@@ -131,6 +166,17 @@ ip_fast_csum(unsigned char *iph, unsigned int ihl)
"0: cksm %0,%1\n" /* do checksum on longs */
" jo 0b\n"
: "=&d" (sum), "+&a" (rp) : : "cc" );
#else /* __s390x__ */
__asm__ __volatile__ (
" slgr %0,%0\n" /* set sum to zero */
" lgr 2,%1\n" /* address in gpr 2 */
" lgfr 3,%2\n" /* length in gpr 3 */
"0: cksm %0,2\n" /* do checksum on ints */
" jo 0b\n"
: "=&d" (sum)
: "d" (iph), "d" (ihl*4)
: "cc", "2", "3" );
#endif /* __s390x__ */
return csum_fold(sum);
}
......@@ -143,6 +189,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
unsigned short len, unsigned short proto,
unsigned int sum)
{
#ifndef __s390x__
__asm__ __volatile__ (
" alr %0,%1\n" /* sum += saddr */
" brc 12,0f\n"
......@@ -163,6 +210,28 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
: "+&d" (sum)
: "d" (((unsigned int) len<<16) + (unsigned int) proto)
: "cc" );
#else /* __s390x__ */
__asm__ __volatile__ (
" lgfr %0,%0\n"
" algr %0,%1\n" /* sum += saddr */
" brc 12,0f\n"
" aghi %0,1\n" /* add carry */
"0: algr %0,%2\n" /* sum += daddr */
" brc 12,1f\n"
" aghi %0,1\n" /* add carry */
"1: algfr %0,%3\n" /* sum += (len<<16) + proto */
" brc 12,2f\n"
" aghi %0,1\n" /* add carry */
"2: srlg 0,%0,32\n"
" alr %0,0\n" /* fold to 32 bits */
" brc 12,3f\n"
" ahi %0,1\n" /* add carry */
"3: llgfr %0,%0"
: "+&d" (sum)
: "d" (saddr), "d" (daddr),
"d" (((unsigned int) len<<16) + (unsigned int) proto)
: "cc", "0" );
#endif /* __s390x__ */
return sum;
}
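Likewise purely illustrative: the pseudo-header accumulation that both csum_tcpudp_nofold() variants perform, restated with a 64-bit intermediate so the explicit add-carry branches disappear. Argument widths are narrowed to 32 bits for clarity.

/*
 * Plain-C restatement, illustration only: add the IP pseudo-header
 * (source, destination, length and protocol) into the running
 * one's-complement sum, folding carries at the end.
 */
static inline unsigned int ref_csum_tcpudp_nofold(unsigned int saddr,
						  unsigned int daddr,
						  unsigned short len,
						  unsigned short proto,
						  unsigned int sum)
{
	unsigned long long s = sum;

	s += saddr;
	s += daddr;
	s += ((unsigned int) len << 16) + proto;
	s = (s & 0xffffffffULL) + (s >> 32);	/* fold carries back in */
	s = (s & 0xffffffffULL) + (s >> 32);
	return (unsigned int) s;
}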
......
#ifndef _ASM_S390X_COMPAT_H
#define _ASM_S390X_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 compat_uid_t;
typedef u16 compat_gid_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
compat_uid_t st_uid;
compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
};
struct compat_statfs {
s32 f_type;
s32 f_bsize;
s32 f_blocks;
s32 f_bfree;
s32 f_bavail;
s32 f_files;
s32 f_ffree;
compat_fsid_t f_fsid;
s32 f_namelen;
s32 f_spare[6];
};
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
typedef u32 compat_uptr_t;
static inline void *compat_ptr(compat_uptr_t uptr)
{
return (void *)(unsigned long)(uptr & 0x7fffffffUL);
}
#endif /* _ASM_S390X_COMPAT_H */
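A hypothetical sketch of how compat_ptr() is meant to be used (the helper name and surrounding code are invented for illustration): a pointer that arrives as a 32-bit field inside a compat structure is widened back to a kernel pointer before the normal uaccess helpers touch it, while direct syscall pointer arguments need no such treatment.

/*
 * Illustration only: fetch a user buffer whose address was passed as a
 * 32-bit quantity from a 31-bit compat task.
 */
static int example_fetch_name(compat_uptr_t uname, char *buf, int len)
{
	char *from = compat_ptr(uname);	/* clear bit 31, widen to a pointer */

	if (copy_from_user(buf, from, len))
		return -EFAULT;
	return 0;
}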
#ifndef __S390_DIV64
#define __S390_DIV64
#ifndef __s390x__
/* for do_div "base" needs to be smaller than 2^31-1 */
#define do_div(n, base) ({ \
unsigned long long __n = (n); \
unsigned long __r; \
......@@ -41,4 +42,14 @@
__r; \
})
#else /* __s390x__ */
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
#endif /* __s390x__ */
#endif
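A brief, hypothetical usage sketch of do_div(): it divides its 64-bit first argument in place and evaluates to the remainder, so a caller can split a value without a full 64-by-64 division. The divisor 10^9 stays well below the 2^31-1 limit noted for the 31-bit variant.

/*
 * Illustration only: split a nanosecond count into whole seconds
 * (left in ns) and the leftover nanoseconds (returned by do_div).
 */
static inline void example_report_uptime(unsigned long long ns)
{
	unsigned int rem;

	rem = do_div(ns, 1000000000U);	/* ns now holds whole seconds */
	printk("uptime: %llu.%09u s\n", ns, rem);
}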
......@@ -21,8 +21,8 @@ extern __u8 _ebcasc[]; /* EBCDIC -> ASCII conversion table */
extern __u8 _ebc_tolower[]; /* EBCDIC -> lowercase */
extern __u8 _ebc_toupper[]; /* EBCDIC -> uppercase */
extern __inline__
void codepage_convert(const __u8 *codepage, volatile __u8 * addr, int nr)
extern __inline__ void
codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
{
if (nr-- <= 0)
return;
......
......@@ -23,7 +23,11 @@ typedef s390_regs elf_gregset_t;
/*
* These are used to set parameters in the core dumps.
*/
#ifndef __s390x__
#define ELF_CLASS ELFCLASS32
#else /* __s390x__ */
#define ELF_CLASS ELFCLASS64
#endif /* __s390x__ */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
......@@ -36,8 +40,16 @@ typedef s390_regs elf_gregset_t;
/* For SVR4/S390 the function pointer to be registered with `atexit` is
passed in R14. */
#ifndef __s390x__
#define ELF_PLAT_INIT(_r, load_addr) \
_r->gprs[14] = 0
#else /* __s390x__ */
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->gprs[14] = 0; \
clear_thread_flag(TIF_31BIT); \
} while(0)
#endif /* __s390x__ */
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
......@@ -47,9 +59,13 @@ typedef s390_regs elf_gregset_t;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#ifndef __s390x__
#define ELF_ET_DYN_BASE ((TASK_SIZE & 0x80000000) \
? TASK_SIZE / 3 * 2 \
: 2 * TASK_SIZE / 3)
#else /* __s390x__ */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif /* __s390x__ */
/* Wow, the "main" arch needs arch dependent functions too.. :) */
......@@ -76,7 +92,18 @@ typedef s390_regs elf_gregset_t;
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
#ifndef __s390x__
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
#else /* __s390x__ */
#define SET_PERSONALITY(ex, ibcs2) \
do { \
if (ibcs2) \
set_personality(PER_SVR4); \
else if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \
clear_thread_flag(TIF_31BIT); \
} while (0)
#endif /* __s390x__ */
#endif
#endif
......@@ -42,10 +42,11 @@
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
#ifndef __s390x__
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif /* ! __s390x__ */
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
......@@ -82,6 +83,7 @@ struct flock {
pid_t l_pid;
};
#ifndef __s390x__
struct flock64 {
short l_type;
short l_whence;
......@@ -89,6 +91,6 @@ struct flock64 {
loff_t l_len;
pid_t l_pid;
};
#endif
#define F_LINUX_SPECIFIC_BASE 1024
#endif
/*
* include/asm-s390/gdb-stub.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/
#ifndef __S390_GDB_STUB__
#define __S390_GDB_STUB__
#include <linux/config.h>
#if CONFIG_REMOTE_DEBUG
#include <asm/ptrace.h>
extern int gdb_stub_initialised;
extern void gdb_stub_handle_exception(struct gdb_pt_regs *regs,int sigval);
struct net_device;
extern struct net_device *gdb_dev;
void gdb_do_timers(void);
extern int putDebugChar(char c); /* write a single character */
extern char getDebugChar(void); /* read and return a single char */
#endif
#endif
......@@ -21,7 +21,7 @@
#include <asm/cio.h>
#include <asm/uaccess.h>
#ifdef CONFIG_ARCH_S390X
#ifdef __s390x__
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#else
#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
......@@ -34,7 +34,7 @@
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
#ifdef __s390x__
return ((__pa(vaddr) + length) >> 31) != 0;
#else
return 0;
......@@ -48,7 +48,7 @@ idal_is_needed(void *vaddr, unsigned int length)
static inline unsigned int
idal_nr_words(void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
#ifdef __s390x__
if (idal_is_needed(vaddr, length))
return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
......@@ -62,7 +62,7 @@ idal_nr_words(void *vaddr, unsigned int length)
static inline unsigned long *
idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
#ifdef __s390x__
unsigned long paddr;
unsigned int cidaw;
......@@ -86,7 +86,7 @@ idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length)
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
#if defined (CONFIG_ARCH_S390X)
#ifdef __s390x__
unsigned int nridaws;
unsigned long *idal;
......@@ -113,7 +113,7 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
#if defined(CONFIG_ARCH_S390X)
#ifdef __s390x__
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
......@@ -190,7 +190,7 @@ idal_buffer_free(struct idal_buffer *ib)
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
#ifdef CONFIG_ARCH_S390X
#ifdef __s390x__
return ib->size > (4096ul << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
#else
......
......@@ -27,9 +27,16 @@
extern inline unsigned long virt_to_phys(volatile void * address)
{
unsigned long real_address;
__asm__ (" lra %0,0(%1)\n"
__asm__ (
#ifndef __s390x__
" lra %0,0(%1)\n"
" jz 0f\n"
" sr %0,%0\n"
#else /* __s390x__ */
" lrag %0,0(%1)\n"
" jz 0f\n"
" slgr %0,%0\n"
#endif /* __s390x__ */
"0:"
: "=a" (real_address) : "a" (address) : "cc" );
return real_address;
......
......@@ -22,6 +22,7 @@ struct ipc_kludge {
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
#define SEMTIMEDOP 4
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
......
......@@ -21,7 +21,9 @@ struct ipc64_perm
__kernel_mode_t mode;
unsigned short __pad1;
unsigned short seq;
#ifndef __s390x__
unsigned short __pad2;
#endif /* ! __s390x__ */
unsigned long __unused1;
unsigned long __unused2;
};
......
......@@ -11,6 +11,7 @@
#ifndef _ASM_S390_LOWCORE_H
#define _ASM_S390_LOWCORE_H
#ifndef __s390x__
#define __LC_EXT_OLD_PSW 0x018
#define __LC_SVC_OLD_PSW 0x020
#define __LC_PGM_OLD_PSW 0x028
......@@ -21,43 +22,76 @@
#define __LC_PGM_NEW_PSW 0x068
#define __LC_MCK_NEW_PSW 0x070
#define __LC_IO_NEW_PSW 0x078
#else /* !__s390x__ */
#define __LC_EXT_OLD_PSW 0x0130
#define __LC_SVC_OLD_PSW 0x0140
#define __LC_PGM_OLD_PSW 0x0150
#define __LC_MCK_OLD_PSW 0x0160
#define __LC_IO_OLD_PSW 0x0170
#define __LC_EXT_NEW_PSW 0x01b0
#define __LC_SVC_NEW_PSW 0x01c0
#define __LC_PGM_NEW_PSW 0x01d0
#define __LC_MCK_NEW_PSW 0x01e0
#define __LC_IO_NEW_PSW 0x01f0
#endif /* !__s390x__ */
#define __LC_EXT_PARAMS 0x080
#define __LC_CPU_ADDRESS 0x084
#define __LC_EXT_INT_CODE 0x086
#define __LC_SVC_INT_CODE 0x08B
#define __LC_SVC_ILC 0x088
#define __LC_SVC_INT_CODE 0x08A
#define __LC_PGM_ILC 0x08C
#define __LC_PGM_INT_CODE 0x08E
#define __LC_TRANS_EXC_ADDR 0x090
#define __LC_SUBCHANNEL_ID 0x0B8
#define __LC_SUBCHANNEL_NR 0x0BA
#define __LC_IO_INT_PARM 0x0BC
#define __LC_IO_INT_WORD 0x0C0
#define __LC_MCCK_CODE 0x0E8
#define __LC_AREGS_SAVE_AREA 0x120
#define __LC_CREGS_SAVE_AREA 0x1C0
#define __LC_RETURN_PSW 0x200
#define __LC_IRB 0x208
#define __LC_IRB 0x210
#define __LC_DIAG44_OPCODE 0x250
#define __LC_SAVE_AREA 0xC00
#ifndef __s390x__
#define __LC_KERNEL_STACK 0xC40
#define __LC_ASYNC_STACK 0xC44
#define __LC_CPUID 0xC60
#define __LC_CPUADDR 0xC68
#define __LC_IPLDEV 0xC7C
#define __LC_JIFFY_TIMER 0xC80
#else /* __s390x__ */
#define __LC_KERNEL_STACK 0xD40
#define __LC_ASYNC_STACK 0xD48
#define __LC_CPUID 0xD90
#define __LC_CPUADDR 0xD98
#define __LC_IPLDEV 0xDB8
#define __LC_JIFFY_TIMER 0xDC0
#endif /* __s390x__ */
#define __LC_PANIC_MAGIC 0xE00
#ifndef __s390x__
#define __LC_PFAULT_INTPARM 0x080
#define __LC_AREGS_SAVE_AREA 0x120
#define __LC_CREGS_SAVE_AREA 0x1C0
#else /* __s390x__ */
#define __LC_PFAULT_INTPARM 0x11B8
#define __LC_AREGS_SAVE_AREA 0x1340
#define __LC_CREGS_SAVE_AREA 0x1380
#endif /* __s390x__ */
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <asm/processor.h>
#include <linux/types.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/sigp.h>
void restart_int_handler(void);
......@@ -69,6 +103,7 @@ void io_int_handler(void);
struct _lowcore
{
#ifndef __s390x__
/* prefix area: defined by architecture */
psw_t restart_psw; /* 0x000 */
__u32 ccw2[4]; /* 0x008 */
......@@ -142,6 +177,101 @@ struct _lowcore
/* Align to the top 1k of prefix area */
__u8 pad12[0x1000-0xe04]; /* 0xe04 */
#else /* !__s390x__ */
/* prefix area: defined by architecture */
__u32 ccw1[2]; /* 0x000 */
__u32 ccw2[4]; /* 0x008 */
__u8 pad1[0x80-0x18]; /* 0x018 */
__u32 ext_params; /* 0x080 */
__u16 cpu_addr; /* 0x084 */
__u16 ext_int_code; /* 0x086 */
__u16 svc_ilc; /* 0x088 */
__u16 svc_code; /* 0x08a */
__u16 pgm_ilc; /* 0x08c */
__u16 pgm_code; /* 0x08e */
__u32 data_exc_code; /* 0x090 */
__u16 mon_class_num; /* 0x094 */
__u16 per_perc_atmid; /* 0x096 */
addr_t per_address; /* 0x098 */
__u8 exc_access_id; /* 0x0a0 */
__u8 per_access_id; /* 0x0a1 */
__u8 op_access_id; /* 0x0a2 */
__u8 ar_access_id; /* 0x0a3 */
__u8 pad2[0xA8-0xA4]; /* 0x0a4 */
addr_t trans_exc_code; /* 0x0A0 */
addr_t monitor_code; /* 0x09c */
__u16 subchannel_id; /* 0x0b8 */
__u16 subchannel_nr; /* 0x0ba */
__u32 io_int_parm; /* 0x0bc */
__u32 io_int_word; /* 0x0c0 */
__u8 pad3[0xc8-0xc4]; /* 0x0c4 */
__u32 stfl_fac_list; /* 0x0c8 */
__u8 pad4[0xe8-0xcc]; /* 0x0cc */
__u32 mcck_interruption_code[2]; /* 0x0e8 */
__u8 pad5[0xf4-0xf0]; /* 0x0f0 */
__u32 external_damage_code; /* 0x0f4 */
addr_t failing_storage_address; /* 0x0f8 */
__u8 pad6[0x120-0x100]; /* 0x100 */
psw_t restart_old_psw; /* 0x120 */
psw_t external_old_psw; /* 0x130 */
psw_t svc_old_psw; /* 0x140 */
psw_t program_old_psw; /* 0x150 */
psw_t mcck_old_psw; /* 0x160 */
psw_t io_old_psw; /* 0x170 */
__u8 pad7[0x1a0-0x180]; /* 0x180 */
psw_t restart_psw; /* 0x1a0 */
psw_t external_new_psw; /* 0x1b0 */
psw_t svc_new_psw; /* 0x1c0 */
psw_t program_new_psw; /* 0x1d0 */
psw_t mcck_new_psw; /* 0x1e0 */
psw_t io_new_psw; /* 0x1f0 */
psw_t return_psw; /* 0x200 */
__u8 irb[64]; /* 0x210 */
__u32 diag44_opcode; /* 0x250 */
__u8 pad8[0xc00-0x254]; /* 0x254 */
/* System info area */
__u64 save_area[16]; /* 0xc00 */
__u8 pad9[0xd40-0xc80]; /* 0xc80 */
__u64 kernel_stack; /* 0xd40 */
__u64 async_stack; /* 0xd48 */
/* entry.S sensitive area start */
__u8 pad10[0xd80-0xd50]; /* 0xd64 */
struct cpuinfo_S390 cpu_data; /* 0xd80 */
__u32 ipl_device; /* 0xdb8 */
__u32 pad11; /* 0xdbc */
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer; /* 0xdc0 */
__u64 ext_call_fast; /* 0xdc8 */
__u8 pad12[0xe00-0xdd0]; /* 0xdd0 */
/* 0xe00 is used as indicator for dump tools */
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
__u8 pad13[0x1200-0xe04]; /* 0xe04 */
/* System info area */
__u64 floating_pt_save_area[16]; /* 0x1200 */
__u64 gpregs_save_area[16]; /* 0x1280 */
__u32 st_status_fixed_logout[4]; /* 0x1300 */
__u8 pad14[0x1318-0x1310]; /* 0x1310 */
__u32 prefixreg_save_area; /* 0x1318 */
__u32 fpt_creg_save_area; /* 0x131c */
__u8 pad15[0x1324-0x1320]; /* 0x1320 */
__u32 tod_progreg_save_area; /* 0x1324 */
__u32 cpu_timer_save_area[2]; /* 0x1328 */
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u8 pad16[0x1340-0x1338]; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
/* align to the top of the prefix area */
__u8 pad17[0x2000-0x1400]; /* 0x1400 */
#endif /* !__s390x__ */
} __attribute__((packed)); /* End structure*/
#define S390_lowcore (*((struct _lowcore *) 0))
......
......@@ -27,12 +27,20 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
unsigned long pgd;
if (prev != next) {
#ifndef __s390x__
pgd = (__pa(next->pgd)&PAGE_MASK) |
(_SEGMENT_TABLE|USER_STD_MASK);
/* Load page tables */
asm volatile(" lctl 7,7,%0\n" /* secondary space */
" lctl 13,13,%0\n" /* home space */
: : "m" (pgd) );
#else /* __s390x__ */
pgd = (__pa(next->pgd)&PAGE_MASK) | (_REGION_TABLE|USER_STD_MASK);
/* Load page tables */
asm volatile(" lctlg 7,7,%0\n" /* secondary space */
" lctlg 13,13,%0\n" /* home space */
: : "m" (pgd) );
#endif /* __s390x__ */
}
set_bit(cpu, &next->cpu_vm_mask);
}
......
......@@ -28,7 +28,7 @@ struct mod_arch_specific
struct mod_arch_syminfo *syminfo;
};
#ifdef CONFIG_ARCH_S390X
#ifdef __s390x__
#define ElfW(x) Elf64_ ## x
#define ELFW(x) ELF64_ ## x
#else
......@@ -36,8 +36,11 @@ struct mod_arch_specific
#define ELFW(x) ELF32_ ## x
#endif
#define Elf_Addr ElfW(Addr)
#define Elf_Rela ElfW(Rela)
#define Elf_Shdr ElfW(Shdr)
#define Elf_Sym ElfW(Sym)
#define Elf_Ehdr ElfW(Ehdr)
#define ELF_R_SYM ELFW(R_SYM)
#define ELF_R_TYPE ELFW(R_TYPE)
#endif /* _ASM_S390_MODULE_H */
......@@ -14,11 +14,17 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
#ifndef __s390x__
unsigned long __unused1;
#endif /* ! __s390x__ */
__kernel_time_t msg_rtime; /* last msgrcv time */
#ifndef __s390x__
unsigned long __unused2;
#endif /* ! __s390x__ */
__kernel_time_t msg_ctime; /* last change time */
#ifndef __s390x__
unsigned long __unused3;
#endif /* ! __s390x__ */
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
......
......@@ -20,6 +20,8 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#ifndef __s390x__
static inline void clear_page(void *page)
{
register_pair rp;
......@@ -59,6 +61,48 @@ static inline void copy_page(void *to, void *from)
: "memory" );
}
#else /* __s390x__ */
static inline void clear_page(void *page)
{
asm volatile (" lgr 2,%0\n"
" lghi 3,4096\n"
" slgr 1,1\n"
" mvcl 2,0"
: : "a" ((void *) (page))
: "memory", "cc", "1", "2", "3" );
}
static inline void copy_page(void *to, void *from)
{
if (MACHINE_HAS_MVPG)
asm volatile (" sgr 0,0\n"
" mvpg %0,%1"
: : "a" ((void *)(to)), "a" ((void *)(from))
: "memory", "cc", "0" );
else
asm volatile (" mvc 0(256,%0),0(%1)\n"
" mvc 256(256,%0),256(%1)\n"
" mvc 512(256,%0),512(%1)\n"
" mvc 768(256,%0),768(%1)\n"
" mvc 1024(256,%0),1024(%1)\n"
" mvc 1280(256,%0),1280(%1)\n"
" mvc 1536(256,%0),1536(%1)\n"
" mvc 1792(256,%0),1792(%1)\n"
" mvc 2048(256,%0),2048(%1)\n"
" mvc 2304(256,%0),2304(%1)\n"
" mvc 2560(256,%0),2560(%1)\n"
" mvc 2816(256,%0),2816(%1)\n"
" mvc 3072(256,%0),3072(%1)\n"
" mvc 3328(256,%0),3328(%1)\n"
" mvc 3584(256,%0),3584(%1)\n"
" mvc 3840(256,%0),3840(%1)\n"
: : "a"((void *)(to)),"a"((void *)(from))
: "memory" );
}
#endif /* __s390x__ */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
......@@ -79,7 +123,15 @@ extern __inline__ int get_order(unsigned long size)
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define pgprot_val(x) ((x).pgprot)
#ifndef __s390x__
typedef struct { unsigned long pmd; } pmd_t;
typedef struct {
unsigned long pgd0;
......@@ -87,12 +139,23 @@ typedef struct {
unsigned long pgd2;
unsigned long pgd3;
} pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd0)
#define pgprot_val(x) ((x).pgprot)
#else /* __s390x__ */
typedef struct {
unsigned long pmd0;
unsigned long pmd1;
} pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pmd_val(x) ((x).pmd0)
#define pmd_val1(x) ((x).pmd1)
#define pgd_val(x) ((x).pgd)
#endif /* __s390x__ */
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
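As an aside, not from the patch: the single-member structs above exist so the compiler catches accidental mixing of raw words and page-table entries; conversions must go through __pte()/pte_val() and friends.

/*
 * Illustration only: constructing a pte_t from a raw word requires the
 * explicit __pte() wrapper; assigning an integer directly would be a
 * compile-time error because pte_t is a struct, not an integer type.
 */
static inline pte_t example_empty_pte(void)
{
	pte_t pte = __pte(0UL);		/* explicit conversion from a word */

	/* pte = 0UL; would not compile */
	return pte;
}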
......
/*
* include/asm-s390/bugs.h
* include/asm-s390/pgalloc.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
......@@ -32,35 +32,79 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd_t *pgd;
int i;
#ifndef __s390x__
pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
if (pgd != NULL)
for (i = 0; i < USER_PTRS_PER_PGD; i++)
pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else /* __s390x__ */
pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
if (pgd != NULL)
for (i = 0; i < PTRS_PER_PGD; i++)
pgd_clear(pgd + i);
#endif /* __s390x__ */
return pgd;
}
static inline void pgd_free(pgd_t *pgd)
{
#ifndef __s390x__
free_pages((unsigned long) pgd, 1);
#else /* __s390x__ */
free_pages((unsigned long) pgd, 2);
#endif /* __s390x__ */
}
#ifndef __s390x__
/*
* page middle directory allocation/free routines.
* We don't use pmd cache, so these are dummy routines. This
* We use pmd cache only on s390x, so these are dummy routines. This
* code never triggers because the pgd will always be present.
*/
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#else /* __s390x__ */
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
pmd_t *pmd;
int i;
pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2);
if (pmd != NULL) {
for (i=0; i < PTRS_PER_PMD; i++)
pmd_clear(pmd+i);
}
return pmd;
}
static inline void pmd_free (pmd_t *pmd)
{
free_pages((unsigned long) pmd, 2);
}
#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}
#endif /* __s390x__ */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}
static inline void
......@@ -122,11 +166,16 @@ static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
#ifndef __s390x__
if (!(pte_val(pte) & _PAGE_INVALID)) {
/* S390 has 1mb segments, we are emulating 4MB segments */
pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
__asm__ __volatile__ ("ipte %0,%1" : : "a" (pto), "a" (address));
}
#else /* __s390x__ */
if (!(pte_val(pte) & _PAGE_INVALID))
__asm__ __volatile__ ("ipte %0,%1" : : "a" (ptep), "a" (address));
#endif /* __s390x__ */
pte_clear(ptep);
return pte;
}
......