Commit 71000494 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390/s390x unification (5/7)

Merge s390x and s390 into one architecture.
parent 042b54a1
@@ -128,15 +128,28 @@ void do_settimeofday(struct timeval *tv)
write_sequnlock_irq(&xtime_lock);
}
static inline __u32 div64_32(__u64 dividend, __u32 divisor)
#ifndef CONFIG_ARCH_S390X
static inline __u32
__calculate_ticks(__u64 elapsed)
{
register_pair rp;
rp.pair = dividend;
asm ("dr %0,%1" : "+d" (rp) : "d" (divisor));
rp.pair = elapsed >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (CLK_TICKS_PER_JIFFY >> 1));
return rp.subreg.odd;
}
#else /* CONFIG_ARCH_S390X */
static inline __u32
__calculate_ticks(__u64 elapsed)
{
return elapsed / CLK_TICKS_PER_JIFFY;
}
#endif /* CONFIG_ARCH_S390X */
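The 31-bit branch above relies on "dr" dividing the 64-bit even/odd register pair by a 32-bit value; halving both operands first keeps the signed dividend non-negative and, since CLK_TICKS_PER_JIFFY is even (the old div64_32 call already depended on that), it leaves the quotient unchanged. A minimal host-side sketch of that identity, illustrative only; check_halved_divide is a name made up here, not part of the patch:

#include <assert.h>

static void check_halved_divide(unsigned long long elapsed, unsigned int ticks)
{
	assert(ticks != 0 && (ticks & 1) == 0);      /* divisor must be even and non-zero */
	assert((elapsed >> 1) / (ticks >> 1) == elapsed / ticks);  /* same quotient */
}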
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -150,7 +163,7 @@ static void do_comparator_interrupt(struct pt_regs *regs, __u16 error_code)
asm volatile ("STCK 0(%0)" : : "a" (&tmp) : "memory", "cc");
tmp = tmp - S390_lowcore.jiffy_timer;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than one tick ? */
ticks = div64_32(tmp >> 1, CLK_TICKS_PER_JIFFY >> 1);
ticks = __calculate_ticks(tmp);
S390_lowcore.jiffy_timer +=
CLK_TICKS_PER_JIFFY * (__u64) ticks;
} else {
@@ -175,7 +188,7 @@ static void do_comparator_interrupt(struct pt_regs *regs, __u16 error_code)
tmp = S390_lowcore.jiffy_timer - xtime_cc;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
xticks = div64_32(tmp >> 1, CLK_TICKS_PER_JIFFY >> 1);
xticks = __calculate_ticks(tmp);
xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
} else {
xticks = 1;
@@ -208,9 +221,9 @@ void init_cpu_timer(void)
timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
asm volatile ("SCKC %0" : : "m" (timer));
/* allow clock comparator timer interrupt */
asm volatile ("STCTL 0,0,%0" : "=m" (cr0) : : "memory");
__ctl_store(cr0, 0, 0);
cr0 |= 0x800;
asm volatile ("LCTL 0,0,%0" : : "m" (cr0) : "memory");
__ctl_load(cr0, 0, 0);
}
/*
......
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -53,6 +54,7 @@ int sysctl_userprocess_debug = 0;
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_segment_exception;
extern pgm_check_handler_t do_region_exception;
extern pgm_check_handler_t do_page_exception;
extern pgm_check_handler_t do_pseudo_page_fault;
#ifdef CONFIG_PFAULT
@@ -62,31 +64,37 @@ extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_ARCH_S390X
#define RET_ADDR 56
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_ARCH_S390X */
#define RET_ADDR 112
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_ARCH_S390X */
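For orientation, a sketch of the stack-frame layout the RET_ADDR offsets above point into; the field names are invented for this illustration, only the offsets the code depends on (back chain at 0, saved r14 at byte 56 on 31-bit and 112 on 64-bit) are taken from the patch:

struct s390_stack_frame {               /* illustrative, not from the patch */
	unsigned long back_chain;       /* offset 0: previous frame, 0 ends the walk */
	unsigned long end_of_stack;
	unsigned long glue[2];
	unsigned long scratch[2];
	unsigned long gprs[10];         /* saved r6..r15; r14 = gprs[8] -> byte 56 or 112 */
};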
void show_trace(unsigned long * stack)
{
unsigned long backchain, low_addr, high_addr, ret_addr;
int i;
if (!stack)
stack = (unsigned long*)&stack;
stack = *stack_pointer;
printk("Call Trace: ");
printk("Call Trace:\n");
low_addr = ((unsigned long) stack) & PSW_ADDR_INSN;
high_addr = (low_addr & (-THREAD_SIZE)) + THREAD_SIZE;
/* Skip the first frame (biased stack) */
backchain = *((unsigned long *) low_addr) & PSW_ADDR_INSN;
/* Print up to 8 lines */
for (i = 0; i < 8; i++) {
if (backchain < low_addr || backchain >= high_addr)
break;
ret_addr = *((unsigned long *) (backchain+56)) & PSW_ADDR_INSN;
if (!kernel_text_address(ret_addr))
break;
if (i && ((i % 6) == 0))
printk("\n ");
printk("[<%08lx>] ", ret_addr);
while (backchain > low_addr && backchain <= high_addr) {
ret_addr = *((unsigned long *) (backchain+RET_ADDR)) & PSW_ADDR_INSN;
printk(" [<%016lx>] ", ret_addr);
print_symbol("%s\n", ret_addr);
low_addr = backchain;
backchain = *((unsigned long *) backchain) & PSW_ADDR_INSN;
}
@@ -113,15 +121,15 @@ void show_stack(unsigned long *sp)
// back trace for this cpu.
if(sp == NULL)
sp = (unsigned long*) &sp;
sp = *stack_pointer;
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
if (i && ((i % 8) == 0))
if (i && ((i * sizeof (long) % 32) == 0))
printk("\n ");
printk("%08lx ", *stack++);
printk("%p ", (void *)*stack++);
}
printk("\n");
show_trace(sp);
@@ -142,17 +150,18 @@ void show_registers(struct pt_regs *regs)
int i;
mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
printk("%s PSW : %08lx %08lx\n",
mode, (unsigned long) regs->psw.mask,
(unsigned long) regs->psw.addr);
printk("%s GPRS: %08x %08x %08x %08x\n", mode,
printk("%s PSW : %p %p\n",
mode, (void *) regs->psw.mask,
(void *) regs->psw.addr);
printk("%s GPRS: " FOURLONG, mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" %08x %08x %08x %08x\n",
printk(" " FOURLONG,
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
printk(" %08x %08x %08x %08x\n",
printk(" " FOURLONG,
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
printk(" %08x %08x %08x %08x\n",
printk(" " FOURLONG,
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
printk("%s ACRS: %08x %08x %08x %08x\n", mode,
regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
printk(" %08x %08x %08x %08x\n",
@@ -191,21 +200,21 @@ char *task_show_regs(struct task_struct *task, char *buffer)
struct pt_regs *regs;
regs = __KSTK_PTREGS(task);
buffer += sprintf(buffer, "task: %08lx, ksp: %08x\n",
(unsigned long) task, task->thread.ksp);
buffer += sprintf(buffer, "User PSW : %08lx %08lx\n",
(unsigned long) regs->psw.mask,
(unsigned long) regs->psw.addr);
buffer += sprintf(buffer, "User GPRS: %08x %08x %08x %08x\n",
buffer += sprintf(buffer, "task: %p, ksp: %p\n",
task, (void *)task->thread.ksp);
buffer += sprintf(buffer, "User PSW : %p %p\n",
(void *) regs->psw.mask, (void *)regs->psw.addr);
buffer += sprintf(buffer, "User GPRS: " FOURLONG,
regs->gprs[0], regs->gprs[1],
regs->gprs[2], regs->gprs[3]);
buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
buffer += sprintf(buffer, " " FOURLONG,
regs->gprs[4], regs->gprs[5],
regs->gprs[6], regs->gprs[7]);
buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
buffer += sprintf(buffer, " " FOURLONG,
regs->gprs[8], regs->gprs[9],
regs->gprs[10], regs->gprs[11]);
buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
buffer += sprintf(buffer, " " FOURLONG,
regs->gprs[12], regs->gprs[13],
regs->gprs[14], regs->gprs[15]);
buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
@@ -271,9 +280,9 @@ static void inline do_trap(long interruption_code, int signr, char *str,
#endif
} else {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->psw.addr & 0x7fffffff);
fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
if (fixup)
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE31;
regs->psw.addr = fixup->fixup | ~PSW_ADDR_INSN;
else
die(str, regs, interruption_code);
}
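The PSW_ADDR_INSN rewrite above folds the 31-bit and 64-bit fixup paths into one expression; the header values it relies on are assumed to be roughly the following (shown for illustration, not quoted from this patch):

#ifndef CONFIG_ARCH_S390X
#define PSW_ADDR_INSN 0x7fffffffUL          /* ~PSW_ADDR_INSN == 0x80000000UL, the AMODE-31 bit */
#else
#define PSW_ADDR_INSN 0xffffffffffffffffUL  /* ~PSW_ADDR_INSN == 0, so the OR is a no-op */
#endif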
@@ -376,7 +385,7 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
__u16 *location;
int signal = 0;
location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc);
location = (__u16 *) get_check_address(regs);
/*
* We got all needed information from the lowcore and can
@@ -428,7 +437,6 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
}
#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
@@ -606,33 +614,40 @@ void __init trap_init(void)
pgm_check_table[9] = &divide_exception;
pgm_check_table[0x10] = &do_segment_exception;
pgm_check_table[0x11] = &do_page_exception;
pgm_check_table[0x10] = &do_segment_exception;
pgm_check_table[0x11] = &do_page_exception;
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
#ifndef CONFIG_ARCH_S390X
pgm_check_table[0x14] = &do_pseudo_page_fault;
#else /* CONFIG_ARCH_S390X */
pgm_check_table[0x38] = &addressing_exception;
pgm_check_table[0x3B] = &do_region_exception;
#endif /* CONFIG_ARCH_S390X */
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &privileged_op;
#ifdef CONFIG_PFAULT
if (MACHINE_IS_VM) {
/* request the 0x2603 external interrupt */
if (register_early_external_interrupt(0x2603, pfault_interrupt,
&ext_int_pfault) != 0)
panic("Couldn't request external interrupt 0x2603");
/*
* First try to get pfault pseudo page faults going.
* If this isn't available turn on pagex page faults.
*/
if (pfault_init() != 0) {
/* Tough luck, no pfault. */
unregister_early_external_interrupt(0x2603,
pfault_interrupt,
&ext_int_pfault);
cpcmd("SET PAGEX ON", NULL, 0);
}
}
#else
if (MACHINE_IS_VM)
#ifdef CONFIG_PFAULT
/* request the 0x2603 external interrupt */
if (register_early_external_interrupt(0x2603, pfault_interrupt,
&ext_int_pfault) != 0)
panic("Couldn't request external interrupt 0x2603");
if (pfault_init() == 0)
return;
/* Tough luck, no pfault. */
unregister_early_external_interrupt(0x2603, pfault_interrupt,
&ext_int_pfault);
#endif
#ifndef CONFIG_ARCH_S390X
cpcmd("SET PAGEX ON", NULL, 0);
#endif
}
}
......
@@ -6,4 +6,6 @@ L_TARGET = lib.a
EXTRA_AFLAGS := -traditional
obj-y = delay.o memset.o strcmp.o strncpy.o uaccess.o
obj-y += delay.o
obj-$(CONFIG_ARCH_S390_31) += memset.o strcmp.o strncpy.o uaccess.o
obj-$(CONFIG_ARCH_S390X) += memset64.o strcmp64.o strncpy64.o uaccess64.o
/*
* arch/s390/lib/memset.S
* S390 fast memset routine
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
/*
* R2 = address to memory area
* R3 = byte to fill memory with
* R4 = number of bytes to fill
*/
.globl memset
memset:
LTGR 4,4
JZ memset_end
LGR 0,2 # save pointer to memory area
LGR 1,3 # move pad byte to R1
LGR 3,4
SGR 4,4 # no source for MVCLE, only a pad byte
SGR 5,5
MVCLE 2,4,0(1) # that's it, MVCLE is your friend
JO .-4
LGR 2,0 # return pointer to mem.
memset_end:
BR 14
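# Note added for illustration (not part of the original patch): with the
# source pair R4/R5 zeroed, MVCLE only stores its padding byte, the low byte
# of the 0(1) operand address (i.e. the fill byte moved into R1 above),
# across the destination, and the JO retries whenever the instruction ends
# early with condition code 3.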
/*
* arch/s390/lib/strcmp.S
* S390 strcmp routine
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
/*
* R2 = address of compare string
* R3 = address of test string
*/
.globl strcmp
strcmp:
SGR 0,0
SGR 1,1
CLST 2,3
JO .-4
JE strcmp_equal
IC 0,0(3)
IC 1,0(2)
SGR 1,0
strcmp_equal:
LGR 2,1
BR 14
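A plain-C model of what the CLST loop returns: CLST compares both strings against the terminator byte kept in R0 (zeroed above), and the trailing IC/SGR pair turns the first mismatching bytes into the signed result. The sketch below is illustrative only; strcmp_model is a made-up name, not part of the patch:

static int strcmp_model(const char *cs, const char *ct)
{
	const unsigned char *a = (const unsigned char *) cs;
	const unsigned char *b = (const unsigned char *) ct;

	while (*a && *a == *b) {
		a++;
		b++;
	}
	return (int) *a - (int) *b;   /* 0 when both strings end together */
}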
/*
* arch/s390/kernel/strncpy.S
* S390 strncpy routine
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
/*
* R2 = address of destination
* R3 = address of source string
* R4 = max number of bytes to copy
*/
.globl strncpy
strncpy:
LGR 1,2 # don't touch address in R2
LTR 4,4
JZ strncpy_exit # 0 bytes -> nothing to do
SGR 0,0
strncpy_loop:
ICM 0,1,0(3) # ICM sets the cc, IC does not
LA 3,1(3)
STC 0,0(1)
LA 1,1(1)
JZ strncpy_exit # ICM inserted a 0x00
BRCTG 4,strncpy_loop # R4 -= 1, jump to strncpy_loop if > 0
strncpy_exit:
BR 14
/*
* arch/s390x/lib/uaccess.S
* __copy_{from|to}_user functions.
*
* s390
* Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* These functions have a standard call interface
*/
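# Note added for illustration (not part of the original patch): "sacf 512"
# switches the PSW address-space control to access-register mode for the
# duration of the copy and "sacf 0" restores primary-space mode; the
# __ex_table entries below map each potentially faulting MVCLE to the fixup
# label that shortens or aborts the copy.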
#include <asm/lowcore.h>
.text
.align 4
.globl __copy_from_user_asm
__copy_from_user_asm:
lgr %r5,%r3
sacf 512
0: mvcle %r2,%r4,0
jo 0b
1: sacf 0
lgr %r2,%r5
br %r14
2: lghi %r1,-4096
lgr %r3,%r4
slgr %r3,%r1 # %r3 = %r4 + 4096
ngr %r3,%r1 # %r3 = (%r4 + 4096) & -4096
slgr %r3,%r4 # %r3 = #bytes to next user page boundary
clgr %r5,%r3 # copy crosses next page boundary ?
jnh 1b # no, this page faulted
# The page after the current user page might have faulted.
# We can't find out which page because the program check handler
# might have called schedule, destroying all lowcore information.
# We retry with the shortened length.
3: mvcle %r2,%r4,0
jo 3b
j 1b
.section __ex_table,"a"
.quad 0b,2b
.quad 3b,1b
.previous
.align 4
.text
.globl __copy_to_user_asm
__copy_to_user_asm:
lgr %r5,%r3
sacf 512
0: mvcle %r4,%r2,0
jo 0b
1: sacf 0
lgr %r2,%r3
br %r14
2: lghi %r1,-4096
lgr %r5,%r4
slgr %r5,%r1 # %r5 = %r4 + 4096
ngr %r5,%r1 # %r5 = (%r4 + 4096) & -4096
slgr %r5,%r4 # %r5 = #bytes to next user page boundary
clgr %r3,%r5 # copy crosses next page boundary ?
jnh 1b # no, the current page faulted
# The page after the current user page might have faulted.
# We can't find out which page because the program check handler
# might have called schedule, destroying all lowcore information.
# We retry with the shortened length.
3: mvcle %r4,%r2,0
jo 3b
j 1b
.section __ex_table,"a"
.quad 0b,2b
.quad 3b,1b
.previous
.align 4
.text
.globl __clear_user_asm
__clear_user_asm:
lgr %r4,%r2
lgr %r5,%r3
sgr %r2,%r2
sgr %r3,%r3
sacf 512
0: mvcle %r4,%r2,0
jo 0b
1: sacf 0
br %r14
2: lgr %r2,%r5
lghi %r1,-4096
slgr %r5,%r1 # %r5 = %r4 + 4096
ngr %r5,%r1 # %r5 = (%r4 + 4096) & -4096
slgr %r5,%r4 # %r5 = #bytes to next user page boundary
clgr %r2,%r5 # copy crosses next page boundary ?
jnh 1b # no, the current page faulted
# The page after the current user page might have faulted.
# We can't find out which page because the program check handler
# might have called schedule, destroying all lowcore information.
# We retry with the shortened length.
slgr %r2,%r5
3: mvcle %r4,%r2,0
jo 3b
j 1b
4: algr %r2,%r5
j 1b
.section __ex_table,"a"
.quad 0b,2b
.quad 3b,4b
.previous
@@ -31,6 +31,18 @@
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#ifndef CONFIG_ARCH_S390X
#define __FAIL_ADDR_MASK 0x7ffff000
#define __FIXUP_MASK 0x7fffffff
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_ARCH_S390X */
#define __FAIL_ADDR_MASK -4096L
#define __FIXUP_MASK ~0L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_ARCH_S390X */
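/* Reading aid, not part of the patch: __FAIL_ADDR_MASK extracts the
 * page-aligned fault address from S390_lowcore.trans_exc_code (31-bit
 * addresses never exceed 2 GB, hence 0x7ffff000 there), __FIXUP_MASK strips
 * the 31-bit addressing-mode bit before the exception-table lookup,
 * __SUBCODE_MASK is the external-interrupt subcode class pfault_interrupt()
 * accepts, and __PF_RES_FIELD is the machine-dependent value placed in the
 * last field of the pfault init block further down. */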
#ifdef CONFIG_SYSCTL
extern int sysctl_userprocess_debug;
#endif
@@ -143,6 +155,7 @@ static void force_sigsegv(struct pt_regs *regs, unsigned long error_code,
* 04 Protection -> Write-Protection (suppression)
* 10 Segment translation -> Not present (nullification)
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
{
@@ -182,7 +195,7 @@ extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
* more specific the segment and page table portion of
* the address
*/
address = S390_lowcore.trans_exc_code&0x7ffff000;
address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK;
user_address = check_user_space(regs, error_code);
/*
@@ -267,9 +280,9 @@ extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
no_context:
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->psw.addr & 0x7fffffff);
fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
if (fixup) {
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE31;
regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
return;
}
@@ -279,10 +292,10 @@ extern inline void do_exception(struct pt_regs *regs, unsigned long error_code)
*/
if (user_address == 0)
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %08lx\n", address);
" at virtual kernel address %p\n", (void *)address);
else
printk(KERN_ALERT "Unable to handle kernel paging request"
" at virtual user address %08lx\n", address);
" at virtual user address %p\n", (void *)address);
die("Oops", regs, error_code);
do_exit(SIGKILL);
@@ -335,6 +348,16 @@ void do_page_exception(struct pt_regs *regs, unsigned long error_code)
do_exception(regs, 0x11);
}
#ifdef CONFIG_ARCH_S390X
void
do_region_exception(struct pt_regs *regs, unsigned long error_code)
{
do_exception(regs, 0x3b);
}
#else /* CONFIG_ARCH_S390X */
typedef struct _pseudo_wait_t {
struct _pseudo_wait_t *next;
wait_queue_head_t queue;
@@ -435,6 +458,7 @@ do_pseudo_page_fault(struct pt_regs *regs, unsigned long error_code)
wait_event(wait_struct.queue, wait_struct.resolved);
}
}
#endif /* CONFIG_ARCH_S390X */
#ifdef CONFIG_PFAULT
/*
@@ -464,7 +488,8 @@ typedef struct {
int pfault_init(void)
{
pfault_refbk_t refbk =
{ 0x258, 0, 5, 2, __LC_KERNEL_STACK, 1ULL << 48, 1ULL << 48, 0ULL };
{ 0x258, 0, 5, 2, __LC_KERNEL_STACK, 1ULL << 48, 1ULL << 48,
__PF_RES_FIELD };
int rc;
if (pfault_disable)
@@ -476,7 +501,11 @@ int pfault_init(void)
"2:\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
#ifndef CONFIG_ARCH_S390X
" .long 0b,1b\n"
#else /* CONFIG_ARCH_S390X */
" .quad 0b,1b\n"
#endif /* CONFIG_ARCH_S390X */
".previous"
: "=d" (rc) : "a" (&refbk) : "cc" );
__ctl_set_bit(0, 9);
@@ -496,7 +525,11 @@ void pfault_fini(void)
"0:\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
#ifndef CONFIG_ARCH_S390X
" .long 0b,0b\n"
#else /* CONFIG_ARCH_S390X */
" .quad 0b,0b\n"
#endif /* CONFIG_ARCH_S390X */
".previous"
: : "a" (&refbk) : "cc" );
}
@@ -516,7 +549,7 @@ pfault_interrupt(struct pt_regs *regs, __u16 error_code)
* external interrupt.
*/
subcode = S390_lowcore.cpu_addr;
if ((subcode & 0xff00) != 0x0200)
if ((subcode & 0xff00) != __SUBCODE_MASK)
return;
/*
@@ -524,7 +557,7 @@ pfault_interrupt(struct pt_regs *regs, __u16 error_code)
*/
tsk = (struct task_struct *)
(*((unsigned long *) __LC_PFAULT_INTPARM) - THREAD_SIZE);
/*
* We got all needed information from the lowcore and can
* now safely switch on interrupts.
......
@@ -79,13 +79,12 @@ extern unsigned long __init_begin;
extern unsigned long __init_end;
/*
* paging_init() sets up the page tables - note that the first 4MB are
* already mapped by head.S.
* paging_init will erase this initial mapping
* paging_init() sets up the page tables
*/
unsigned long last_valid_pfn;
#ifndef CONFIG_ARCH_S390X
void __init paging_init(void)
{
pgd_t * pg_dir;
@@ -98,12 +97,12 @@ void __init paging_init(void)
static const int ssm_mask = 0x04000000L;
/* unmap whole virtual address space */
pg_dir = swapper_pg_dir;
for (i=0;i<KERNEL_PGD_PTRS;i++)
pmd_clear((pmd_t*)pg_dir++);
/*
* map whole physical memory to virtual memory (identity mapping)
*/
@@ -146,13 +145,92 @@ void __init paging_init(void)
zones_size[ZONE_DMA] = max_low_pfn;
free_area_init(zones_size);
}
return;
}
#else /* CONFIG_ARCH_S390X */
void __init paging_init(void)
{
pgd_t * pg_dir;
pmd_t * pm_dir;
pte_t * pt_dir;
pte_t pte;
int i,j,k;
unsigned long pfn = 0;
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
_KERN_REGION_TABLE;
static const int ssm_mask = 0x04000000L;
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
unsigned long dma_pfn, high_pfn;
dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
high_pfn = max_low_pfn;
if (dma_pfn > high_pfn)
zones_size[ZONE_DMA] = high_pfn;
else {
zones_size[ZONE_DMA] = dma_pfn;
zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
}
/* Initialize mem_map[]. */
free_area_init(zones_size);
/*
* map whole physical memory to virtual memory (identity mapping)
*/
pg_dir = swapper_pg_dir;
for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
if (pfn >= max_low_pfn) {
pgd_clear(pg_dir);
continue;
}
pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4);
pgd_populate(&init_mm, pg_dir, pm_dir);
for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
if (pfn >= max_low_pfn) {
pmd_clear(pm_dir);
continue;
}
pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn) {
pte_clear(&pte);
continue;
}
set_pte(pt_dir, pte);
pfn++;
}
}
}
/* enable virtual mapping in kernel mode */
__asm__ __volatile__("lctlg 1,1,%0\n\t"
"lctlg 7,7,%0\n\t"
"lctlg 13,13,%0\n\t"
"ssm %1"
: :"m" (pgdir_k), "m" (ssm_mask));
local_flush_tlb();
return;
}
#endif /* CONFIG_ARCH_S390X */
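/* Note, not part of the patch: the lctlg sequence above loads the kernel
 * region-table origin into control registers 1, 7 and 13 (the primary,
 * secondary and home address-space-control elements), and the ssm loads a
 * system mask with the DAT bit set, switching the CPU to translated
 * addressing before local_flush_tlb() runs. */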
void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
unsigned long codesize, reservedpages, datasize, initsize;
max_mapnr = num_physpages = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
@@ -168,7 +246,7 @@ void __init mem_init(void)
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
codesize >> 10,
......
@@ -3,11 +3,20 @@
*/
#include <asm-generic/vmlinux.lds.h>
#include <linux/config.h>
#ifndef CONFIG_ARCH_S390X
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
OUTPUT_ARCH(s390)
ENTRY(_start)
jiffies = jiffies_64 + 4;
#else
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390)
ENTRY(_start)
jiffies = jiffies_64;
#endif
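/* Note, not part of the patch: s390 is big-endian, so on the 31-bit build
 * the low-order 32 bits of jiffies_64 sit 4 bytes into the counter, which
 * is what "jiffies = jiffies_64 + 4" aliases; on the 64-bit build jiffies
 * can simply alias jiffies_64 itself. */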
SECTIONS
{
. = 0x00000000;
......