Commit ccdcef72 authored by Dale Farnsworth, committed by Paul Mackerras

powerpc/32: Add the ability for a classic ppc kernel to be loaded at 32M

Add the ability for a classic ppc kernel to be loaded at an address
of 32MB.  This is done by fixing a few places that assume we are loaded
at address 0, and by changing several uses of KERNELBASE to use
PAGE_OFFSET instead.
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 01695a96
...@@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) ...@@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define fromreal(rd) tovirt(rd,rd) #define fromreal(rd) tovirt(rd,rd)
#define tophys(rd,rs) \ #define tophys(rd,rs) \
0: addis rd,rs,-KERNELBASE@h; \ 0: addis rd,rs,-PAGE_OFFSET@h; \
.section ".vtop_fixup","aw"; \ .section ".vtop_fixup","aw"; \
.align 1; \ .align 1; \
.long 0b; \ .long 0b; \
.previous .previous
#define tovirt(rd,rs) \ #define tovirt(rd,rs) \
0: addis rd,rs,KERNELBASE@h; \ 0: addis rd,rs,PAGE_OFFSET@h; \
.section ".ptov_fixup","aw"; \ .section ".ptov_fixup","aw"; \
.align 1; \ .align 1; \
.long 0b; \ .long 0b; \
......
...@@ -183,7 +183,8 @@ __after_mmu_off: ...@@ -183,7 +183,8 @@ __after_mmu_off:
bl reloc_offset bl reloc_offset
mr r26,r3 mr r26,r3
addis r4,r3,KERNELBASE@h /* current address of _start */ addis r4,r3,KERNELBASE@h /* current address of _start */
cmpwi 0,r4,0 /* are we already running at 0? */ lis r5,PHYSICAL_START@h
cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
bne relocate_kernel bne relocate_kernel
/* /*
* we now have the 1st 16M of ram mapped with the bats. * we now have the 1st 16M of ram mapped with the bats.
...@@ -811,13 +812,13 @@ giveup_altivec: ...@@ -811,13 +812,13 @@ giveup_altivec:
/* /*
* This code is jumped to from the startup code to copy * This code is jumped to from the startup code to copy
* the kernel image to physical address 0. * the kernel image to physical address PHYSICAL_START.
*/ */
relocate_kernel: relocate_kernel:
addis r9,r26,klimit@ha /* fetch klimit */ addis r9,r26,klimit@ha /* fetch klimit */
lwz r25,klimit@l(r9) lwz r25,klimit@l(r9)
addis r25,r25,-KERNELBASE@h addis r25,r25,-KERNELBASE@h
li r3,0 /* Destination base address */ lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */ li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */ li r5,0x4000 /* # bytes of memory to copy */
bl copy_and_flush /* copy the first 0x4000 bytes */ bl copy_and_flush /* copy the first 0x4000 bytes */
...@@ -1188,11 +1189,11 @@ mmu_off: ...@@ -1188,11 +1189,11 @@ mmu_off:
/* /*
* Use the first pair of BAT registers to map the 1st 16MB * Use the first pair of BAT registers to map the 1st 16MB
* of RAM to KERNELBASE. From this point on we can't safely * of RAM to PAGE_OFFSET. From this point on we can't safely
* call OF any more. * call OF any more.
*/ */
initial_bats: initial_bats:
lis r11,KERNELBASE@h lis r11,PAGE_OFFSET@h
mfspr r9,SPRN_PVR mfspr r9,SPRN_PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi 0,r9,1 cmpwi 0,r9,1
......
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */ /* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE)) #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" #error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif #endif
#endif #endif
......
...@@ -269,7 +269,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags) ...@@ -269,7 +269,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
} }
/* /*
* Map in a big chunk of physical memory starting at KERNELBASE. * Map in a big chunk of physical memory starting at PAGE_OFFSET.
*/ */
void __init mapin_ram(void) void __init mapin_ram(void)
{ {
...@@ -278,7 +278,7 @@ void __init mapin_ram(void) ...@@ -278,7 +278,7 @@ void __init mapin_ram(void)
int ktext; int ktext;
s = mmu_mapin_ram(); s = mmu_mapin_ram();
v = KERNELBASE + s; v = PAGE_OFFSET + s;
p = memstart_addr + s; p = memstart_addr + s;
for (; s < total_lowmem; s += PAGE_SIZE) { for (; s < total_lowmem; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext); ktext = ((char *) v >= _stext && (char *) v < etext);
......
...@@ -95,16 +95,16 @@ unsigned long __init mmu_mapin_ram(void) ...@@ -95,16 +95,16 @@ unsigned long __init mmu_mapin_ram(void)
break; break;
} }
setbat(2, KERNELBASE, 0, bl, _PAGE_RAM); setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM);
done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1; done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
if ((done < tot) && !bat_addrs[3].limit) { if ((done < tot) && !bat_addrs[3].limit) {
/* use BAT3 to cover a bit more */ /* use BAT3 to cover a bit more */
tot -= done; tot -= done;
for (bl = 128<<10; bl < max_size; bl <<= 1) for (bl = 128<<10; bl < max_size; bl <<= 1)
if (bl * 2 > tot) if (bl * 2 > tot)
break; break;
setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM); setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM);
done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1; done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
} }
return done; return done;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment