The POWER4 support merged previously was incomplete; add the missing bits

so that the kernel boots at least on POWER4 and G5 CPUs.
parent bacd2782
...@@ -181,26 +181,8 @@ __after_mmu_off: ...@@ -181,26 +181,8 @@ __after_mmu_off:
bl setup_disp_bat bl setup_disp_bat
#endif #endif
#else /* CONFIG_POWER4 */ #else /* CONFIG_POWER4 */
/*
* Load up the SDR1 and segment register values now
* since we don't have the BATs.
* Also make sure we are running in 32-bit mode.
*/
bl reloc_offset bl reloc_offset
addis r14,r3,_SDR1@ha /* get the value from _SDR1 */ bl initial_mm_power4
lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */
mtspr SDR1,r14
slbia
lis r4,0x2000 /* set pseudo-segment reg 12 */
ori r5,r4,0x0ccc
mtsr 12,r5
ori r4,r4,0x0888 /* set pseudo-segment reg 8 */
mtsr 8,r4 /* (for access to serial port) */
mfmsr r0
clrldi r0,r0,1
sync
mtmsr r0
isync
#endif /* CONFIG_POWER4 */ #endif /* CONFIG_POWER4 */
/* /*
...@@ -1637,6 +1619,34 @@ setup_disp_bat: ...@@ -1637,6 +1619,34 @@ setup_disp_bat:
#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
#else /* CONFIG_POWER4 */ #else /* CONFIG_POWER4 */
/*
* Load up the SDR1 and segment register values now
* since we don't have the BATs.
* Also make sure we are running in 32-bit mode.
*/
initial_mm_power4:
addis r14,r3,_SDR1@ha /* get the value from _SDR1 */
lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */
mtspr SDR1,r14
slbia
lis r4,0x2000 /* set pseudo-segment reg 12 */
ori r5,r4,0x0ccc
mtsr 12,r5
ori r5,r4,0x0888 /* set pseudo-segment reg 8 */
mtsr 8,r5 /* (for access to serial port) */
ori r5,r4,0x0999 /* set pseudo-segment reg 8 */
mtsr 9,r5 /* (for access to screen) */
mfmsr r0
clrldi r0,r0,1
sync
mtmsr r0
isync
blr
/*
* On 970 (G5), we pre-set a few bits in HID0 & HID1
*/
ppc970_setup_hid: ppc970_setup_hid:
li r0,0 li r0,0
sync sync
......
...@@ -417,6 +417,21 @@ _GLOBAL(hash_page_patch_C) ...@@ -417,6 +417,21 @@ _GLOBAL(hash_page_patch_C)
lwz r6,next_slot@l(r4) lwz r6,next_slot@l(r4)
addi r6,r6,PTE_SIZE addi r6,r6,PTE_SIZE
andi. r6,r6,7*PTE_SIZE andi. r6,r6,7*PTE_SIZE
#ifdef CONFIG_POWER4
/*
* Since we don't have BATs on POWER4, we rely on always having
* PTEs in the hash table to map the hash table and the code
* that manipulates it in virtual mode, namely flush_hash_page and
* flush_hash_segments. Otherwise we can get a DSI inside those
* routines which leads to a deadlock on the hash_table_lock on
* SMP machines. We avoid this by never overwriting the first
* PTE of each PTEG if it is already valid.
* -- paulus.
*/
bne 102f
li r6,PTE_SIZE
102:
#endif /* CONFIG_POWER4 */
stw r6,next_slot@l(r4) stw r6,next_slot@l(r4)
add r4,r3,r6 add r4,r3,r6
......
...@@ -211,6 +211,17 @@ void __init MMU_init_hw(void) ...@@ -211,6 +211,17 @@ void __init MMU_init_hw(void)
#define MIN_N_HPTEG 1024 /* min 64kB hash table */ #define MIN_N_HPTEG 1024 /* min 64kB hash table */
#endif #endif
#ifdef CONFIG_POWER4
/* The hash table has already been allocated and initialized
in prom.c */
n_hpteg = Hash_size >> LG_HPTEG_SIZE;
lg_n_hpteg = __ilog2(n_hpteg);
/* Remove the hash table from the available memory */
if (Hash)
reserve_phys_mem(__pa(Hash), Hash_size);
#else /* CONFIG_POWER4 */
/* /*
* Allow 1 HPTE (1/8 HPTEG) for each page of memory. * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
* This is less than the recommended amount, but then * This is less than the recommended amount, but then
...@@ -224,13 +235,7 @@ void __init MMU_init_hw(void) ...@@ -224,13 +235,7 @@ void __init MMU_init_hw(void)
++lg_n_hpteg; /* round up if not power of 2 */ ++lg_n_hpteg; /* round up if not power of 2 */
n_hpteg = 1 << lg_n_hpteg; n_hpteg = 1 << lg_n_hpteg;
} }
Hash_size = n_hpteg << LG_HPTEG_SIZE; Hash_size = n_hpteg << LG_HPTEG_SIZE;
Hash_mask = n_hpteg - 1;
hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
if (lg_n_hpteg > 16)
mb2 = 16 - LG_HPTEG_SIZE;
/* /*
* Find some memory for the hash table. * Find some memory for the hash table.
...@@ -240,6 +245,7 @@ void __init MMU_init_hw(void) ...@@ -240,6 +245,7 @@ void __init MMU_init_hw(void)
cacheable_memzero(Hash, Hash_size); cacheable_memzero(Hash, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS; _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size); Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
#endif /* CONFIG_POWER4 */
printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n", printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
total_memory >> 20, Hash_size >> 10, Hash); total_memory >> 20, Hash_size >> 10, Hash);
...@@ -249,6 +255,12 @@ void __init MMU_init_hw(void) ...@@ -249,6 +255,12 @@ void __init MMU_init_hw(void)
* Patch up the instructions in hashtable.S:create_hpte * Patch up the instructions in hashtable.S:create_hpte
*/ */
if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345); if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
Hash_mask = n_hpteg - 1;
hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
if (lg_n_hpteg > 16)
mb2 = 16 - LG_HPTEG_SIZE;
hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff) hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
| ((unsigned int)(Hash) >> 16); | ((unsigned int)(Hash) >> 16);
hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6); hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment