Commit 8b11bd12 authored by David S. Miller

[SPARC64]: Patch up mmu context register writes for sun4v.

sun4v uses ASI_MMU instead of ASI_DMMU
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 481295f9
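Note on the pattern used throughout this diff: each context-register access keeps its original sun4u instruction, tags it with a local 661: label, and emits one entry into the .sun4v_1insn_patch section consisting of that instruction's address (.word 661b) followed by the single replacement instruction that uses ASI_MMU. On a sun4v machine the kernel can walk this table early in boot and overwrite each tagged instruction in place; on sun4u nothing is touched. Below is a minimal, hypothetical C sketch of such a one-instruction patch walker, assuming a two-word {addr, insn} entry layout; the struct and function names are illustrative only, not the patch-application code actually added by this series.

/* Hypothetical sketch of a sun4v one-instruction patch walker.
 * Assumes each .sun4v_1insn_patch entry is two 32-bit words:
 * the address of the 661:-tagged instruction and the replacement opcode.
 */
struct sun4v_1insn_patch_entry {
	unsigned int	addr;	/* location of the tagged instruction */
	unsigned int	insn;	/* replacement instruction (ASI_MMU form) */
};

static void sun4v_apply_1insn_patches(struct sun4v_1insn_patch_entry *start,
				      struct sun4v_1insn_patch_entry *end)
{
	for (; start < end; start++) {
		unsigned long addr = start->addr;

		/* Overwrite the tagged instruction with its sun4v form, then
		 * flush it from the instruction cache so the new opcode is fetched.
		 */
		*(unsigned int *) addr = start->insn;
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr)
				     : "memory");
	}
}

Because only one instruction per site is rewritten, sun4u kernels pay no runtime cost and the hot paths patched below need no cpu-type branch.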
@@ -97,10 +97,22 @@ do_fpdis:
 	add	%g6, TI_FPREGS + 0x80, %g1
 	faddd	%f0, %f2, %f4
 	fmuld	%f0, %f2, %f6
-	ldxa	[%g3] ASI_DMMU, %g5
+661:	ldxa	[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	ldxa	[%g3] ASI_MMU, %g5
+	.previous
 	sethi	%hi(sparc64_kern_sec_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa	%g2, [%g3] ASI_DMMU
+661:	stxa	%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g3] ASI_MMU
+	.previous
 	membar	#Sync
 	add	%g6, TI_FPREGS + 0xc0, %g2
 	faddd	%f0, %f2, %f8
@@ -126,11 +138,23 @@ do_fpdis:
 	fzero	%f32
 	mov	SECONDARY_CONTEXT, %g3
 	fzero	%f34
-	ldxa	[%g3] ASI_DMMU, %g5
+661:	ldxa	[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	ldxa	[%g3] ASI_MMU, %g5
+	.previous
 	add	%g6, TI_FPREGS, %g1
 	sethi	%hi(sparc64_kern_sec_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa	%g2, [%g3] ASI_DMMU
+661:	stxa	%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g3] ASI_MMU
+	.previous
 	membar	#Sync
 	add	%g6, TI_FPREGS + 0x40, %g2
 	faddd	%f32, %f34, %f36
@@ -155,10 +179,22 @@ do_fpdis:
 	nop
 3:	mov	SECONDARY_CONTEXT, %g3
 	add	%g6, TI_FPREGS, %g1
-	ldxa	[%g3] ASI_DMMU, %g5
+661:	ldxa	[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	ldxa	[%g3] ASI_MMU, %g5
+	.previous
 	sethi	%hi(sparc64_kern_sec_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa	%g2, [%g3] ASI_DMMU
+661:	stxa	%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g3] ASI_MMU
+	.previous
 	membar	#Sync
 	mov	0x40, %g2
 	membar	#Sync
@@ -169,7 +205,13 @@ do_fpdis:
 	ldda	[%g1 + %g2] ASI_BLK_S, %f48
 	membar	#Sync
 fpdis_exit:
-	stxa	%g5, [%g3] ASI_DMMU
+661:	stxa	%g5, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g5, [%g3] ASI_MMU
+	.previous
 	membar	#Sync
 fpdis_exit2:
 	wr	%g7, 0, %gsr
@@ -323,10 +365,22 @@ do_fptrap_after_fsr:
 	rd	%gsr, %g3
 	stx	%g3, [%g6 + TI_GSR]
 	mov	SECONDARY_CONTEXT, %g3
-	ldxa	[%g3] ASI_DMMU, %g5
+661:	ldxa	[%g3] ASI_DMMU, %g5
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	ldxa	[%g3] ASI_MMU, %g5
+	.previous
 	sethi	%hi(sparc64_kern_sec_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_sec_context)], %g2
-	stxa	%g2, [%g3] ASI_DMMU
+661:	stxa	%g2, [%g3] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g3] ASI_MMU
+	.previous
 	membar	#Sync
 	add	%g6, TI_FPREGS, %g2
 	andcc	%g1, FPRS_DL, %g0
@@ -341,7 +395,13 @@ do_fptrap_after_fsr:
 	stda	%f48, [%g2 + %g3] ASI_BLK_S
 5:	mov	SECONDARY_CONTEXT, %g1
 	membar	#Sync
-	stxa	%g5, [%g1] ASI_DMMU
+661:	stxa	%g5, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g5, [%g1] ASI_MMU
+	.previous
 	membar	#Sync
 	ba,pt	%xcc, etrap
 	 wr	%g0, 0, %fprs
...
@@ -95,7 +95,13 @@ etrap_save:	save	%g2, -STACK_BIAS, %sp
 	wrpr	%g2, 0, %wstate
 	sethi	%hi(sparc64_kern_pri_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
-	stxa	%g3, [%l4] ASI_DMMU
+661:	stxa	%g3, [%l4] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g3, [%l4] ASI_MMU
+	.previous
 	sethi	%hi(KERNBASE), %l4
 	flush	%l4
 	mov	ASI_AIUS, %l7
...
@@ -304,11 +304,23 @@ jump_to_sun4u_init:
 sun4u_init:
 	/* Set ctx 0 */
 	mov	PRIMARY_CONTEXT, %g7
-	stxa	%g0, [%g7] ASI_DMMU
+661:	stxa	%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g0, [%g7] ASI_MMU
+	.previous
 	membar	#Sync
 	mov	SECONDARY_CONTEXT, %g7
-	stxa	%g0, [%g7] ASI_DMMU
+661:	stxa	%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g0, [%g7] ASI_MMU
+	.previous
 	membar	#Sync
 	BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
@@ -436,8 +448,15 @@ setup_trap_table:
 	/* Start using proper page size encodings in ctx register. */
 	sethi	%hi(sparc64_kern_pri_context), %g3
 	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
 	mov	PRIMARY_CONTEXT, %g1
-	stxa	%g2, [%g1] ASI_DMMU
+661:	stxa	%g2, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g1] ASI_MMU
+	.previous
 	membar	#Sync
 	/* Kill PROM timer */
...
@@ -264,11 +264,23 @@ rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
 	brnz,pn	%l3, kern_rtt
 	 mov	PRIMARY_CONTEXT, %l7
-	ldxa	[%l7 + %l7] ASI_DMMU, %l0
+661:	ldxa	[%l7 + %l7] ASI_DMMU, %l0
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	ldxa	[%l7 + %l7] ASI_MMU, %l0
+	.previous
 	sethi	%hi(sparc64_kern_pri_nuc_bits), %l1
 	ldx	[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
 	or	%l0, %l1, %l0
-	stxa	%l0, [%l7] ASI_DMMU
+661:	stxa	%l0, [%l7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%l0, [%l7] ASI_MMU
+	.previous
 	sethi	%hi(KERNBASE), %l7
 	flush	%l7
 	rdpr	%wstate, %l1
@@ -303,7 +315,13 @@ user_rtt_fill_fixup:
 	sethi	%hi(sparc64_kern_pri_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
 	mov	PRIMARY_CONTEXT, %g1
-	stxa	%g2, [%g1] ASI_DMMU
+661:	stxa	%g2, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g1] ASI_MMU
+	.previous
 	sethi	%hi(KERNBASE), %g1
 	flush	%g1
...
@@ -189,26 +189,30 @@ int prom_callback(long *args)
 	}
 	if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
+		if (tlb_type == spitfire) {
 			extern unsigned long sparc64_kern_pri_context;
 			/* Spitfire Errata #32 workaround */
-			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
+			__asm__ __volatile__(
+				"stxa %0, [%1] %2\n\t"
 				"flush %%g6"
 				: /* No outputs */
 				: "r" (sparc64_kern_pri_context),
 				  "r" (PRIMARY_CONTEXT),
 				  "i" (ASI_DMMU));
+		}
 		/*
 		 * Locked down tlb entry.
 		 */
-		if (tlb_type == spitfire)
+		if (tlb_type == spitfire) {
 			tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
-		else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+			res = PROM_TRUE;
+		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 			tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
 			res = PROM_TRUE;
+		}
 		goto done;
 	}
...
@@ -272,10 +272,22 @@ do_unlock:
 	wr	%g0, ASI_P, %asi
 	mov	PRIMARY_CONTEXT, %g7
-	stxa	%g0, [%g7] ASI_DMMU
+661:	stxa	%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g0, [%g7] ASI_MMU
+	.previous
 	membar	#Sync
 	mov	SECONDARY_CONTEXT, %g7
-	stxa	%g0, [%g7] ASI_DMMU
+661:	stxa	%g0, [%g7] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g0, [%g7] ASI_MMU
+	.previous
 	membar	#Sync
 	mov	1, %g5
@@ -304,7 +316,13 @@ do_unlock:
 	sethi	%hi(sparc64_kern_pri_context), %g3
 	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
 	mov	PRIMARY_CONTEXT, %g1
-	stxa	%g2, [%g1] ASI_DMMU
+661:	stxa	%g2, [%g1] ASI_DMMU
+	.section	.sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%g2, [%g1] ASI_MMU
+	.previous
 	membar	#Sync
 	rdpr	%pstate, %o1
...
@@ -792,15 +792,6 @@ void sparc_ultra_dump_dtlb(void)
 	}
 }
-static inline void spitfire_errata32(void)
-{
-	__asm__ __volatile__("stxa %0, [%1] %2\n\t"
-			     "flush %%g6"
-			     : /* No outputs */
-			     : "r" (0),
-			       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-}
 extern unsigned long cmdline_memory_size;
 unsigned long __init bootmem_init(unsigned long *pages_avail)
...
@@ -30,16 +30,6 @@ extern void prom_world(int);
 extern void prom_cif_interface(void);
 extern void prom_cif_callback(void);
-static inline unsigned long spitfire_get_primary_context(void)
-{
-	unsigned long ctx;
-	__asm__ __volatile__("ldxa [%1] %2, %0"
-			     : "=r" (ctx)
-			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-	return ctx;
-}
 /*
  * This provides SMP safety on the p1275buf. prom_callback() drops this lock
  * to allow recursuve acquisition.
@@ -55,7 +45,6 @@ long p1275_cmd(const char *service, long fmt, ...)
 	long attrs, x;
 	p = p1275buf.prom_buffer;
-	BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0);
 	spin_lock_irqsave(&prom_entry_lock, flags);
...
@@ -41,11 +41,16 @@ extern void smp_tsb_sync(struct mm_struct *mm);
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
-	__asm__ __volatile__("stxa %0, [%1] %2\n\t" \
-			     "flush %%g6" \
+	__asm__ __volatile__( \
+	"\n661:	stxa	%0, [%1] %2\n" \
+	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
+	"	.word	661b\n" \
+	"	stxa	%0, [%1] %3\n" \
+	"	.previous\n" \
+	"	flush	%%g6\n" \
 	: /* No outputs */ \
 	: "r" (CTX_HWBITS((__mm)->context)), \
-	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU))
+	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
 extern void __flush_tlb_mm(unsigned long, unsigned long);
...