Commit 4b961552 authored by Dmitry Korotin, committed by Greg Kroah-Hartman

MIPS: Add missing EHB in mtc0 -> mfc0 sequence.

commit 0b24cae4 upstream.

Add a missing EHB (Execution Hazard Barrier) in the mtc0 -> mfc0 sequence.
Without this execution hazard barrier it is possible for the value read
back from the KScratch register to be the value from before the mtc0.

Reproducible on P5600 & P6600.

The hazard is documented in the MIPS Architecture Reference Manual Vol.
III: MIPS32/microMIPS32 Privileged Resource Architecture (MD00088),
rev 6.03, table 8.1, which includes:

   Producer | Consumer | Hazard
  ----------|----------|----------------------------
   mtc0     | mfc0     | any coprocessor 0 register
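
For illustration only (not part of the upstream patch), a minimal kernel-mode
sketch of the producer/consumer pair above, using inline assembly. The helper
name is hypothetical, the KScratch register is assumed to be CP0 register 31
select 2, and a MIPS32r2-or-later toolchain is assumed; CP0 is only accessible
in kernel mode:

   /*
    * Hypothetical helper (illustration only): write a KScratch register
    * and immediately read it back.  Without the "ehb", the mfc0 may
    * still observe the value from before the mtc0 on P5600/P6600,
    * because mtc0 -> mfc0 is an execution hazard.
    */
   static inline unsigned int kscratch_write_read(unsigned int val)
   {
   	unsigned int ret;

   	__asm__ __volatile__(
   	"	mtc0	%1, $31, 2	\n"	/* producer: write KScratch */
   	"	ehb			\n"	/* clear the execution hazard */
   	"	mfc0	%0, $31, 2	\n"	/* consumer: read it back */
   	: "=r" (ret)
   	: "r" (val));

   	return ret;
   }

The patch below applies the same pattern in the generated TLB handlers by
emitting uasm_i_ehb() before each UASM_i_MFC0() that reads a KScratch
register written by a preceding MTC0.
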
Signed-off-by: Dmitry Korotin <dkorotin@wavecomp.com>
[paul.burton@mips.com:
  - Commit message tweaks.
  - Add Fixes tags.
  - Mark for stable back to v3.15 where P5600 support was introduced.]
Signed-off-by: Paul Burton <paul.burton@mips.com>
Fixes: 3d8bfdd0 ("MIPS: Use C0_KScratch (if present) to hold PGD pointer.")
Fixes: 829dcc0a ("MIPS: Add MIPS P5600 probe support")
Cc: linux-mips@vger.kernel.org
Cc: stable@vger.kernel.org # v3.15+
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e395c337

@@ -391,6 +391,7 @@ static struct work_registers build_get_work_registers(u32 **p)
 static void build_restore_work_registers(u32 **p)
 {
 	if (scratch_reg >= 0) {
+		uasm_i_ehb(p);
 		UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
 		return;
 	}
@@ -668,10 +669,12 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
 			uasm_i_mtc0(p, 0, C0_PAGEMASK);
 			uasm_il_b(p, r, lid);
 		}
-		if (scratch_reg >= 0)
+		if (scratch_reg >= 0) {
+			uasm_i_ehb(p);
 			UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-		else
+		} else {
 			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+		}
 	} else {
 		/* Reset default page size */
 		if (PM_DEFAULT_MASK >> 16) {
@@ -938,10 +941,12 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		uasm_i_jr(p, ptr);
 
 		if (mode == refill_scratch) {
-			if (scratch_reg >= 0)
+			if (scratch_reg >= 0) {
+				uasm_i_ehb(p);
 				UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-			else
+			} else {
 				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+			}
 		} else {
 			uasm_i_nop(p);
 		}
@@ -1258,6 +1263,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
 
 	if (c0_scratch_reg >= 0) {
+		uasm_i_ehb(p);
 		UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
 		build_tlb_write_entry(p, l, r, tlb_random);
 		uasm_l_leave(l, *p);
@@ -1603,15 +1609,17 @@ static void build_setup_pgd(void)
 		uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
 		uasm_l_tlbl_goaround1(&l, p);
 		UASM_i_SLL(&p, a0, a0, 11);
-		uasm_i_jr(&p, 31);
 		UASM_i_MTC0(&p, a0, C0_CONTEXT);
+		uasm_i_jr(&p, 31);
+		uasm_i_ehb(&p);
 	} else {
 		/* PGD in c0_KScratch */
-		uasm_i_jr(&p, 31);
 		if (cpu_has_ldpte)
 			UASM_i_MTC0(&p, a0, C0_PWBASE);
 		else
 			UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+		uasm_i_jr(&p, 31);
+		uasm_i_ehb(&p);
 	}
 #else
 #ifdef CONFIG_SMP
@@ -1625,13 +1633,16 @@ static void build_setup_pgd(void)
 	UASM_i_LA_mostly(&p, a2, pgdc);
 	UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
 #endif /* SMP */
-	uasm_i_jr(&p, 31);
 
 	/* if pgd_reg is allocated, save PGD also to scratch register */
-	if (pgd_reg != -1)
+	if (pgd_reg != -1) {
 		UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
-	else
+		uasm_i_jr(&p, 31);
+		uasm_i_ehb(&p);
+	} else {
+		uasm_i_jr(&p, 31);
 		uasm_i_nop(&p);
+	}
 #endif
 	if (p >= (u32 *)tlbmiss_handler_setup_pgd_end)
 		panic("tlbmiss_handler_setup_pgd space exceeded");