Commit c5e296f5 authored by Anton Blanchard, committed by Linus Torvalds

[PATCH] ppc64 POWER3 segment table fix

The ppc64 fix last week (enforcing permissions on the kernel when
accessing userspace pages) uncovered a bug on POWER3/RS64: we weren't
zeroing the segment table entry before overwriting it, so it was possible
for the ks bit to be set on a kernel segment.

The VSID mask was also changed to match reality (we only use 13 bits).
parent 0b5403cf
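
For reference, the failure mode can be sketched in plain C. The layout below is only illustrative (the field names echo the dw0/dw1 unions in the stab.c hunks further down, but the widths and ordering are assumptions, not the real mmu definitions); the point is that reusing a segment table slot without clearing it first lets bits left behind by the previous occupant, such as ks, survive into the new kernel entry:

	#include <stdint.h>
	#include <string.h>

	/* Illustrative STE layout (assumed, not the kernel's exact definition). */
	struct ste_dw0 {
		uint64_t esid  : 36;	/* effective segment id */
		uint64_t resv  : 20;
		uint64_t v     : 1;	/* valid */
		uint64_t ks    : 1;	/* supervisor-state storage key */
		uint64_t kp    : 1;	/* problem-state storage key */
		uint64_t resv2 : 5;
	};

	struct ste {
		struct ste_dw0 dw0;
		uint64_t vsid;		/* stands in for dw1.dw1.vsid */
	};

	/* Buggy pattern: only the fields we care about are written, so whatever
	 * the previous occupant left behind (e.g. ks) is still set when the new
	 * kernel entry is marked valid. */
	static void fill_ste_buggy(struct ste *ste, uint64_t esid, uint64_t vsid)
	{
		ste->vsid = vsid;
		ste->dw0.esid = esid;
		ste->dw0.kp = 1;
		ste->dw0.v = 1;
	}

	/* Fixed pattern, matching the stab.c hunks: clear both doublewords
	 * before filling in the new entry. */
	static void fill_ste_fixed(struct ste *ste, uint64_t esid, uint64_t vsid)
	{
		memset(ste, 0, sizeof(*ste));
		ste->vsid = vsid;
		ste->dw0.esid = esid;
		ste->dw0.kp = 1;
		ste->dw0.v = 1;
	}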
@@ -904,12 +904,13 @@ _GLOBAL(do_stab_bolted)
 	/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60) */
 	mfspr r21,DAR
-	rldicl r20,r21,36,32 /* Permits a full 32b of ESID */
-	rldicr r20,r20,15,48
-	rldicl r21,r21,4,60
-	or r20,r20,r21
+	rldicl r20,r21,36,51
+	sldi r20,r20,15
+	srdi r21,r21,60
+	or r20,r20,r21
-	li r21,9 /* VSID_RANDOMIZER */
+	/* VSID_RANDOMIZER */
+	li r21,9
 	sldi r21,r21,32
 	oris r21,r21,58231
 	ori r21,r21,39831
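
Worked out in C, the new sequence builds the ordinal described by the comment (the ESID now masked to 13 bits), and the immediates loaded into r21 assemble the constant 0x9E3779B97 (9 << 32 | 58231 << 16 | 39831). A rough equivalent of the whole computation is sketched below; VSID_MASK is shown only as a placeholder, the real value comes from the mmu headers:

	#include <stdint.h>

	#define VSID_RANDOMIZER	0x9E3779B97ULL	/* li 9; sldi 32; oris 58231; ori 39831 */
	#define VSID_MASK	0xFFFFFFFFFULL	/* placeholder; see the mmu headers */

	/* C sketch of what do_stab_bolted computes for a bolted kernel segment. */
	static uint64_t stab_bolted_vsid(uint64_t ea)
	{
		/* (((ea >> 28) & 0x1fff) << 15) | (ea >> 60)
		 * rldicl r20,r21,36,51 keeps 13 ESID bits; sldi/srdi/or build the rest. */
		uint64_t ordinal = (((ea >> 28) & 0x1fff) << 15) | (ea >> 60);

		return (ordinal * VSID_RANDOMIZER) & VSID_MASK;
	}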
@@ -933,11 +934,11 @@ _GLOBAL(do_stab_bolted)
 	rldicl r23,r23,57,63
 	cmpwi r23,0
 	bne 2f
-	ld r23,8(r21) /* Get the current vsid part of the ste */
+	li r23,0
 	rldimi r23,r20,12,0 /* Insert the new vsid value */
 	std r23,8(r21) /* Put new entry back into the stab */
 	eieio /* Order vsid update */
-	ld r23,0(r21) /* Get the esid part of the ste */
+	li r23,0
 	mfspr r20,DAR /* Get the new esid */
 	rldicl r20,r20,36,28 /* Permits a full 36b of ESID */
 	rldimi r23,r20,28,0 /* Insert the new esid value */
@@ -971,13 +972,13 @@ _GLOBAL(do_stab_bolted)
 	std r23,0(r21)
 	sync
-	ld r23,8(r21)
+	li r23,0
 	rldimi r23,r20,12,0
 	std r23,8(r21)
 	eieio
-	ld r23,0(r21) /* Get the esid part of the ste */
-	mr r22,r23
+	ld r22,0(r21) /* Get the esid part of the ste */
+	li r23,0
 	mfspr r20,DAR /* Get the new esid */
 	rldicl r20,r20,36,28 /* Permits a full 32b of ESID */
 	rldimi r23,r20,28,0 /* Insert the new esid value */
@@ -88,6 +88,8 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	for (group = 0; group < 2; group++) {
 		for (entry = 0; entry < 8; entry++, ste++) {
 			if (!(ste->dw0.dw0.v)) {
+				ste->dw0.dword0 = 0;
+				ste->dw1.dword1 = 0;
 				ste->dw1.dw1.vsid = vsid;
 				ste->dw0.dw0.esid = esid;
 				ste->dw0.dw0.kp = 1;
@@ -135,6 +137,9 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	castout_ste->dw0.dw0.v = 0;
 	asm volatile("sync" : : : "memory"); /* Order update */
+	castout_ste->dw0.dword0 = 0;
+	castout_ste->dw1.dword1 = 0;
 	castout_ste->dw1.dw1.vsid = vsid;
 	old_esid = castout_ste->dw0.dw0.esid;
 	castout_ste->dw0.dw0.esid = esid;
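
The castout path follows the same discipline as the assembly above: invalidate the entry, order that update, rebuild the entry from zero, then revalidate it. Condensed into C against the illustrative struct from the earlier sketch (barrier placement here is only indicative of the sync/eieio pairs in the hunks, and old_esid is read before the clear purely for clarity):

	/* Condensed castout sequence; 'struct ste' is the illustrative layout
	 * from the sketch above, not the kernel's real definition. */
	static uint64_t replace_ste(struct ste *castout, uint64_t esid, uint64_t vsid)
	{
		uint64_t old_esid = castout->dw0.esid;	/* caller flushes this segment */

		castout->dw0.v = 0;				/* invalidate the old entry */
		__asm__ __volatile__("sync" ::: "memory");	/* order the invalidate */

		memset(castout, 0, sizeof(*castout));		/* the fix: clear both dwords */
		castout->vsid = vsid;
		castout->dw0.esid = esid;
		castout->dw0.kp = 1;

		__asm__ __volatile__("eieio" ::: "memory");	/* order before revalidating */
		castout->dw0.v = 1;

		return old_esid;
	}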
@@ -186,7 +186,7 @@ get_kernel_vsid( unsigned long ea )
 {
 	unsigned long ordinal, vsid;
-	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | (ea >> 60);
+	ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | (ea >> 60);
 	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
 	ifppcdebug(PPCDBG_HTABSTRESS) {
@@ -209,7 +209,7 @@ get_vsid( unsigned long context, unsigned long ea )
 {
 	unsigned long ordinal, vsid;
-	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
+	ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | context;
 	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
 	ifppcdebug(PPCDBG_HTABSTRESS) {