Commit 80b228ce authored by David S. Miller

On sparc64, do not put PAGE_OFFSET in %g4 anymore; put the current task pointer there instead.
parent 267bdcc1
......@@ -96,15 +96,15 @@ etrap_irq2: rdpr %tstate, %g1 ! Single Group
stx %i2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2] ! Store Group
stx %i3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I3] ! Store Group
stx %i4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I4] ! Store Group
sethi %uhi(PAGE_OFFSET), %g4 ! IEU0
stx %i5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I5] ! Store Group
stx %i6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I6] ! Store Group
sllx %g4, 32, %g4 ! IEU0
stx %i7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I7] ! Store Group
wrpr %g0, ETRAP_PSTATE2, %pstate ! Single Group+4bubbles
jmpl %l2 + 0x4, %g0 ! CTI Group
mov %l6, %g6 ! IEU0
jmpl %l2 + 0x4, %g0 ! CTI Group
ldx [%g6 + TI_TASK], %g4 ! Load
nop
nop
nop
......@@ -187,14 +187,14 @@ scetrap: rdpr %pil, %g2 ! Single Group
stx %i3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I3] ! Store Group
stx %i4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I4] ! Store Group
sethi %uhi(PAGE_OFFSET), %g4 ! IEU0
stx %i5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I5] ! Store Group
stx %i6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I6] ! Store Group
sllx %g4, 32, %g4 ! IEU0
mov %l6, %g6 ! IEU1
stx %i7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I7] ! Store Group
ldx [%g6 + TI_TASK], %g4 ! Load Group
done
nop
nop
#undef TASK_REGOFF
#undef ETRAP_PSTATE1
......
......@@ -419,9 +419,6 @@ sun4u_init:
stxa %g0, [%g7] ASI_DMMU
membar #Sync
sethi %uhi(PAGE_OFFSET), %g4
sllx %g4, 32, %g4
/* We are now safely (we hope) in Nucleus context (0), rewrite
* the KERNBASE TTE's so they no longer have the global bit set.
* Don't forget to setup TAG_ACCESS first 8-)
......@@ -493,6 +490,7 @@ spitfire_tlb_fixup:
tlb_fixup_done:
sethi %hi(init_thread_union), %g6
or %g6, %lo(init_thread_union), %g6
ldx [%g6 + TI_TASK], %g4
mov %sp, %l6
mov %o4, %l7
......
......@@ -627,6 +627,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
(void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
sizeof(struct reg_window));
t->kregs->u_regs[UREG_G6] = (unsigned long) t;
t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
} else {
if (t->flags & _TIF_32BIT) {
sp &= 0x00000000ffffffffUL;
......
......@@ -181,9 +181,6 @@ startup_continue:
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
wr %g0, 0, %fprs
sethi %uhi(PAGE_OFFSET), %g4
sllx %g4, 32, %g4
/* XXX Buggy PROM... */
srl %o0, 0, %o0
ldx [%o0], %g6
......@@ -258,6 +255,9 @@ startup_continue:
#undef VPTE_BASE_SPITFIRE
#undef VPTE_BASE_CHEETAH
wrpr %o1, 0x0, %pstate
ldx [%g6 + TI_TASK], %g4
/* Setup interrupt globals, we are always SMP. */
wrpr %o1, PSTATE_IG, %pstate
......
......@@ -73,9 +73,8 @@ fill_fixup:
stx %g5, [%g6 + TI_FAULT_ADDR]
wrpr %g0, 0x0, %tl ! Out of trap levels.
wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
sethi %uhi(PAGE_OFFSET), %g4 ! Prepare page_offset global reg
mov %o7, %g6
sllx %g4, 32, %g4 ! and finish it...
ldx [%g6 + TI_TASK], %g4
/* This is the same as below, except we handle this a bit special
* since we must preserve %l5 and %l6, see comment above.
......@@ -195,9 +194,8 @@ fill_fixup_mna:
mov %g6, %o7 ! Stash away current.
wrpr %g0, 0x0, %tl ! Out of trap levels.
wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
sethi %uhi(PAGE_OFFSET), %g4 ! Set page_offset global reg.
mov %o7, %g6 ! Get current back.
sllx %g4, 32, %g4 ! Finish it.
ldx [%g6 + TI_TASK], %g4 ! Finish it.
call mem_address_unaligned
add %sp, STACK_BIAS + REGWIN_SZ, %o0
......@@ -302,9 +300,8 @@ fill_fixup_dax:
mov %g6, %o7 ! Stash away current.
wrpr %g0, 0x0, %tl ! Out of trap levels.
wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
sethi %uhi(PAGE_OFFSET), %g4 ! Set page_offset global reg.
mov %o7, %g6 ! Get current back.
sllx %g4, 32, %g4 ! Finish it.
ldx [%g6 + TI_TASK], %g4 ! Finish it.
call data_access_exception
add %sp, STACK_BIAS + REGWIN_SZ, %o0
......
......@@ -28,6 +28,7 @@
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#define ASI_BLK_XOR 0
#define ASI_BLK_XOR1 (ASI_BLK_P ^ (ASI_BLK_P >> 3) ^ ASI_P)
#define ASI_BLK_OR (ASI_BLK_P & ~ASI_P)
......@@ -841,9 +842,8 @@ ett: rd %asi, %x4 /* LSU Group+4bubbles */
1: retl /* CTI Group brk forced*/
srl %src, 0, %src /* IEU0 */
#else
1: sethi %uhi(PAGE_OFFSET), %g4 /* IEU0 Group */
retl /* CTI Group brk forced*/
sllx %g4, 32, %g4 /* IEU0 */
1: retl /* CTI Group brk forced*/
ldx [%g6 + TI_TASK], %g4 /* Load */
#endif
26: andcc %len, 8, %g0 /* IEU1 Group */
be,pn %icc, 1f /* CTI */
......
......@@ -29,6 +29,7 @@
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#define ASI_BLK_XOR 0
#define ASI_BLK_XOR1 (ASI_BLK_P ^ (ASI_BLK_P >> 3) ^ ASI_P)
#define ASI_BLK_OR (ASI_BLK_P & ~ASI_P)
......@@ -860,9 +861,8 @@ ett: rd %gsr, %x3 /* LSU Group+4bubbles */
1: retl /* CTI Group brk forced*/
srl %src, 0, %src /* IEU0 */
#else
1: sethi %uhi(PAGE_OFFSET), %g4 /* IEU0 Group */
retl /* CTI Group brk forced*/
sllx %g4, 32, %g4 /* IEU0 */
1: retl /* CTI Group brk forced*/
ldx [%g6 + TI_TASK], %g4 /* Load */
#endif
26: andcc %len, 8, %g0 /* IEU1 Group */
be,pn %icc, 1f /* CTI */
......
......@@ -38,11 +38,13 @@
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
VISEntry
sethi %hi(PAGE_SIZE), %g3
sub %o0, %g4, %g1
sethi %uhi(PAGE_OFFSET), %g2
sllx %g2, 32, %g2
sub %o0, %g2, %g1
and %o2, %g3, %o0
sethi %hi(TLBTEMP_BASE), %o3
sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
sub %o1, %g4, %g2
sub %o1, %g2, %g2
sllx %g3, 32, %g3
mov TLB_TAG_ACCESS, %o2
or %g3, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), %g3
......@@ -337,7 +339,9 @@ _clear_page: /* %o0=dest */
clear_user_page: /* %o0=dest, %o1=vaddr */
VISEntryHalf
sethi %hi(PAGE_SIZE), %g3
sub %o0, %g4, %g1
sethi %uhi(PAGE_OFFSET), %g2
sllx %g2, 32, %g2
sub %o0, %g2, %g1
and %o1, %g3, %o0
mov TLB_TAG_ACCESS, %o2
sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
......
......@@ -94,7 +94,7 @@ cc_end_cruft:
or %o5, %o4, %o4 ! IEU0 Group
addcc %o4, %sum, %sum ! IEU1
bcc,pt %xcc, ccfold ! CTI
sethi %uhi(PAGE_OFFSET), %g4 ! IEU0 Group
nop ! IEU0 Group
b,pt %xcc, ccfold ! CTI
add %sum, 1, %sum ! IEU1
......@@ -174,14 +174,14 @@ cctbl: CSUMCOPY_LASTCHUNK(0xe8,%g2,%g3)
12:
andcc %len, 0xf, %g7 ! IEU1 Group
ccte: bne,pn %icc, cc_end_cruft ! CTI
sethi %uhi(PAGE_OFFSET), %g4 ! IEU0
nop ! IEU0
ccfold: sllx %sum, 32, %o0 ! IEU0 Group
addcc %sum, %o0, %o0 ! IEU1 Group (regdep)
srlx %o0, 32, %o0 ! IEU0 Group (regdep)
bcs,a,pn %xcc, 1f ! CTI
add %o0, 1, %o0 ! IEU1 4 clocks (mispredict)
1: retl ! CTI Group brk forced
sllx %g4, 32, %g4 ! IEU0 Group
ldx [%g6 + TI_TASK], %g4 ! Load
ccslow: mov 0, %g5
brlez,pn %len, 4f
......@@ -321,7 +321,7 @@ cc_user_end_cruft:
or %o5, %o4, %o4 ! IEU0 Group
addcc %o4, %sum, %sum ! IEU1
bcc,pt %xcc, ccuserfold ! CTI
sethi %uhi(PAGE_OFFSET), %g4 ! IEU0 Group
nop ! IEU0 Group
b,pt %xcc, ccuserfold ! CTI
add %sum, 1, %sum ! IEU1
......@@ -404,7 +404,7 @@ ccusertbl:
andcc %len, 0xf, %g7 ! IEU1 Group
ccuserte:
bne,pn %icc, cc_user_end_cruft ! CTI
sethi %uhi(PAGE_OFFSET), %g4 ! IEU0
nop ! IEU0
ccuserfold:
sllx %sum, 32, %o0 ! IEU0 Group
addcc %sum, %o0, %o0 ! IEU1 Group (regdep)
......@@ -412,7 +412,7 @@ ccuserfold:
bcs,a,pn %xcc, 1f ! CTI
add %o0, 1, %o0 ! IEU1 4 clocks (mispredict)
1: retl ! CTI Group brk forced
sllx %g4, 32, %g4 ! IEU0 Group
ldx [%g6 + TI_TASK], %g4 ! IEU0 Group
ccuserslow:
mov 0, %g5
......@@ -502,10 +502,9 @@ cpc_handler:
sub %g0, EFAULT, %g2
brnz,a,pt %g1, 1f
st %g2, [%g1]
1: sethi %uhi(PAGE_OFFSET), %g4
wr %g3, %g0, %asi
1: wr %g3, %g0, %asi
retl
sllx %g4, 32, %g4
ldx [%g6 + TI_TASK], %g4
.section __ex_table
.align 4
......
......@@ -12,6 +12,7 @@
#include <asm/mmu_context.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
/* Basically, all this madness has to do with the
* fact that Cheetah does not support IMMU flushes
......@@ -288,9 +289,8 @@ __flush_icache_page: /* %o0 = phys_page */
addx %g0, %g0, %g0
nop
sethi %uhi(PAGE_OFFSET), %g4
retl
sllx %g4, 32, %g4
ldx [%g6 + TI_TASK], %g4
iflush1:sub %o1, 0x20, %g3
stxa %g0, [%g3] ASI_IC_TAG
......@@ -304,7 +304,9 @@ iflush2:sub %o1, 0x20, %g3
.align 64
.globl __flush_dcache_page
__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
sub %o0, %g4, %o0
sethi %uhi(PAGE_OFFSET), %g1
sllx %g1, 32, %g1
sub %o0, %g1, %o0
rdpr %ver, %g1
sethi %hi(0x003e0014), %g2
......
......@@ -70,9 +70,8 @@ linux_syscall_for_solaris:
.align 32
.globl solaris_sparc_syscall, entry64_personality_patch
solaris_sparc_syscall:
ldx [%g6 + TI_TASK], %l0
entry64_personality_patch:
ldub [%l0 + 0x0], %l0
ldub [%g4 + 0x0], %l0
cmp %g1, 255
bg,pn %icc, solaris_unimplemented
srl %g1, 0, %g1
......
......@@ -3,6 +3,6 @@
#include <asm/thread_info.h>
#define current (current_thread_info()->task)
register struct task_struct *current asm("g4");
#endif /* !(_SPARC64_CURRENT_H) */
......@@ -106,12 +106,10 @@ typedef unsigned long iopgprot_t;
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
#ifndef __ASSEMBLY__
/* Do prdele, look what happens to be in %g4... */
register unsigned long PAGE_OFFSET asm("g4");
#else
/* We used to stick this into a hard-coded global register (%g4)
* but that does not make sense anymore.
*/
#define PAGE_OFFSET 0xFFFFF80000000000
#endif
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
......
......@@ -209,6 +209,7 @@ do { CHECK_LOCKS(prev); \
"ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
"wrpr %%g0, 0x94, %%pstate\n\t" \
"mov %%l2, %%g6\n\t" \
"ldx [%%g6 + %6], %%g4\n\t" \
"wrpr %%g0, 0x96, %%pstate\n\t" \
"andcc %%o7, %5, %%g0\n\t" \
"bne,pn %%icc, ret_from_syscall\n\t" \
......@@ -216,7 +217,7 @@ do { CHECK_LOCKS(prev); \
: /* no outputs */ \
: "r" (next->thread_info), \
"i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \
"i" (_TIF_NEWCHILD) \
"i" (_TIF_NEWCHILD), "i" (TI_TASK) \
: "cc", "g1", "g2", "g3", "g5", "g7", \
"l2", "l3", "l4", "l5", "l6", "l7", \
"i0", "i1", "i2", "i3", "i4", "i5", \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment