Commit 187dc563 authored by Tony Luck, committed by David Mosberger

[PATCH] ia64: put kernel into virtually mapped area

This patch moves the kernel text and data into region 5 (0xa00...) by using
a translation register to pin the entire area (i.e., no TLB faults).
The first-order goal is to be able to boot a kernel even when there is
no usable memory in the 64-128MB physical range.  It is also a step towards
enabling text replication on NUMA.
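
For reference: the top three bits of an ia64 virtual address select the region, so the move described above takes the kernel out of the region-7 identity-mapped window and into a pinned translation in region 5. A minimal sketch of that layout (the helper names and the example physical address are illustrative, not taken from the patch):

    /* Minimal sketch of the address-space split this patch relies on.
     * The helper names and the example physical address are illustrative,
     * not taken from the kernel sources. */
    #include <stdio.h>

    #define REGION_SHIFT      61                       /* bits 63:61 pick the region */
    #define KERNEL_START_ADDR 0xa000000100000000UL     /* region 5: new kernel home  */
    #define PAGE_OFFSET_ADDR  0xe000000000000000UL     /* region 7: identity mapped  */

    static unsigned long region_of(unsigned long va) { return va >> REGION_SHIFT; }
    static unsigned long imva_of(unsigned long pa)   { return PAGE_OFFSET_ADDR | pa; }

    int main(void)
    {
        unsigned long text = KERNEL_START_ADDR + 0x1000;            /* some kernel text address */
        printf("kernel text is in region %lu\n", region_of(text));  /* prints 5 */
        printf("phys 64MB aliases to %#lx in region %lu\n",
               imva_of(64UL << 20), region_of(imva_of(64UL << 20)));/* region-7 alias */
        return 0;
    }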
parent c77de4f2
@@ -62,7 +62,7 @@ GLOBAL_ENTRY(efi_call_phys)
mov b6=r2
;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret0: mov out4=in5
mov out0=in1
mov out1=in2
@@ -73,7 +73,7 @@ GLOBAL_ENTRY(efi_call_phys)
br.call.sptk.many rp=b6 // call the EFI function
.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2: mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc1
mov rp=loc0
......
@@ -178,15 +178,12 @@ GLOBAL_ENTRY(ia64_switch_to)
;;
st8 [r22]=sp // save kernel stack pointer of old task
shr.u r26=r20,IA64_GRANULE_SHIFT
- shr.u r17=r20,KERNEL_TR_PAGE_SHIFT
- ;;
- cmp.ne p6,p7=KERNEL_TR_PAGE_NUM,r17
adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
;;
/*
 * If we've already mapped this task's page, we can skip doing it again.
 */
- (p6) cmp.eq p7,p6=r26,r27
+ cmp.eq p7,p6=r26,r27
(p6) br.cond.dpnt .map
;;
.done:
......
@@ -60,22 +60,42 @@ start_ap:
mov r4=r0
.body
- /*
-  * Initialize the region register for region 7 and install a translation register
-  * that maps the kernel's text and data:
-  */
rsm psr.i | psr.ic
- mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET) << 8) | (IA64_GRANULE_SHIFT << 2))
;;
srlz.i
+ ;;
+ /*
+  * Initialize kernel region registers:
+  *	rr[5]: VHPT enabled, page size = PAGE_SHIFT
+  *	rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+  *	rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+  */
+ mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+ movl r17=(5<<61)
+ mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+ movl r19=(6<<61)
+ mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+ movl r21=(7<<61)
+ ;;
+ mov rr[r17]=r16
+ mov rr[r19]=r18
+ mov rr[r21]=r20
+ ;;
+ /*
+  * Now pin mappings into the TLB for kernel text and data
+  */
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
;;
- mov rr[r17]=r16
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
- movl r18=((1 << KERNEL_TR_PAGE_SHIFT) | PAGE_KERNEL)
+ mov r3=ip
+ movl r18=PAGE_KERNEL
+ ;;
+ dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
+ ;;
+ or r18=r2,r18
;;
srlz.i
;;
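
The three rr writes above follow the field layout visible in the mov r16/r18/r20 expressions: region ID in bits 31:8, preferred page size (log2) in bits 7:2, VHPT enable in bit 0. A hedged C restatement of that composition (the shift values plugged in below are assumptions; the kernel takes them from PAGE_SHIFT and IA64_GRANULE_SHIFT):

    /* How a region-register value is assembled, mirroring the
     * (rid << 8) | (shift << 2) | ve expressions in the hunk above.
     * The page-size constants are assumed values for illustration. */
    #include <stdint.h>

    #define PAGE_SHIFT          14   /* assumed 16KB pages   */
    #define IA64_GRANULE_SHIFT  26   /* assumed 64MB granule */

    static uint64_t make_rr(uint64_t rid, unsigned page_shift, int vhpt_on)
    {
        return (rid << 8) | ((uint64_t)page_shift << 2) | (vhpt_on ? 1 : 0);
    }

    /* rr[5]: VHPT on, normal pages; rr[6] and rr[7]: VHPT off, granule pages */
    uint64_t rr5_value(uint64_t rid) { return make_rr(rid, PAGE_SHIFT, 1); }
    uint64_t rr6_value(uint64_t rid) { return make_rr(rid, IA64_GRANULE_SHIFT, 0); }
    uint64_t rr7_value(uint64_t rid) { return make_rr(rid, IA64_GRANULE_SHIFT, 0); }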
@@ -113,16 +133,6 @@ start_ap:
mov ar.fpsr=r2
;;
- #ifdef CONFIG_IA64_EARLY_PRINTK
- mov r3=(6<<8) | (IA64_GRANULE_SHIFT<<2)
- movl r2=6<<61
- ;;
- mov rr[r2]=r3
- ;;
- srlz.i
- ;;
- #endif
#define isAP p2 // are we an Application Processor?
#define isBP p3 // are we the Bootstrap Processor?
@@ -143,12 +153,36 @@ start_ap:
movl r2=init_thread_union
cmp.eq isBP,isAP=r0,r0
#endif
- mov r16=KERNEL_TR_PAGE_NUM
+ ;;
+ tpa r3=r2 // r3 == phys addr of task struct
+ // load mapping for stack (virtaddr in r2, physaddr in r3)
+ rsm psr.ic
+ movl r17=PAGE_KERNEL
+ ;;
+ srlz.d
+ dep r18=0,r3,0,12
+ ;;
+ or r18=r17,r18
+ dep r2=-1,r3,61,3 // IMVA of task
+ ;;
+ mov r17=rr[r2]
+ shr.u r16=r3,IA64_GRANULE_SHIFT
+ ;;
+ dep r17=0,r17,8,24
+ ;;
+ mov cr.itir=r17
+ mov cr.ifa=r2
+ mov r19=IA64_TR_CURRENT_STACK
+ ;;
+ itr.d dtr[r19]=r18
+ ;;
+ ssm psr.ic
+ srlz.d
;;
// load the "current" pointer (r13) and ar.k6 with the current task
mov IA64_KR(CURRENT)=r2 // virtual address
- // initialize k4 to a safe value (64-128MB is mapped by TR_KERNEL)
mov IA64_KR(CURRENT_STACK)=r16
mov r13=r2
/*
@@ -665,14 +699,14 @@ GLOBAL_ENTRY(__ia64_init_fpu)
END(__ia64_init_fpu)
/*
- * Switch execution mode from virtual to physical or vice versa.
+ * Switch execution mode from virtual to physical
 *
 * Inputs:
 *	r16 = new psr to establish
 *
 * Note: RSE must already be in enforced lazy mode
 */
- GLOBAL_ENTRY(ia64_switch_mode)
+ GLOBAL_ENTRY(ia64_switch_mode_phys)
{
alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
@@ -682,35 +716,86 @@ GLOBAL_ENTRY(ia64_switch_mode)
{
flushrs // must be first insn in group
srlz.i
- shr.u r19=r15,61 // r19 <- top 3 bits of current IP
}
;;
mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode,r15
- xor r15=0x7,r19 // flip the region bits
+ add r3=1f-ia64_switch_mode_phys,r15
mov r17=ar.bsp
mov r14=rp // get return address into a general register
- // switch RSE backing store:
+ ;;
+ // going to physical mode, use tpa to translate virt->phys
+ tpa r17=r17
+ tpa r3=r3
+ tpa sp=sp
+ tpa r14=r14
;;
- dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
mov r18=ar.rnat // save ar.rnat
- ;;
mov ar.bspstore=r17 // this steps on ar.rnat
- dep r3=r15,r3,61,3 // make rfi return address physical or virtual
+ mov cr.iip=r3
+ mov cr.ifs=r0
;;
+ mov ar.rnat=r18 // restore ar.rnat
+ rfi // must be last insn in group
+ ;;
+ 1: mov rp=r14
+ br.ret.sptk.many rp
+ END(ia64_switch_mode_phys)
+
+ /*
+  * Switch execution mode from physical to virtual
+  *
+  * Inputs:
+  *	r16 = new psr to establish
+  *
+  * Note: RSE must already be in enforced lazy mode
+  */
+ GLOBAL_ENTRY(ia64_switch_mode_virt)
+ {
+ alloc r2=ar.pfs,0,0,0,0
+ rsm psr.i | psr.ic // disable interrupts and interrupt collection
+ mov r15=ip
+ }
+ ;;
+ {
+ flushrs // must be first insn in group
+ srlz.i
+ }
+ ;;
+ mov cr.ipsr=r16 // set new PSR
+ add r3=1f-ia64_switch_mode_virt,r15
+ mov r17=ar.bsp
+ mov r14=rp // get return address into a general register
+ ;;
+ // going to virtual
+ //   - for code addresses, set upper bits of addr to KERNEL_START
+ //   - for stack addresses, set upper 3 bits to 0xe.... Dont change any of the
+ //     lower bits since we want it to stay identity mapped
+ movl r18=KERNEL_START
+ dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+ dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+ dep r17=-1,r17,61,3
+ dep sp=-1,sp,61,3
+ ;;
+ or r3=r3,r18
+ or r14=r14,r18
+ ;;
+ mov r18=ar.rnat // save ar.rnat
+ mov ar.bspstore=r17 // this steps on ar.rnat
mov cr.iip=r3
mov cr.ifs=r0
- dep sp=r15,sp,61,3 // make stack pointer physical or virtual
;;
mov ar.rnat=r18 // restore ar.rnat
- dep r14=r15,r14,61,3 // make function return address physical or virtual
rfi // must be last insn in group
;;
1: mov rp=r14
br.ret.sptk.many rp
- END(ia64_switch_mode)
+ END(ia64_switch_mode_virt)
#ifdef CONFIG_IA64_BRL_EMU
......
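The two new entry points split what used to be one mode-switch routine: going physical now runs tpa on every address that must survive the switch, while going virtual rebuilds the addresses as the comments in the hunk describe. The virtual-direction fixups amount to the following (a sketch only; the KERNEL_TR_PAGE_SHIFT value is an assumption):

    /* Address rewrites done by ia64_switch_mode_virt, per the comments above:
     * code addresses keep their offset inside the pinned kernel TR page and get
     * KERNEL_START ORed in; stack/backing-store addresses just get their top
     * three bits set so they stay identity mapped, now via region 7. */
    #include <stdint.h>

    #define KERNEL_TR_PAGE_SHIFT 26                     /* assumed 64MB TR page */
    #define KERNEL_START         0xa000000100000000UL   /* region-5 kernel base */

    static uint64_t code_to_virt(uint64_t phys_ip)
    {
        uint64_t off = phys_ip & ((1UL << KERNEL_TR_PAGE_SHIFT) - 1);  /* dep r3=0,r3,SHIFT,64-SHIFT */
        return KERNEL_START | off;                                     /* or r3=r3,r18 */
    }

    static uint64_t stack_to_virt(uint64_t phys_sp)
    {
        return phys_sp | (7UL << 61);                                  /* dep sp=-1,sp,61,3 */
    }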
@@ -159,6 +159,7 @@ EXPORT_SYMBOL(efi_dir);
EXPORT_SYMBOL(ia64_mv);
#endif
EXPORT_SYMBOL(machvec_noop);
+ EXPORT_SYMBOL(zero_page_memmap_ptr);
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
......
@@ -122,8 +122,13 @@ ENTRY(vhpt_miss)
shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
+ .global ia64_ivt_patch1
+ ia64_ivt_patch1:
+ { .mlx // we patch this bundle to include physical address of swapper_pg_dir
srlz.d // ensure "rsm psr.dt" has taken effect
- (p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+ (p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir
+ }
+ .pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
@@ -415,8 +420,13 @@ ENTRY(nested_dtlb_miss)
shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
- srlz.d
- (p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+ .global ia64_ivt_patch2
+ ia64_ivt_patch2:
+ { .mlx // we patch this bundle to include physical address of swapper_pg_dir
+ srlz.d // ensure "rsm psr.dt" has taken effect
+ (p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir
+ }
+ .pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
......
@@ -662,17 +662,17 @@ ia64_mca_init(void)
IA64_MCA_DEBUG("ia64_mca_init: registered mca rendezvous spinloop and wakeup mech.\n");
- ia64_mc_info.imi_mca_handler = __pa(mca_hldlr_ptr->fp);
+ ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
/*
 * XXX - disable SAL checksum by setting size to 0; should be
- * __pa(ia64_os_mca_dispatch_end) - __pa(ia64_os_mca_dispatch);
+ * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
 */
ia64_mc_info.imi_mca_handler_size = 0;
/* Register the os mca handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
ia64_mc_info.imi_mca_handler,
- mca_hldlr_ptr->gp,
+ ia64_tpa(mca_hldlr_ptr->gp),
ia64_mc_info.imi_mca_handler_size,
0, 0, 0)))
{
@@ -682,15 +682,15 @@ ia64_mca_init(void)
}
IA64_MCA_DEBUG("ia64_mca_init: registered os mca handler with SAL at 0x%lx, gp = 0x%lx\n",
- ia64_mc_info.imi_mca_handler, mca_hldlr_ptr->gp);
+ ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
/*
 * XXX - disable SAL checksum by setting size to 0, should be
 * IA64_INIT_HANDLER_SIZE
 */
- ia64_mc_info.imi_monarch_init_handler = __pa(mon_init_ptr->fp);
+ ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
ia64_mc_info.imi_monarch_init_handler_size = 0;
- ia64_mc_info.imi_slave_init_handler = __pa(slave_init_ptr->fp);
+ ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
ia64_mc_info.imi_slave_init_handler_size = 0;
IA64_MCA_DEBUG("ia64_mca_init: os init handler at %lx\n",
@@ -699,10 +699,10 @@ ia64_mca_init(void)
/* Register the os init handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
ia64_mc_info.imi_monarch_init_handler,
- __pa(ia64_get_gp()),
+ ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_monarch_init_handler_size,
ia64_mc_info.imi_slave_init_handler,
- __pa(ia64_get_gp()),
+ ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_slave_init_handler_size)))
{
printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
......
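The __pa() to ia64_tpa() substitutions above are a direct consequence of the relocation: __pa() is just an offset from the region-7 base, so it only works on identity-mapped addresses, while the handler and gp values handed to SAL now resolve into region 5 and must be translated by the hardware. A rough illustration of the difference (values are made up; ia64_tpa is the existing tpa-based helper in asm/processor.h):

    /* Why __pa() had to become ia64_tpa() here: __pa() assumes an
     * identity-mapped (region 7) address.  Illustrative values only. */
    #include <assert.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0xe000000000000000UL        /* region-7 identity-mapped base */

    static uint64_t pa_of_identity_mapped(uint64_t va)   /* what __pa() boils down to */
    {
        return va - PAGE_OFFSET;
    }

    int main(void)
    {
        uint64_t region7_va = PAGE_OFFSET + 0x4000000UL;      /* fine for __pa() */
        uint64_t region5_va = 0xa000000100000000UL + 0x100;   /* kernel symbol after this patch */

        assert(pa_of_identity_mapped(region7_va) == 0x4000000UL);
        (void)region5_va;  /* only the tpa instruction (ia64_tpa) can translate this one */
        return 0;
    }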
@@ -164,7 +164,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
- dep.z r8=r8,0,61 // convert rp to physical
+ tpa r8=r8 // convert rp to physical
;;
mov b7 = loc2 // install target to branch reg
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
@@ -174,13 +174,13 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
or loc3=loc3,r17 // add in psr the bits to set
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret1: mov rp = r8 // install return address (physical)
br.cond.sptk.many b7
1:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2:
mov psr.l = loc3 // restore init PSR
@@ -228,13 +228,13 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
mov b7 = loc2 // install target to branch reg
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret6:
br.call.sptk.many rp=b7 // now make the call
.ret7:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret8: mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
......
@@ -265,7 +265,7 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
static void
find_memory (void)
{
- # define KERNEL_END ((unsigned long) &_end)
+ # define KERNEL_END (&_end)
unsigned long bootmap_size;
unsigned long max_pfn;
int n = 0;
@@ -286,8 +286,8 @@ find_memory (void)
+ strlen(__va(ia64_boot_param->command_line)) + 1);
n++;
- rsvd_region[n].start = KERNEL_START;
- rsvd_region[n].end = KERNEL_END;
+ rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+ rsvd_region[n].end = (unsigned long) ia64_imva(KERNEL_END);
n++;
#ifdef CONFIG_BLK_DEV_INITRD
@@ -347,6 +347,47 @@ find_memory (void)
#endif
}
+ /*
+  * There are two places in the performance critical path of
+  * the exception handling code where we need to know the physical
+  * address of the swapper_pg_dir structure. This routine
+  * patches the "movl" instructions to load the value needed.
+  */
+ static void __init
+ patch_ivt_with_phys_swapper_pg_dir(void)
+ {
+	extern char ia64_ivt_patch1[], ia64_ivt_patch2[];
+	unsigned long spd = ia64_tpa((__u64)swapper_pg_dir);
+	unsigned long *p;
+
+	p = (unsigned long *)ia64_imva(ia64_ivt_patch1);
+	*p = (*p & 0x3fffffffffffUL) |
+		((spd & 0x000000ffffc00000UL)<<24);
+	p++;
+	*p = (*p & 0xf000080fff800000UL) |
+		((spd & 0x8000000000000000UL) >> 4)  |
+		((spd & 0x7fffff0000000000UL) >> 40) |
+		((spd & 0x00000000001f0000UL) << 29) |
+		((spd & 0x0000000000200000UL) << 23) |
+		((spd & 0x000000000000ff80UL) << 43) |
+		((spd & 0x000000000000007fUL) << 36);
+
+	p = (unsigned long *)ia64_imva(ia64_ivt_patch2);
+	*p = (*p & 0x3fffffffffffUL) |
+		((spd & 0x000000ffffc00000UL)<<24);
+	p++;
+	*p = (*p & 0xf000080fff800000UL) |
+		((spd & 0x8000000000000000UL) >> 4)  |
+		((spd & 0x7fffff0000000000UL) >> 40) |
+		((spd & 0x00000000001f0000UL) << 29) |
+		((spd & 0x0000000000200000UL) << 23) |
+		((spd & 0x000000000000ff80UL) << 43) |
+		((spd & 0x000000000000007fUL) << 36);
+ }
void __init
setup_arch (char **cmdline_p)
{
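The bit shuffling in patch_ivt_with_phys_swapper_pg_dir() writes the physical address of swapper_pg_dir into the imm64 operand of the movl sitting in the X slot of each patched MLX bundle; the immediate is scattered across both 64-bit words of the bundle, which is where the odd-looking masks come from. A quick self-check that those seven masks split the address cleanly (field names follow the movl encoding; the check itself is illustrative, not kernel code):

    /* The seven masks used above must cover all 64 address bits exactly once,
     * since together they form the movl imm64 (imm7b, imm9d, imm5c, ic, imm41, i). */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t masks[] = {
            0x000000ffffc00000UL,   /* low 18 bits of imm41, patched into the first word */
            0x8000000000000000UL,   /* i                       */
            0x7fffff0000000000UL,   /* high 23 bits of imm41   */
            0x00000000001f0000UL,   /* imm5c                   */
            0x0000000000200000UL,   /* ic                      */
            0x000000000000ff80UL,   /* imm9d                   */
            0x000000000000007fUL,   /* imm7b                   */
        };
        uint64_t seen = 0;
        for (unsigned i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
            assert((seen & masks[i]) == 0);        /* fields must not overlap */
            seen |= masks[i];
        }
        assert(seen == 0xffffffffffffffffUL);      /* every address bit is encoded somewhere */
        return 0;
    }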
@@ -355,6 +396,8 @@ setup_arch (char **cmdline_p)
unw_init();
+ patch_ivt_with_phys_swapper_pg_dir();
*cmdline_p = __va(ia64_boot_param->command_line);
strncpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; /* for safety */
@@ -751,7 +794,7 @@ cpu_init (void)
if (current->mm)
BUG();
- ia64_mmu_init(cpu_data);
+ ia64_mmu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
/* initialize global ia32 state - CR0 and CR4 */
......
@@ -598,7 +598,7 @@ init_smp_config(void)
/* Tell SAL where to drop the AP's. */
ap_startup = (struct fptr *) start_ap;
sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
- __pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0);
+ ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
if (sal_ret < 0)
printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
ia64_sal_strerror(sal_ret));
......
@@ -47,6 +47,8 @@ unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
static int pgt_cache_water[2] = { 25, 50 };
+ struct page *zero_page_memmap_ptr; /* map entry for zero page */
void
check_pgt_cache (void)
{
@@ -112,14 +114,16 @@ ia64_init_addr_space (void)
void
free_initmem (void)
{
- unsigned long addr;
+ unsigned long addr, eaddr;
- addr = (unsigned long) &__init_begin;
- for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
+ addr = (unsigned long) ia64_imva(&__init_begin);
+ eaddr = (unsigned long) ia64_imva(&__init_end);
+ while (addr < eaddr) {
ClearPageReserved(virt_to_page(addr));
set_page_count(virt_to_page(addr), 1);
free_page(addr);
++totalram_pages;
+ addr += PAGE_SIZE;
}
printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
(&__init_end - &__init_begin) >> 10);
@@ -269,7 +273,7 @@ put_gate_page (struct page *page, unsigned long address)
void __init
ia64_mmu_init (void *my_cpu_data)
{
- unsigned long psr, rid, pta, impl_va_bits;
+ unsigned long psr, pta, impl_va_bits;
extern void __init tlb_init (void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
@@ -277,21 +281,8 @@ ia64_mmu_init (void *my_cpu_data)
# define VHPT_ENABLE_BIT 1
#endif
- /*
-  * Set up the kernel identity mapping for regions 6 and 5. The mapping for region
-  * 7 is setup up in _start().
-  */
+ /* Pin mapping for percpu area into TLB */
psr = ia64_clear_ic();
- rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
- ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2));
- rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START);
- ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1);
- /* ensure rr6 is up-to-date before inserting the PERCPU_ADDR translation: */
- ia64_srlz_d();
ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
PERCPU_PAGE_SHIFT);
@@ -489,6 +480,7 @@ paging_init (void)
discontig_paging_init();
efi_memmap_walk(count_pages, &num_physpages);
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#else /* !CONFIG_DISCONTIGMEM */
void
@@ -560,6 +552,7 @@ paging_init (void)
}
free_area_init(zones_size);
# endif /* !CONFIG_VIRTUAL_MEM_MAP */
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#endif /* !CONFIG_DISCONTIGMEM */
@@ -630,7 +623,7 @@ mem_init (void)
pgt_cache_water[1] = num_pgt_pages;
/* install the gate page in the global page table: */
- put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
+ put_gate_page(virt_to_page(ia64_imva(__start_gate_section)), GATE_ADDR);
#ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init();
......
@@ -3,8 +3,9 @@
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
+ #include <asm/pgtable.h>
- #define LOAD_OFFSET PAGE_OFFSET
+ #define LOAD_OFFSET KERNEL_START + KERNEL_TR_PAGE_SIZE
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-ia64-little")
@@ -23,22 +24,22 @@ SECTIONS
}
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
- phys_start = _start - PAGE_OFFSET;
+ phys_start = _start - LOAD_OFFSET;
. = KERNEL_START;
_text = .;
_stext = .;
- .text : AT(ADDR(.text) - PAGE_OFFSET)
+ .text : AT(ADDR(.text) - LOAD_OFFSET)
{
*(.text.ivt)
*(.text)
}
- .text2 : AT(ADDR(.text2) - PAGE_OFFSET)
+ .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
- .text.lock : AT(ADDR(.text.lock) - PAGE_OFFSET)
+ .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
{ *(.text.lock) }
#endif
_etext = .;
@@ -47,14 +48,14 @@ SECTIONS
/* Exception table */
. = ALIGN(16);
- __ex_table : AT(ADDR(__ex_table) - PAGE_OFFSET)
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
{
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
- __mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - PAGE_OFFSET)
+ __mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
*(__mckinley_e9_bundles)
@@ -67,7 +68,7 @@ SECTIONS
#if defined(CONFIG_IA64_GENERIC)
/* Machine Vector */
. = ALIGN(16);
- .machvec : AT(ADDR(.machvec) - PAGE_OFFSET)
+ .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
{
machvec_start = .;
*(.machvec)
@@ -77,9 +78,9 @@ SECTIONS
/* Unwind info & table: */
. = ALIGN(8);
- .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
+ .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
{ *(.IA_64.unwind_info*) }
- .IA_64.unwind : AT(ADDR(.IA_64.unwind) - PAGE_OFFSET)
+ .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
{
ia64_unw_start = .;
*(.IA_64.unwind*)
@@ -88,24 +89,24 @@ SECTIONS
RODATA
- .opd : AT(ADDR(.opd) - PAGE_OFFSET)
+ .opd : AT(ADDR(.opd) - LOAD_OFFSET)
{ *(.opd) }
/* Initialization code and data: */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
- .init.text : AT(ADDR(.init.text) - PAGE_OFFSET)
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
{
_sinittext = .;
*(.init.text)
_einittext = .;
}
- .init.data : AT(ADDR(.init.data) - PAGE_OFFSET)
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
{ *(.init.data) }
- .init.ramfs : AT(ADDR(.init.ramfs) - PAGE_OFFSET)
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
{
__initramfs_start = .;
*(.init.ramfs)
@@ -113,19 +114,19 @@ SECTIONS
}
. = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - PAGE_OFFSET)
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
{
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
- __param : AT(ADDR(__param) - PAGE_OFFSET)
+ __param : AT(ADDR(__param) - LOAD_OFFSET)
{
__start___param = .;
*(__param)
__stop___param = .;
}
- .initcall.init : AT(ADDR(.initcall.init) - PAGE_OFFSET)
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
{
__initcall_start = .;
*(.initcall1.init)
@@ -138,17 +139,17 @@ SECTIONS
__initcall_end = .;
}
__con_initcall_start = .;
- .con_initcall.init : AT(ADDR(.con_initcall.init) - PAGE_OFFSET)
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }
__con_initcall_end = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* The initial task and kernel stack */
- .data.init_task : AT(ADDR(.data.init_task) - PAGE_OFFSET)
+ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
{ *(.data.init_task) }
- .data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET)
+ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
{ *(__special_page_section)
__start_gate_section = .;
*(.text.gate)
@@ -156,13 +157,13 @@ SECTIONS
}
. = ALIGN(SMP_CACHE_BYTES);
- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
+ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
{ *(.data.cacheline_aligned) }
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
__phys_per_cpu_start = .;
- .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - PAGE_OFFSET)
+ .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu)
@@ -170,24 +171,24 @@ SECTIONS
}
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
- .data : AT(ADDR(.data) - PAGE_OFFSET)
+ .data : AT(ADDR(.data) - LOAD_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
- .got : AT(ADDR(.got) - PAGE_OFFSET)
+ .got : AT(ADDR(.got) - LOAD_OFFSET)
{ *(.got.plt) *(.got) }
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
- .sdata : AT(ADDR(.sdata) - PAGE_OFFSET)
+ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) }
_edata = .;
_bss = .;
- .sbss : AT(ADDR(.sbss) - PAGE_OFFSET)
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
{ *(.sbss) *(.scommon) }
- .bss : AT(ADDR(.bss) - PAGE_OFFSET)
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET)
{ *(.bss) *(COMMON) }
_end = .;
......
@@ -207,7 +207,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
#define RGN_KERNEL 7
- #define VMALLOC_START (0xa000000000000000 + 3*PERCPU_PAGE_SIZE)
+ #define VMALLOC_START 0xa000000200000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
@@ -450,7 +450,8 @@ extern void paging_init (void);
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
- #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+ extern struct page *zero_page_memmap_ptr;
+ #define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
@@ -481,7 +482,6 @@ typedef pte_t *pte_addr_t;
 */
#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
- #define KERNEL_TR_PAGE_NUM ((KERNEL_START - PAGE_OFFSET) / KERNEL_TR_PAGE_SIZE)
/*
 * No page table caches to initialise
......
@@ -929,6 +929,18 @@ ia64_tpa (__u64 addr)
return result;
}
+
+ /*
+  * Take a mapped kernel address and return the equivalent address
+  * in the region 7 identity mapped virtual area.
+  */
+ static inline void *
+ ia64_imva (void *addr)
+ {
+	void *result;
+	asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
+	return __va(result);
+ }
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
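
ia64_imva() gives callers a region-7 (identity-mapped) alias for a symbol whose linked address is now in region 5, which is why setup.c, mm/init.c and cpu_init() above wrap addresses in it before handing them to code that expects identity-mapped pointers. A conceptual model of the identity it provides (the physical load address below is an assumption standing in for what the tpa instruction returns):

    /* Model of ia64_imva(): region-5 kernel address -> physical (tpa) -> region-7 alias (__va). */
    #include <assert.h>
    #include <stdint.h>

    #define KERNEL_START  0xa000000100000000UL   /* region-5 virtual base of the kernel  */
    #define PAGE_OFFSET   0xe000000000000000UL   /* region-7 identity-mapped base        */
    #define KERNEL_PHYS   0x0000000004000000UL   /* assumed physical load address (64MB) */

    static uint64_t model_tpa(uint64_t va)       /* stand-in for the hardware tpa instruction */
    {
        return (va - KERNEL_START) + KERNEL_PHYS;
    }

    static uint64_t model_imva(uint64_t va)      /* __va(tpa(va)), as ia64_imva() does */
    {
        return PAGE_OFFSET + model_tpa(va);
    }

    int main(void)
    {
        uint64_t sym = KERNEL_START + 0x12345;
        assert(model_imva(sym) == PAGE_OFFSET + KERNEL_PHYS + 0x12345);
        return 0;
    }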
......
@@ -19,7 +19,7 @@
#include <asm/pal.h>
#include <asm/percpu.h>
- #define KERNEL_START (PAGE_OFFSET + 68*1024*1024)
+ #define KERNEL_START (0xa000000100000000)
/* 0xa000000000000000 - 0xa000000000000000+PERCPU_MAX_SIZE remain unmapped */
#define PERCPU_ADDR (0xa000000000000000 + PERCPU_PAGE_SIZE)
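
Putting the KERNEL_START change together with the earlier VMALLOC_START change, region 5 ends up laid out roughly as below (addresses come straight from the new #defines; the PERCPU_PAGE_SIZE value is an assumption):

    /* Region-5 layout implied by the new #defines (summary only). */
    #define PERCPU_PAGE_SIZE  0x10000UL              /* assumed 64KB                          */
    #define REGION5_BASE      0xa000000000000000UL   /* first per-cpu page is left unmapped   */
    #define PERCPU_ADDR       (REGION5_BASE + PERCPU_PAGE_SIZE)
    #define KERNEL_START      0xa000000100000000UL   /* kernel text+data, pinned by one TR    */
    #define VMALLOC_START     0xa000000200000000UL   /* vmalloc space begins above the kernel */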
......