Commit 27993c04 authored by Tony Luck's avatar Tony Luck Committed by David Mosberger

[PATCH] ia64: provide a more generic vtop patching infrastructure

We sometimes need to load the physical address of a kernel
object.  Often we can convert the virtual address to physical
at execution time, but sometimes (either for performance reasons
or during error recovery) we cannot do this.  Patch the marked
bundles to load the physical address.
parent e2c564ed
......@@ -127,12 +127,9 @@ ENTRY(vhpt_miss)
shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
.global ia64_ivt_patch1
ia64_ivt_patch1:
{ .mlx // we patch this bundle to include physical address of swapper_pg_dir
srlz.d // ensure "rsm psr.dt" has taken effect
(p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir
}
LOAD_PHYSICAL(srlz.d, p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
.pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
......@@ -425,12 +422,9 @@ ENTRY(nested_dtlb_miss)
shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
.global ia64_ivt_patch2
ia64_ivt_patch2:
{ .mlx // we patch this bundle to include physical address of swapper_pg_dir
srlz.d // ensure "rsm psr.dt" has taken effect
(p6) movl r19=swapper_pg_dir // region 5 is rooted at swapper_pg_dir
}
LOAD_PHYSICAL(srlz.d, p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
.pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
......
......@@ -349,43 +349,51 @@ find_memory (void)
}
/*
* There are two places in the performance critical path of
* the exception handling code where we need to know the physical
* address of the swapper_pg_dir structure. This routine
* patches the "movl" instructions to load the value needed.
* We sometimes need to load the physical address of a kernel
* object. Often we can convert the virtual address to physical
* at execution time, but sometimes (either for performance reasons
* or during error recovery) we cannot do this. Patch the marked
* bundles to load the physical address.
* The 64-bit value in a "movl reg=value" is scattered between the
* two words of the bundle like this:
*
* 6 6 5 4 3 2 1
* 3210987654321098765432109876543210987654321098765432109876543210
* ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
*
* CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
* xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
*/
static void __init
patch_ivt_with_phys_swapper_pg_dir(void)
patch_physical (void)
{
extern char ia64_ivt_patch1[], ia64_ivt_patch2[];
unsigned long spd = ia64_tpa((__u64)swapper_pg_dir);
unsigned long *p;
p = (unsigned long *)ia64_imva(ia64_ivt_patch1);
*p = (*p & 0x3fffffffffffUL) |
((spd & 0x000000ffffc00000UL)<<24);
p++;
*p = (*p & 0xf000080fff800000UL) |
((spd & 0x8000000000000000UL) >> 4) |
((spd & 0x7fffff0000000000UL) >> 40) |
((spd & 0x00000000001f0000UL) << 29) |
((spd & 0x0000000000200000UL) << 23) |
((spd & 0x000000000000ff80UL) << 43) |
((spd & 0x000000000000007fUL) << 36);
p = (unsigned long *)ia64_imva(ia64_ivt_patch2);
*p = (*p & 0x3fffffffffffUL) |
((spd & 0x000000ffffc00000UL)<<24);
p++;
*p = (*p & 0xf000080fff800000UL) |
((spd & 0x8000000000000000UL) >> 4) |
((spd & 0x7fffff0000000000UL) >> 40) |
((spd & 0x00000000001f0000UL) << 29) |
((spd & 0x0000000000200000UL) << 23) |
((spd & 0x000000000000ff80UL) << 43) |
((spd & 0x000000000000007fUL) << 36);
extern unsigned long *__start___vtop_patchlist[], *__end____vtop_patchlist[];
unsigned long **e, *p, paddr, vaddr;
for (e = __start___vtop_patchlist; e < __end____vtop_patchlist; e++) {
p = *e;
vaddr = ((p[1] & 0x0800000000000000UL) << 4) | /*A*/
((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
((p[1] & 0x000007f000000000UL) >> 36); /*G*/
paddr = ia64_tpa(vaddr);
*p = (*p & 0x3fffffffffffUL) |
((paddr & 0x000000ffffc00000UL)<<24); /*C*/
p++;
*p = (*p & 0xf000080fff800000UL) |
((paddr & 0x8000000000000000UL) >> 4) | /*A*/
((paddr & 0x7fffff0000000000UL) >> 40) | /*B*/
((paddr & 0x0000000000200000UL) << 23) | /*D*/
((paddr & 0x00000000001f0000UL) << 29) | /*E*/
((paddr & 0x000000000000ff80UL) << 43) | /*F*/
((paddr & 0x000000000000007fUL) << 36); /*G*/
}
}
......@@ -397,7 +405,7 @@ setup_arch (char **cmdline_p)
unw_init();
patch_ivt_with_phys_swapper_pg_dir();
patch_physical();
*cmdline_p = __va(ia64_boot_param->command_line);
strncpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
......
......@@ -55,6 +55,13 @@ SECTIONS
__stop___ex_table = .;
}
__vtop_patchlist : AT(ADDR(__vtop_patchlist) - LOAD_OFFSET)
{
__start___vtop_patchlist = .;
*(__vtop_patchlist)
__end____vtop_patchlist = .;
}
__mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
......
......@@ -50,6 +50,22 @@
.xdata4 "__ex_table", 99f-., y-.+4; \
[99:] x
/*
* Mark instructions that need a load of a virtual address patched to be
* a load of a physical address. We use this either in critical performance
* path (ivt.S - TLB miss processing) or in places where it might not be
* safe to use a "tpa" instruction (mca_asm.S - error recovery).
*/
.section "__vtop_patchlist", "a" // declare section & section attributes
.previous
#define LOAD_PHYSICAL(op, preg, reg, obj) \
1: { .mlx; \
op; \
(preg) movl reg = obj; \
}; \
.xdata8 "__vtop_patchlist", 1b
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
* we'll patch out the work-around bundles with NOPs, so their impact is minimal.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment