Commit db053b86 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

xen: clean up x86-64 warnings

There are a couple of Xen features which rely on directly accessing
per-cpu data via a segment register, which is not yet available on
x86-64.  In the meantime, just disable direct access to the vcpu info
structure; this leaves some of the code as dead, but it will come to
life in time, and the warnings are suppressed.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 08115ab4
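
For context on the fix: the "direct access" paths are small assembly stubs that touch the vcpu_info event mask through a single segment-relative instruction, which is only possible when per-cpu data is addressable via a segment override. A rough sketch of what the 32-bit form boils down to follows; the exact macro expansion, symbol name, and segment register are assumptions for illustration, not quoted from this commit:

	/* 32-bit: percpu variables sit at a fixed offset from the
	   %fs base, so unmasking events is one patchable instruction */
	movb $0, %fs:per_cpu__xen_vcpu_info+XEN_vcpu_info_mask

x86-64 had no comparable percpu segment base wired up at this point, so there is no single-instruction form to patch in, and the corresponding stubs are disabled instead.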
@@ -112,7 +112,14 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
  *
  * 0: not available, 1: available
  */
-static int have_vcpu_info_placement = 1;
+static int have_vcpu_info_placement =
+#ifdef CONFIG_X86_32
+	1
+#else
+	0
+#endif
+	;
 
 static void xen_vcpu_setup(int cpu)
 {
@@ -941,6 +948,7 @@ static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
 }
 #endif
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
@@ -959,6 +967,7 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 
 	xen_set_pte(ptep, pte);
 }
+#endif
 
 static __init void xen_pagetable_setup_start(pgd_t *base)
 {
@@ -1025,7 +1034,6 @@ void xen_setup_vcpu_info_placement(void)
 
 	/* xen_vcpu_setup managed to place the vcpu_info within the
 	   percpu area for all cpus, so make use of it */
-#ifdef CONFIG_X86_32
 	if (have_vcpu_info_placement) {
 		printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
@@ -1035,7 +1043,6 @@ void xen_setup_vcpu_info_placement(void)
 		pv_irq_ops.irq_enable = xen_irq_enable_direct;
 		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
 	}
-#endif
 }
 
 static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
@@ -1056,12 +1063,10 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
 		goto patch_site
 
 	switch (type) {
-#ifdef CONFIG_X86_32
 		SITE(pv_irq_ops, irq_enable);
 		SITE(pv_irq_ops, irq_disable);
 		SITE(pv_irq_ops, save_fl);
 		SITE(pv_irq_ops, restore_fl);
-#endif /* CONFIG_X86_32 */
 #undef SITE
 
 	patch_site:
@@ -1399,48 +1404,11 @@ static void *m2v(phys_addr_t maddr)
 	return __ka(m2p(maddr));
 }
 
-#ifdef CONFIG_X86_64
-static void walk(pgd_t *pgd, unsigned long addr)
-{
-	unsigned l4idx = pgd_index(addr);
-	unsigned l3idx = pud_index(addr);
-	unsigned l2idx = pmd_index(addr);
-	unsigned l1idx = pte_index(addr);
-	pgd_t l4;
-	pud_t l3;
-	pmd_t l2;
-	pte_t l1;
-
-	xen_raw_printk("walk %p, %lx -> %d %d %d %d\n",
-		       pgd, addr, l4idx, l3idx, l2idx, l1idx);
-
-	l4 = pgd[l4idx];
-	xen_raw_printk("  l4: %016lx\n", l4.pgd);
-	xen_raw_printk("      %016lx\n", pgd_val(l4));
-
-	l3 = ((pud_t *)(m2v(l4.pgd)))[l3idx];
-	xen_raw_printk("  l3: %016lx\n", l3.pud);
-	xen_raw_printk("      %016lx\n", pud_val(l3));
-
-	l2 = ((pmd_t *)(m2v(l3.pud)))[l2idx];
-	xen_raw_printk("  l2: %016lx\n", l2.pmd);
-	xen_raw_printk("      %016lx\n", pmd_val(l2));
-
-	l1 = ((pte_t *)(m2v(l2.pmd)))[l1idx];
-	xen_raw_printk("  l1: %016lx\n", l1.pte);
-	xen_raw_printk("      %016lx\n", pte_val(l1));
-}
-#endif
-
 static void set_page_prot(void *addr, pgprot_t prot)
 {
 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
 	pte_t pte = pfn_pte(pfn, prot);
 
-	xen_raw_printk("addr=%p pfn=%lx mfn=%lx prot=%016llx pte=%016llx\n",
-		       addr, pfn, get_phys_to_machine(pfn),
-		       pgprot_val(prot), pte.pte);
-
 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
 		BUG();
 }
@@ -1698,15 +1666,6 @@ asmlinkage void __init xen_start_kernel(void)
 
 	xen_raw_console_write("about to get started...\n");
 
-#if 0
-	xen_raw_printk("&boot_params=%p __pa(&boot_params)=%lx __va(__pa(&boot_params))=%lx\n",
-		       &boot_params, __pa_symbol(&boot_params),
-		       __va(__pa_symbol(&boot_params)));
-
-	walk(pgd, &boot_params);
-	walk(pgd, __va(__pa(&boot_params)));
-#endif
-
 	/* Start the world */
 #ifdef CONFIG_X86_32
 	i386_start_kernel();
...
@@ -26,8 +26,15 @@
 /* Pseudo-flag used for virtual NMI, which we don't implement yet */
 #define XEN_EFLAGS_NMI	0x80000000
 
-#if 0
-#include <asm/percpu.h>
+#if 1
+/*
+	x86-64 does not yet support direct access to percpu variables
+	via a segment override, so we just need to make sure this code
+	never gets used
+ */
+#define BUG			ud2a
+#define PER_CPU_VAR(var, off)	0xdeadbeef
+#endif
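
A note on the trick above: rather than implementing the 64-bit stubs, the defines poison them. BUG expands to ud2a, which faults if it is ever executed, and PER_CPU_VAR collapses to the bare constant 0xdeadbeef so that the memory operands still assemble. A sketch of the resulting expansion for the first stub below (illustrative, not the assembler's literal output):

	xen_irq_enable_direct:
		ud2a			/* faults immediately if ever reached */
		movb $0, 0xdeadbeef	/* assembles, but is never executed */

Since have_vcpu_info_placement is now initialized to 0 on x86-64, xen_setup_vcpu_info_placement never installs these entry points into pv_ops, so the ud2a is a backstop rather than a live code path.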
 
 /*
 	Enable events.  This clears the event mask and tests the pending
@@ -35,6 +42,8 @@
 	events, then enter the hypervisor to get them handled.
  */
 ENTRY(xen_irq_enable_direct)
+	BUG
+
 	/* Unmask events */
 	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
@@ -58,6 +67,8 @@ ENDPATCH(xen_irq_enable_direct)
 	non-zero.
  */
 ENTRY(xen_irq_disable_direct)
+	BUG
+
 	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 ENDPATCH(xen_irq_disable_direct)
 	ret
@@ -74,6 +85,8 @@ ENDPATCH(xen_irq_disable_direct)
 	Xen and x86 use opposite senses (mask vs enable).
  */
 ENTRY(xen_save_fl_direct)
+	BUG
+
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 	setz %ah
 	addb %ah,%ah
@@ -91,6 +104,8 @@ ENDPATCH(xen_save_fl_direct)
 	if so.
  */
 ENTRY(xen_restore_fl_direct)
+	BUG
+
 	testb $X86_EFLAGS_IF>>8, %ah
 	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
 	/* Preempt here doesn't matter because that will deal with
@@ -133,7 +148,6 @@ check_events:
 	pop %rcx
 	pop %rax
 	ret
-#endif
 
 ENTRY(xen_adjust_exception_frame)
 	mov 8+0(%rsp),%rcx
...