Commit 7f2f8822 authored by David Vrabel, committed by Stefano Stabellini

x86/xen: do not use _PAGE_IOMAP PTE flag for I/O mappings

Since mfn_to_pfn() returns the correct PFN for identity mappings (as
used for MMIO regions), the use of _PAGE_IOMAP is not required in
pte_mfn_to_pfn().

Do not set the _PAGE_IOMAP flag in pte_pfn_to_mfn() and do not use it
in pte_mfn_to_pfn().

This will allow _PAGE_IOMAP to be removed, making it available for
future use.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 31668511
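
For illustration, here is a small standalone sketch (not kernel code; the bit positions, flag value, sample PFN and flags below are made up for the example) of why stripping the identity tag bit is enough: for an identity (MMIO) p2m entry the frame number in the PTE comes out the same whether or not the old code additionally set _PAGE_IOMAP, so the flag carries no information and can be dropped, as the diff below does.

/*
 * Toy model: p2m entries can carry FOREIGN_FRAME_BIT or
 * IDENTITY_FRAME_BIT in their top bits.  For an identity (MMIO) entry
 * the stored value is IDENTITY_FRAME(pfn), so stripping the tag bits
 * recovers the machine frame directly -- no _PAGE_IOMAP needed.
 * All constants here are illustrative assumptions, not kernel values.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

typedef uint64_t pteval_t;

#define PAGE_SHIFT          12
#define FOREIGN_FRAME_BIT   (1ULL << 63)
#define IDENTITY_FRAME_BIT  (1ULL << 62)
#define IDENTITY_FRAME(pfn) ((pfn) | IDENTITY_FRAME_BIT)
#define _PAGE_IOMAP         (1ULL << 10)   /* illustrative value only */

/* Old behaviour: translate IDENTITY_FRAME_BIT into a PTE flag. */
static pteval_t old_pte_pfn_to_mfn(uint64_t mfn, pteval_t flags)
{
        mfn &= ~FOREIGN_FRAME_BIT;
        if (mfn & IDENTITY_FRAME_BIT) {
                mfn &= ~IDENTITY_FRAME_BIT;
                flags |= _PAGE_IOMAP;
        }
        return (mfn << PAGE_SHIFT) | flags;
}

/* New behaviour: just strip both tag bits, no special flag. */
static pteval_t new_pte_pfn_to_mfn(uint64_t mfn, pteval_t flags)
{
        mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
        return (mfn << PAGE_SHIFT) | flags;
}

int main(void)
{
        uint64_t mmio_pfn = 0xfebc0;                  /* some MMIO frame */
        uint64_t p2m_val  = IDENTITY_FRAME(mmio_pfn); /* identity entry  */
        pteval_t flags    = 0x73;                     /* arbitrary flags */

        pteval_t pte_old = old_pte_pfn_to_mfn(p2m_val, flags) & ~_PAGE_IOMAP;
        pteval_t pte_new = new_pte_pfn_to_mfn(p2m_val, flags);

        /* Apart from _PAGE_IOMAP itself, both paths build the same PTE. */
        assert(pte_old == pte_new);
        printf("old=%#llx new=%#llx\n",
               (unsigned long long)pte_old, (unsigned long long)pte_new);
        return 0;
}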
@@ -399,38 +399,14 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
                 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
                         mfn = 0;
                         flags = 0;
-                } else {
-                        /*
-                         * Paramount to do this test _after_ the
-                         * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY &
-                         * IDENTITY_FRAME_BIT resolves to true.
-                         */
-                        mfn &= ~FOREIGN_FRAME_BIT;
-                        if (mfn & IDENTITY_FRAME_BIT) {
-                                mfn &= ~IDENTITY_FRAME_BIT;
-                                flags |= _PAGE_IOMAP;
-                        }
-                }
+                } else
+                        mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
                 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
         }
 
         return val;
 }
 
-static pteval_t iomap_pte(pteval_t val)
-{
-        if (val & _PAGE_PRESENT) {
-                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
-                pteval_t flags = val & PTE_FLAGS_MASK;
-
-                /* We assume the pte frame number is a MFN, so
-                   just use it as-is. */
-                val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
-        }
-
-        return val;
-}
-
 __visible pteval_t xen_pte_val(pte_t pte)
 {
         pteval_t pteval = pte.pte;
@@ -441,9 +417,6 @@ __visible pteval_t xen_pte_val(pte_t pte)
                 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
         }
 #endif
-        if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
-                return pteval;
-
         return pte_mfn_to_pfn(pteval);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
@@ -481,7 +454,6 @@ void xen_set_pat(u64 pat)
 
 __visible pte_t xen_make_pte(pteval_t pte)
 {
-        phys_addr_t addr = (pte & PTE_PFN_MASK);
 #if 0
         /* If Linux is trying to set a WC pte, then map to the Xen WC.
          * If _PAGE_PAT is set, then it probably means it is really
@@ -496,19 +468,7 @@ __visible pte_t xen_make_pte(pteval_t pte)
                 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
         }
 #endif
-        /*
-         * Unprivileged domains are allowed to do IOMAPpings for
-         * PCI passthrough, but not map ISA space. The ISA
-         * mappings are just dummy local mappings to keep other
-         * parts of the kernel happy.
-         */
-        if (unlikely(pte & _PAGE_IOMAP) &&
-            (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
-                pte = iomap_pte(pte);
-        } else {
-                pte &= ~_PAGE_IOMAP;
-                pte = pte_pfn_to_mfn(pte);
-        }
+        pte = pte_pfn_to_mfn(pte);
 
         return native_make_pte(pte);
 }
@@ -2091,7 +2051,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
         default:
                 /* By default, set_fixmap is used for hardware mappings */
-                pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
+                pte = mfn_pte(phys, prot);
                 break;
         }