Commit 41925b10 authored by Juergen Gross

xen: replace xen_remap() with memremap()

xen_remap() is used to establish mappings for frames not under direct
control of the kernel: for Xenstore and console ring pages, and for
grant pages of non-PV guests.

Today xen_remap() is defined to use ioremap() on x86 (producing
uncached mappings) and ioremap_cache() on Arm (producing cached
mappings).

Uncached mappings are bad for performance in those use cases, so they
should be avoided where possible. Since none of the xen_remap() use
cases requires an uncached mapping (the mapped area is always physical
RAM), a mapping using the standard WB cache mode is fine.
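
For illustration (a minimal sketch, not part of the patch; "gfn" stands
for an arbitrary RAM-backed guest frame number and the error handling
is schematic): memremap() with MEMREMAP_WB returns an ordinary,
cacheable kernel pointer that is accessed like normal memory, not via
readl()/writel():

	#include <linux/io.h>	/* memremap(), memunmap() */
	#include <xen/page.h>	/* XEN_PAGE_SHIFT, XEN_PAGE_SIZE */

	void *ring = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE,
			      MEMREMAP_WB);
	if (!ring)
		return -ENOMEM;
	((char *)ring)[0] = 0;	/* ordinary store, no writeb() needed */
	memunmap(ring);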

As sparse flags some of the xen_remap() use cases as improper
I/O-memory accesses (the result is not annotated with the __iomem
modifier), eliminate xen_remap() completely and replace all use cases
with memremap(), specifying the MEMREMAP_WB caching mode.

xen_unmap() can be replaced with memunmap().
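
The conversion pattern at each call site looks as follows (schematic
only; "p" is a placeholder pointer, the real changes are in the hunks
below):

	/* Before: xen_remap() wrapped ioremap()/ioremap_cache(), so the
	 * result was really a void __iomem pointer.
	 */
	p = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
	...
	xen_unmap(p);

	/* After: memremap() returns a plain pointer to WB-cached RAM,
	 * which also silences the sparse address-space warnings.
	 */
	p = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
	...
	memunmap(p);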
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Link: https://lore.kernel.org/r/20220530082634.6339-1-jgross@suse.com
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 0a19bab5
@@ -347,9 +347,6 @@ unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
-#define xen_remap(cookie, size) ioremap((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
 static inline bool xen_arch_need_swiotlb(struct device *dev,
 					 phys_addr_t phys,
 					 dma_addr_t dev_addr)
...
@@ -253,7 +253,7 @@ static int xen_hvm_console_init(void)
 	if (r < 0 || v == 0)
 		goto err;
 	gfn = v;
-	info->intf = xen_remap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE);
+	info->intf = memremap(gfn << XEN_PAGE_SHIFT, XEN_PAGE_SIZE, MEMREMAP_WB);
 	if (info->intf == NULL)
 		goto err;
 	info->vtermno = HVC_COOKIE;
...
@@ -632,7 +632,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 	if (xen_auto_xlat_grant_frames.count)
 		return -EINVAL;
 
-	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
+	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
 	if (vaddr == NULL) {
 		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 			&addr);
@@ -640,7 +640,7 @@ int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 	}
 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 	if (!pfn) {
-		xen_unmap(vaddr);
+		memunmap(vaddr);
 		return -ENOMEM;
 	}
 	for (i = 0; i < max_nr_gframes; i++)
@@ -659,7 +659,7 @@ void gnttab_free_auto_xlat_frames(void)
 	if (!xen_auto_xlat_grant_frames.count)
 		return;
 	kfree(xen_auto_xlat_grant_frames.pfn);
-	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
+	memunmap(xen_auto_xlat_grant_frames.vaddr);
 	xen_auto_xlat_grant_frames.pfn = NULL;
 	xen_auto_xlat_grant_frames.count = 0;
...
@@ -752,8 +752,8 @@ static void xenbus_probe(void)
 	xenstored_ready = 1;
 
 	if (!xen_store_interface) {
-		xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
-						XEN_PAGE_SIZE);
+		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+					       XEN_PAGE_SIZE, MEMREMAP_WB);
 		/*
 		 * Now it is safe to free the IRQ used for xenstore late
 		 * initialization. No need to unbind: it is about to be
@@ -1009,8 +1009,8 @@ static int __init xenbus_init(void)
 #endif
 		xen_store_gfn = (unsigned long)v;
 		xen_store_interface =
-			xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
-				  XEN_PAGE_SIZE);
+			memremap(xen_store_gfn << XEN_PAGE_SHIFT,
+				 XEN_PAGE_SIZE, MEMREMAP_WB);
 		if (xen_store_interface->connection != XENSTORE_CONNECTED)
 			wait = true;
 	}
...
@@ -109,9 +109,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
 bool xen_arch_need_swiotlb(struct device *dev,
 			   phys_addr_t phys,
 			   dma_addr_t dev_addr);
...