Commit 18e780b4 authored by Kefeng Wang, committed by Will Deacon

mm: ioremap: Add ioremap/iounmap_allowed()

Add special hooks for architectures to verify the addr, size or prot
when calling ioremap() or iounmap(), which makes the generic ioremap
more useful.

  ioremap_allowed() returns a bool,
    - true means continue to remap
    - false means skip remap and return directly
  iounmap_allowed() returns a bool,
    - true means continue to vunmap
    - false means skip vunmap and return directly

Meanwhile, only vunmap the address when it is in the vmalloc area, as the
generic ioremap only returns vmalloc addresses.
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Baoquan He <bhe@redhat.com>
Link: https://lore.kernel.org/r/20220607125027.44946-5-wangkefeng.wang@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
parent a14fff1c
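
For illustration only (not part of this commit), here is a minimal sketch of how
an architecture selecting GENERIC_IOREMAP might override ioremap_allowed() in its
asm/io.h. The pfn_valid()-based policy is just one plausible check, refusing to
remap normal RAM (similar in spirit to what arm64 does); the includes and the
exact test are assumptions, not code from this series.

/*
 * Hypothetical <asm/io.h> excerpt for an arch that selects GENERIC_IOREMAP.
 * Defining ioremap_allowed() before including <asm-generic/io.h> replaces
 * the default "always allow" stub added by this patch.
 */
#include <linux/pfn.h>
#include <linux/mmzone.h>

#define ioremap_allowed ioremap_allowed
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	/* Example policy: never let drivers remap pages backing system RAM. */
	if (pfn_valid(PHYS_PFN(phys_addr)))
		return false;

	return true;
}

#include <asm-generic/io.h>

Note that mm/ioremap.c calls the hook only after page-aligning phys_addr and
size, so a check like the one above sees the final, aligned range.
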
@@ -964,6 +964,32 @@ static inline void iounmap(volatile void __iomem *addr)
 #elif defined(CONFIG_GENERIC_IOREMAP)
 #include <linux/pgtable.h>
 
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP
+ * ioremap_allowed() return a bool,
+ *   - true means continue to remap
+ *   - false means skip remap and return directly
+ * iounmap_allowed() return a bool,
+ *   - true means continue to vunmap
+ *   - false means skip vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+				   unsigned long prot)
+{
+	return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+	return true;
+}
+#endif
+
 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 			   unsigned long prot);
 void iounmap(volatile void __iomem *addr);
@@ -28,6 +28,9 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 	phys_addr -= offset;
 	size = PAGE_ALIGN(size + offset);
 
+	if (!ioremap_allowed(phys_addr, size, prot))
+		return NULL;
+
 	area = get_vm_area_caller(size, VM_IOREMAP,
 				  __builtin_return_address(0));
 	if (!area)
@@ -47,6 +50,12 @@ EXPORT_SYMBOL(ioremap_prot);
 
 void iounmap(volatile void __iomem *addr)
 {
-	vunmap((void *)((unsigned long)addr & PAGE_MASK));
+	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
+
+	if (!iounmap_allowed(vaddr))
+		return;
+
+	if (is_vmalloc_addr(vaddr))
+		vunmap(vaddr);
 }
 EXPORT_SYMBOL(iounmap);
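
In the same hypothetical vein, an architecture whose ioremap() can hand out
fixed (non-vmalloc) mappings, for example a permanently mapped legacy I/O
window, could use iounmap_allowed() to keep the generic iounmap() away from
them. ARCH_IO_FIXED_BASE and ARCH_IO_FIXED_END below are made-up names for such
a window, not identifiers from this commit.

/*
 * Hypothetical iounmap_allowed(): skip the vunmap() for addresses inside
 * a fixed, permanently mapped I/O window (assumed arch-specific bounds).
 */
#define iounmap_allowed iounmap_allowed
static inline bool iounmap_allowed(void *addr)
{
	unsigned long vaddr = (unsigned long)addr;

	/* Fixed-window mappings are never torn down. */
	if (vaddr >= ARCH_IO_FIXED_BASE && vaddr < ARCH_IO_FIXED_END)
		return false;

	return true;
}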