Commit d806e5ee authored by Thomas Gleixner, committed by Ingo Molnar

x86: cpa: convert ioremap to new API

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 5f868152
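
The "new API" referred to in the subject is the set_memory_*() family from the change_page_attr (CPA) rework: instead of threading raw pgprot flags through ioremap_change_attr(), callers now pick an ioremap_mode, and the attribute fixup of the kernel direct mapping is done via set_memory_uc() or set_memory_wb(). A minimal sketch of that call pattern, for orientation only (make_range_uncached() is an illustrative helper, not part of this commit):

	/*
	 * Sketch: mark nrpages pages of the kernel direct mapping at
	 * vaddr uncached, mirroring the IOR_MODE_UNCACHED path below.
	 */
	static int make_range_uncached(unsigned long vaddr, int nrpages)
	{
		int err = set_memory_uc(vaddr, nrpages);	/* new CPA API */

		if (!err)
			global_flush_tlb();	/* flush stale attributes */
		return err;
	}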
@@ -19,6 +19,11 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+enum ioremap_mode {
+	IOR_MODE_UNCACHED,
+	IOR_MODE_CACHED,
+};
+
 #ifdef CONFIG_X86_64
 
 unsigned long __phys_addr(unsigned long x)
@@ -64,19 +69,17 @@ int page_is_ram(unsigned long pagenr)
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
-			       pgprot_t prot)
+static int ioremap_change_attr(unsigned long paddr, unsigned long size,
+			       enum ioremap_mode mode)
 {
-	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
+	unsigned long vaddr = (unsigned long)__va(paddr);
+	unsigned long nrpages = size >> PAGE_SHIFT;
 	int err, level;
 
 	/* No change for pages after the last mapping */
-	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
+	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
 		return 0;
 
-	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	vaddr = (unsigned long) __va(phys_addr);
-
 	/*
 	 * If there is no identity map for this address,
 	 * change_page_attr_addr is unnecessary
@@ -84,13 +87,15 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 	if (!lookup_address(vaddr, &level))
 		return 0;
 
-	/*
-	 * Must use an address here and not struct page because the
-	 * phys addr can be a in hole between nodes and not have a
-	 * memmap entry.
-	 */
-	err = change_page_attr_addr(vaddr, npages, prot);
+	switch (mode) {
+	case IOR_MODE_UNCACHED:
+	default:
+		err = set_memory_uc(vaddr, nrpages);
+		break;
+	case IOR_MODE_CACHED:
+		err = set_memory_wb(vaddr, nrpages);
+		break;
+	}
 
 	if (!err)
 		global_flush_tlb();
@@ -107,12 +112,12 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			       unsigned long flags)
+			       enum ioremap_mode mode)
 {
 	void __iomem *addr;
 	struct vm_struct *area;
 	unsigned long offset, last_addr;
-	pgprot_t pgprot;
+	pgprot_t prot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -134,7 +139,15 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	}
 
-	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
+	switch (mode) {
+	case IOR_MODE_UNCACHED:
+	default:
+		prot = PAGE_KERNEL_NOCACHE;
+		break;
+	case IOR_MODE_CACHED:
+		prot = PAGE_KERNEL;
+		break;
+	}
 
 	/*
 	 * Mappings have to be page-aligned
@@ -152,12 +165,12 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	addr = (void __iomem *) area->addr;
 	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-			       phys_addr, pgprot)) {
+			       phys_addr, prot)) {
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
 
-	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
+	if (ioremap_change_attr(phys_addr, size, mode) < 0) {
 		vunmap(addr);
 		return NULL;
 	}
@@ -188,13 +201,13 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
+	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
 void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, 0);
+	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
 }
 EXPORT_SYMBOL(ioremap_cache);
@@ -242,7 +255,7 @@ void iounmap(volatile void __iomem *addr)
 	}
 
 	/* Reset the direct mapping. Can block */
-	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);
+	ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED);
 
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
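
For reference, a hypothetical driver-side view of the two converted paths. MMIO_BASE, MMIO_SIZE, and the register offset are made-up values: ioremap_nocache() now takes the IOR_MODE_UNCACHED path (PAGE_KERNEL_NOCACHE plus set_memory_uc() on the direct mapping), and iounmap() resets the direct mapping through IOR_MODE_CACHED.

	#include <linux/io.h>

	#define MMIO_BASE	0xfed00000UL	/* hypothetical device base */
	#define MMIO_SIZE	0x1000UL

	static void __iomem *regs;

	static int example_map(void)
	{
		/* Takes the IOR_MODE_UNCACHED path in __ioremap() */
		regs = ioremap_nocache(MMIO_BASE, MMIO_SIZE);
		if (!regs)
			return -ENOMEM;
		writel(0x1, regs + 0x04);	/* hypothetical enable register */
		return 0;
	}

	static void example_unmap(void)
	{
		/* iounmap() restores the direct mapping via IOR_MODE_CACHED */
		iounmap(regs);
	}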