Commit d7677d40 authored by venkatesh.pallipadi@intel.com's avatar venkatesh.pallipadi@intel.com Committed by Ingo Molnar

x86: PAT: use reserve/free memtype in ioremap and iounmap

Use reserve_memtype and free_memtype interfaces in ioremap/iounmap to avoid
aliasing.

If there is an existing alias for the region, inherit the memory type from
the alias. If there are conflicting aliases for the entire region, then fail
the ioremap.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3a96ce8c
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pat.h>
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
...@@ -118,6 +119,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, ...@@ -118,6 +119,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
{ {
unsigned long pfn, offset, last_addr, vaddr; unsigned long pfn, offset, last_addr, vaddr;
struct vm_struct *area; struct vm_struct *area;
unsigned long new_prot_val;
pgprot_t prot; pgprot_t prot;
/* Don't allow wraparound or zero size */ /* Don't allow wraparound or zero size */
...@@ -151,6 +153,28 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, ...@@ -151,6 +153,28 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
WARN_ON_ONCE(is_ram); WARN_ON_ONCE(is_ram);
} }
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
if (reserve_memtype(phys_addr, phys_addr + size,
prot_val, &new_prot_val)) {
/*
* Do not fallback to certain memory types with certain
* requested type:
* - request is uncached, return cannot be write-back
*/
if ((prot_val == _PAGE_CACHE_UC &&
new_prot_val == _PAGE_CACHE_WB)) {
free_memtype(phys_addr, phys_addr + size);
return NULL;
}
prot_val = new_prot_val;
}
switch (prot_val) { switch (prot_val) {
case _PAGE_CACHE_UC: case _PAGE_CACHE_UC:
default: default:
...@@ -161,13 +185,6 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, ...@@ -161,13 +185,6 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
break; break;
} }
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
/* /*
* Ok, go for it.. * Ok, go for it..
*/ */
...@@ -177,11 +194,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size, ...@@ -177,11 +194,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
area->phys_addr = phys_addr; area->phys_addr = phys_addr;
vaddr = (unsigned long) area->addr; vaddr = (unsigned long) area->addr;
if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
free_memtype(phys_addr, phys_addr + size);
free_vm_area(area); free_vm_area(area);
return NULL; return NULL;
} }
if (ioremap_change_attr(vaddr, size, prot_val) < 0) { if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
free_memtype(phys_addr, phys_addr + size);
vunmap(area->addr); vunmap(area->addr);
return NULL; return NULL;
} }
...@@ -265,6 +284,8 @@ void iounmap(volatile void __iomem *addr) ...@@ -265,6 +284,8 @@ void iounmap(volatile void __iomem *addr)
return; return;
} }
free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
/* Finally remove it */ /* Finally remove it */
o = remove_vm_area((void *)addr); o = remove_vm_area((void *)addr);
BUG_ON(p != o || o == NULL); BUG_ON(p != o || o == NULL);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment