Commit e9332cac authored by Thomas Gleixner, committed by Ingo Molnar

x86: switch to change_page_attr_addr in ioremap_32.c

Use change_page_attr_addr() instead of change_page_attr(), which
simplifies the code significantly and matches the 64-bit
implementation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 74ff2857
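
Note: the relevant API difference is that change_page_attr() identifies the
target pages by struct page, while change_page_attr_addr() takes a kernel
virtual address, which also works for physical ranges that fall in a hole
between nodes and therefore have no memmap entry. A minimal sketch of the two
prototypes, assumed from the contemporaneous <asm/cacheflush.h> (the diff
below is authoritative):

    /* Assumed prototypes, for orientation only; see <asm/cacheflush.h>. */
    int change_page_attr(struct page *page, int numpages, pgprot_t prot);
    int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);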
@@ -6,6 +6,7 @@
  * (C) Copyright 1995 1996 Linus Torvalds
  */
 
+#include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -18,6 +19,42 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+/*
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+ * conflicts.
+ */
+static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+			       pgprot_t prot)
+{
+	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
+	int err, level;
+
+	/* No change for pages after the last mapping */
+	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
+		return 0;
+
+	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	vaddr = (unsigned long) __va(phys_addr);
+
+	/*
+	 * If there is no identity map for this address,
+	 * change_page_attr_addr is unnecessary
+	 */
+	if (!lookup_address(vaddr, &level))
+		return 0;
+
+	/*
+	 * Must use an address here and not struct page because the
+	 * phys addr can be in a hole between nodes and not have a
+	 * memmap entry.
+	 */
+	err = change_page_attr_addr(vaddr, npages, prot);
+	if (!err)
+		global_flush_tlb();
+
+	return err;
+}
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -33,7 +70,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	void __iomem *addr;
 	struct vm_struct *area;
 	unsigned long offset, last_addr;
-	pgprot_t prot;
+	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -62,7 +99,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	}
 
-	prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
+	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
 
 	/*
 	 * Mappings have to be page-aligned
@@ -79,11 +116,17 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	area->phys_addr = phys_addr;
 	addr = (void __iomem *) area->addr;
-	if (ioremap_page_range((unsigned long) addr,
-			       (unsigned long) addr + size, phys_addr, prot)) {
+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+			       phys_addr, pgprot)) {
 		vunmap((void __force *) addr);
 		return NULL;
 	}
+
+	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
+		vunmap(addr);
+		return NULL;
+	}
+
 	return (void __iomem *) (offset + (char __iomem *)addr);
 }
 EXPORT_SYMBOL(__ioremap);
@@ -111,37 +154,7 @@ EXPORT_SYMBOL(__ioremap);
  */
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
-	unsigned long last_addr;
-	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
-	if (!p)
-		return p;
-
-	/* Guaranteed to be > phys_addr, as per __ioremap() */
-	last_addr = phys_addr + size - 1;
-
-	if (last_addr < virt_to_phys(high_memory) - 1) {
-		struct page *ppage = virt_to_page(__va(phys_addr));
-		unsigned long npages;
-
-		phys_addr &= PAGE_MASK;
-
-		/* This might overflow and become zero.. */
-		last_addr = PAGE_ALIGN(last_addr);
-
-		/* .. but that's ok, because modulo-2**n arithmetic will make
-		 * the page-aligned "last - first" come out right.
-		 */
-		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-
-		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
-			iounmap(p);
-			p = NULL;
-		}
-		global_flush_tlb();
-	}
-
-	return p;
+	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
 }
 EXPORT_SYMBOL(ioremap_nocache);
@@ -189,12 +202,7 @@ void iounmap(volatile void __iomem *addr)
 	}
 
 	/* Reset the direct mapping. Can block */
-	if (p->phys_addr < virt_to_phys(high_memory) - 1) {
-		change_page_attr(virt_to_page(__va(p->phys_addr)),
-				 get_vm_area_size(p) >> PAGE_SHIFT,
-				 PAGE_KERNEL);
-		global_flush_tlb();
-	}
+	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);
 
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
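
The page accounting in the new helper is the standard round-up to whole
pages. A minimal user-space sketch that exercises the same arithmetic,
assuming the common x86 PAGE_SHIFT of 12 (4 KiB pages):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                  /* assumption: 4 KiB x86 pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Same round-up as ioremap_change_attr(): whole pages covering size bytes */
    static unsigned long npages(unsigned long size)
    {
        return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        assert(npages(1) == 1);             /* sub-page request needs one page */
        assert(npages(PAGE_SIZE) == 1);     /* exact multiple does not round up */
        assert(npages(PAGE_SIZE + 1) == 2); /* one byte over adds a page */
        printf("round-up checks passed\n");
        return 0;
    }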