Commit 91eebf40 authored by Thomas Gleixner, committed by Ingo Molnar

x86: style cleanup of ioremap code

Fix the coding style before going further.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1aaf74e9
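
The fixes below reduce to a few Documentation/CodingStyle rules: the '*' of a pointer declaration binds to the name, keywords take a space before the parenthesis, and every printk carries an explicit KERN_ level. A minimal before/after sketch (a hypothetical helper, not code from this commit):

	#include <linux/kernel.h>
	#include <linux/vmalloc.h>

	/* Before: '*' floats free, no space after the keyword, printk has no level. */
	static void * stale_style(struct vm_struct * area)
	{
		if(!area)
			printk("no area\n");
		return (void*) area;
	}

	/* After: '*' binds to the name, space after 'if', explicit KERN_ level. */
	static void *clean_style(struct vm_struct *area)
	{
		if (!area)
			printk(KERN_ERR "no area\n");
		return (void *) area;
	}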
 /*
- * arch/i386/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -21,10 +19,6 @@
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000

-/*
- * Generic mapping function (not visible outside):
- */
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -34,10 +28,11 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
-	void __iomem * addr;
-	struct vm_struct * area;
+	void __iomem *addr;
+	struct vm_struct *area;
 	unsigned long offset, last_addr;
 	pgprot_t prot;
@@ -62,8 +57,9 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		t_addr = __va(phys_addr);
 		t_end = t_addr + (size - 1);

-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
+		for (page = virt_to_page(t_addr);
+		     page <= virt_to_page(t_end); page++)
+			if (!PageReserved(page))
 				return NULL;
 	}
@@ -114,11 +110,11 @@ EXPORT_SYMBOL(__ioremap);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long last_addr;
 	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
 	if (!p)
 		return p;
@@ -172,7 +168,8 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;

-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+			(PAGE_MASK & (unsigned long __force)addr);

 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
@@ -187,7 +184,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);

 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
@@ -237,7 +234,7 @@ void __init early_ioremap_init(void)
 	unsigned long *pgd;

 	if (early_ioremap_debug)
-		printk("early_ioremap_init()\n");
+		printk(KERN_DEBUG "early_ioremap_init()\n");

 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = __pa(bm_pte) | _PAGE_TABLE;
@@ -248,15 +245,16 @@ void __init early_ioremap_init(void)
 	 */
 	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
 		WARN_ON(1);
-		printk("pgd %p != %p\n",
+		printk(KERN_WARNING "pgd %p != %p\n",
 			pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
-		printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
 			fix_to_virt(FIX_BTMAP_BEGIN));
-		printk("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
 			fix_to_virt(FIX_BTMAP_END));
-		printk("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
-		printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
+		       FIX_BTMAP_BEGIN);
 	}
 }
@@ -265,7 +263,7 @@ void __init early_ioremap_clear(void)
 	unsigned long *pgd;

 	if (early_ioremap_debug)
-		printk("early_ioremap_clear()\n");
+		printk(KERN_DEBUG "early_ioremap_clear()\n");

 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = 0;
@@ -351,7 +349,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	nesting = early_ioremap_nested;
 	if (early_ioremap_debug) {
-		printk("early_ioremap(%08lx, %08lx) [%d] => ",
+		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
 			phys_addr, size, nesting);
 		dump_stack();
 	}
@@ -398,7 +396,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

-	return (void*) (offset + fix_to_virt(idx0));
+	return (void *) (offset + fix_to_virt(idx0));
 }

 void __init early_iounmap(void *addr, unsigned long size)
@@ -413,7 +411,8 @@ void __init early_iounmap(void *addr, unsigned long size)
 	WARN_ON(nesting < 0);
 	if (early_ioremap_debug) {
-		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
+		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+		       size, nesting);
 		dump_stack();
 	}
...
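
For context between the two files: drivers consume the interfaces cleaned up above roughly as below. A minimal sketch only; MY_DEV_MMIO_BASE/MY_DEV_MMIO_SIZE and the my_dev_* helpers are made-up placeholders, not code from this commit:

	#include <linux/io.h>
	#include <linux/errno.h>

	#define MY_DEV_MMIO_BASE	0xfeb00000UL	/* assumed device BAR */
	#define MY_DEV_MMIO_SIZE	0x1000UL

	static void __iomem *my_dev_regs;

	static int my_dev_map(void)
	{
		/* Uncached mapping; ioremap_nocache() passes _PAGE_PCD | _PAGE_PWT. */
		my_dev_regs = ioremap_nocache(MY_DEV_MMIO_BASE, MY_DEV_MMIO_SIZE);
		if (!my_dev_regs)
			return -ENOMEM;

		/* Go through the accessors; never dereference __iomem directly. */
		writel(0x1, my_dev_regs + 0x04);
		return 0;
	}

	static void my_dev_unmap(void)
	{
		iounmap(my_dev_regs);	/* every ioremap needs a matching iounmap */
	}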
 /*
- * arch/x86_64/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -33,8 +31,7 @@ EXPORT_SYMBOL(__phys_addr);
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int
-ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 			       unsigned long flags)
 {
 	int err = 0;
@@ -50,20 +47,18 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 		if (!lookup_address(vaddr, &level))
 			return err;
 		/*
-		 * Must use a address here and not struct page because the phys addr
-		 * can be a in hole between nodes and not have an memmap entry.
+		 * Must use a address here and not struct page because
+		 * the phys addr can be a in hole between nodes and
+		 * not have an memmap entry.
 		 */
-		err = change_page_attr_addr(vaddr,npages,MAKE_GLOBAL(__PAGE_KERNEL|flags));
+		err = change_page_attr_addr(vaddr,npages,
+					    MAKE_GLOBAL(__PAGE_KERNEL|flags));
 		if (!err)
 			global_flush_tlb();
 	}
 	return err;
 }

-/*
- * Generic mapping function
- */
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -73,10 +68,11 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
-	void * addr;
-	struct vm_struct * area;
+	void *addr;
+	struct vm_struct *area;
 	unsigned long offset, last_addr;
 	pgprot_t pgprot;
@@ -142,8 +138,7 @@ EXPORT_SYMBOL(__ioremap);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
 }
@@ -165,7 +160,8 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;

-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+			(PAGE_MASK & (unsigned long __force)addr);
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
 	   in parallel. Reuse of the virtual address is prevented by
@@ -179,7 +175,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);

 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
...
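
Both iounmap() hunks reformat the same sparse idiom: an __iomem pointer cannot be masked directly, so it is cast through an integer with __force and back, which keeps sparse from flagging an address-space violation. A standalone sketch of that pattern (hypothetical helper, not code from this commit):

	#include <linux/compiler.h>
	#include <asm/page.h>

	/* Round an I/O mapping cookie down to its page boundary without
	 * triggering sparse address-space warnings. */
	static volatile void __iomem *page_align_iomem(volatile void __iomem *addr)
	{
		return (volatile void __iomem *)
				(PAGE_MASK & (unsigned long __force)addr);
	}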