Commit 562bfca4 authored by Ingo Molnar

x86/mm: Clean up types in xlate_dev_mem_ptr() some more

So Linus noticed that in:

  94d4b476 ("x86/mm: Clean up types in xlate_dev_mem_ptr()")

... I added two nonsensical casts, due to the poor type choice
for 'vaddr'.

Change it to 'void *' and take advantage of void * arithmetic.

This removes the casts.

( Also remove a nonsensical return line from unxlate_dev_mem_ptr()
  while at it. )
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 99e71110
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -353,18 +353,18 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 {
 	unsigned long start  = phys &  PAGE_MASK;
 	unsigned long offset = phys & ~PAGE_MASK;
-	unsigned long vaddr;
+	void *vaddr;
 
 	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
 	if (page_is_ram(start >> PAGE_SHIFT))
 		return __va(phys);
 
-	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+	vaddr = ioremap_cache(start, PAGE_SIZE);
 	/* Only add the offset on success and return NULL if the ioremap() failed: */
 	if (vaddr)
 		vaddr += offset;
 
-	return (void *)vaddr;
+	return vaddr;
 }
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
@@ -373,7 +373,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 		return;
 
 	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
-	return;
 }
 
 static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
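
For context, the cast removal works because the kernel is built with GNU C, where arithmetic on 'void *' is defined as byte-granular, i.e. the same stepping you would otherwise get by casting through 'unsigned long'. A minimal stand-alone sketch of that behaviour (hypothetical buffer and offset, not kernel code):

  #include <stdio.h>

  int main(void)
  {
  	char buf[64];
  	void *base = buf;
  	unsigned long offset = 5;

  	/*
  	 * GNU C extension: 'void *' arithmetic advances one byte at a
  	 * time, so no intermediate cast to 'unsigned long' is needed.
  	 */
  	void *p = base + offset;

  	/* Prints 5: 'p' is exactly 'offset' bytes past 'base'. */
  	printf("delta = %ld\n", (long)((char *)p - (char *)base));

  	return 0;
  }

With a plain 'unsigned long vaddr' the same addition needs a cast on assignment and a cast back to 'void *' on return, which is exactly what the hunk above drops.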