Commit cb84c2b4 authored by Guenter Roeck, committed by Richard Kuo

hexagon: Fix build failures in linux-next

hexagon:defconfig fails to build in linux-next since commit 332fd7c4
("genirq: Generic chip: Change irq_reg_{readl,writel} arguments").

The primary build failure is

arch/hexagon/include/asm/cacheflush.h: In function 'copy_to_user_page':
arch/hexagon/include/asm/cacheflush.h:89:22: error: 'VM_EXEC' undeclared

This is the result of including of <linux/io.h> from <linux/irq.h>,
which is now necessary due to the use of readl and writel from irq.h.
This causes recursive inclusions in hexagon code; cacheflush.h is included
from mm.h prior to the definition of VM_EXEC.

Fix the problem by moving copy_to_user_page from the hexagon include file to
arch/hexagon/mm/cache.c, similar to other architectures. After this change,
several redefinitions of readl and writel are reported. Those are caused
by recursive inclusions of io.h and asm/cacheflush.h. Fix those problems by
reducing the number of files included from those files. Also, it was necessary
to stop including asm-generic/cacheflush.h from asm/cacheflush.h. Instead,
functionality originally provided by asm-generic/cacheflush.h is now coded
in asm/cacheflush.h directly.

Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Jason Cooper <jason@lakedaemon.net>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
parent b2776bf7
...@@ -21,10 +21,7 @@ ...@@ -21,10 +21,7 @@
#ifndef _ASM_CACHEFLUSH_H #ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H #define _ASM_CACHEFLUSH_H
#include <linux/cache.h> #include <linux/mm_types.h>
#include <linux/mm.h>
#include <asm/string.h>
#include <asm-generic/cacheflush.h>
/* Cache flushing: /* Cache flushing:
* *
...@@ -41,6 +38,20 @@ ...@@ -41,6 +38,20 @@
#define LINESIZE 32 #define LINESIZE 32
#define LINEBITS 5 #define LINEBITS 5
/*
 * No-op cache-maintenance hooks. Per the commit message above, these were
 * previously pulled in from asm-generic/cacheflush.h; they are now defined
 * here directly so that asm-generic/cacheflush.h no longer needs to be
 * included (avoiding the recursive-inclusion build failures this patch
 * fixes). NOTE(review): the no-op definitions presumably rely on the
 * hexagon data cache needing no explicit maintenance for these events —
 * confirm against the architecture's cache model.
 */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
/* 0: flush_dcache_page() is a no-op on this architecture */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
/* /*
* Flush Dcache range through current map. * Flush Dcache range through current map.
*/ */
...@@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end); ...@@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end);
/* /*
* Flush Icache range through current map. * Flush Icache range through current map.
*/ */
#undef flush_icache_range
extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_icache_range(unsigned long start, unsigned long end);
/* /*
...@@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, ...@@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
/* generic_ptrace_pokedata doesn't wind up here, does it? */ /* generic_ptrace_pokedata doesn't wind up here, does it? */
} }
#undef copy_to_user_page void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
static inline void copy_to_user_page(struct vm_area_struct *vma, unsigned long vaddr, void *dst, void *src, int len);
struct page *page,
unsigned long vaddr,
void *dst, void *src, int len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end); extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end); extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
......
...@@ -24,14 +24,9 @@ ...@@ -24,14 +24,9 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/types.h> #include <linux/types.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/string.h>
#include <asm/mem-layout.h>
#include <asm/iomap.h> #include <asm/iomap.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* /*
* We don't have PCI yet. * We don't have PCI yet.
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <linux/mm.h> #include <linux/mm.h>
......
...@@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void) ...@@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void)
local_irq_restore(flags); local_irq_restore(flags);
mb(); mb();
} }
/*
 * copy_to_user_page - copy data into a page mapped in user space.
 *
 * Copies @len bytes from @src to the kernel-side mapping @dst of @page.
 * If the target VMA is executable (VM_EXEC), the instruction cache is
 * flushed over the written range so the new code becomes visible to
 * instruction fetch. Moved here from asm/cacheflush.h (where it was a
 * static inline) so the header no longer needs VM_EXEC from linux/mm.h.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len)
{
memcpy(dst, src, len);
/* Executable mapping: make the freshly-written bytes icache-coherent. */
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/mm.h>
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment