Commit 2f997759 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel

Pull Hexagon updates from Richard Kuo:
 "Mostly cleanups for compilation with allmodconfig and some other
  miscellaneous fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel:
  Hexagon: update CR year for elf.h
  Hexagon: remove SP macro
  Hexagon:  set ELF_EXEC_PAGESIZE to PAGE_SIZE
  Hexagon: set the e_flags in user regset view for core dumps
  Hexagon: fix atomic_set
  Hexagon: add screen_info for VGA_CONSOLE
  hexagon: correct type on pgd copy
  smp, hexagon: kill SMP single function call interrupt
  arch: hexagon: include: asm: add generic macro 'mmiowb' in "io.h"
  arch: hexagon: kernel: hexagon_ksyms.c: export related symbols which various modules need
  arch: hexagon: kernel: reset.c: use function pointer instead of function for pm_power_off and export it
  arch: hexagon: include: asm: add "vga.h" in Kbuild
  arch: hexagon: include: asm: Kbuild: add generic "serial.h" in Kbuild
  arch: hexagon: include: uapi: asm: setup.h add swith macro __KERNEL__
  arch: hexagon: include: asm: add prefix "hvm[ci]_" for all enum members in "hexagon_vm.h"
  arch: hexagon: Kconfig: add HAVE_DMA_ATTR in Kconfig and remove "linux/dma-mapping.h" from "asm/dma-mapping.h"
  arch: hexagon: kernel: add export symbol function __delay()
  hexagon: include: asm: kgdb: extend DBG_MAX_REG_NUM for "cs0/1"
  hexagon: kernel: kgdb: include related header for pass compiling.
  hexagon: kernel: remove useless variables 'dn', 'r' and 'err' in time_init_deferred() in "time.c"
parents 472e3741 a9a44fdf
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -28,6 +28,7 @@ config HEXAGON
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
+	select HAVE_DMA_ATTRS
 	---help---
 	  Qualcomm Hexagon is a processor architecture designed for high
 	  performance and low power across a wide variety of applications.
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -41,6 +41,7 @@ generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += sembuf.h
+generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += shmparam.h
 generic-y += siginfo.h
@@ -56,4 +57,5 @@ generic-y += trace_clock.h
 generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
+generic-y += vga.h
 generic-y += xor.h
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -26,7 +26,20 @@
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)		{ (i) }
-#define atomic_set(v, i)	((v)->counter = (i))
+
+/* Normal writes in our arch don't clear lock reservations */
+
+static inline void atomic_set(atomic_t *v, int new)
+{
+	asm volatile(
+		"1:	r6 = memw_locked(%0);\n"
+		"	memw_locked(%0,p0) = %1;\n"
+		"	if (!P0) jump 1b;\n"
+		:
+		: "r" (&v->counter), "r" (new)
+		: "memory", "p0", "r6"
+	);
+}
 
 /**
  * atomic_read - reads a word, atomically
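The old macro set the counter with an ordinary store, but, as the new comment notes, a normal write on Hexagon does not clear another hardware thread's memw_locked reservation; a racing load-locked/store-conditional sequence (atomic_add() and friends) could therefore still complete and overwrite the freshly set value. Routing the store through memw_locked until the P0 predicate reports success closes that window. Roughly, the guarantee is:

/* Illustration only, not part of the patch: with the fixed atomic_set(),
 * a concurrent read-modify-write can no longer lose the set value.  With
 * the old plain-store macro, adder() could load-lock the old value,
 * writer() could store 100 without clearing the reservation, and adder()'s
 * store-conditional would still succeed, leaving 5 instead of 100 or 105. */
#include <linux/atomic.h>

static atomic_t counter = ATOMIC_INIT(0);

static void writer(void)
{
	atomic_set(&counter, 100);
}

static void adder(void)
{
	atomic_add(5, &counter);
}
/* run concurrently, the final value is now always 100 or 105 */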
--- a/arch/hexagon/include/asm/delay.h
+++ b/arch/hexagon/include/asm/delay.h
@@ -21,6 +21,7 @@
 
 #include <asm/param.h>
 
+extern void __delay(unsigned long cycles);
 extern void __udelay(unsigned long usecs);
 
 #define udelay(usecs) __udelay((usecs))
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -25,7 +25,6 @@
 #include <linux/cache.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/dma-attrs.h>
 #include <asm/io.h>
--- a/arch/hexagon/include/asm/elf.h
+++ b/arch/hexagon/include/asm/elf.h
@@ -1,7 +1,7 @@
 /*
  * ELF definitions for the Hexagon architecture
  *
- * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -202,7 +202,7 @@ do { \
 #define CORE_DUMP_USE_REGSET
 
 /* Hrm is this going to cause problems for changing PAGE_SIZE? */
-#define ELF_EXEC_PAGESIZE	4096
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 /*
  * This is the location that an ET_DYN program is loaded if exec'ed. Typical
--- a/arch/hexagon/include/asm/hexagon_vm.h
+++ b/arch/hexagon/include/asm/hexagon_vm.h
@@ -55,27 +55,27 @@
 #ifndef __ASSEMBLY__
 
 enum VM_CACHE_OPS {
-	ickill,
-	dckill,
-	l2kill,
-	dccleaninva,
-	icinva,
-	idsync,
-	fetch_cfg
+	hvmc_ickill,
+	hvmc_dckill,
+	hvmc_l2kill,
+	hvmc_dccleaninva,
+	hvmc_icinva,
+	hvmc_idsync,
+	hvmc_fetch_cfg
 };
 
 enum VM_INT_OPS {
-	nop,
-	globen,
-	globdis,
-	locen,
-	locdis,
-	affinity,
-	get,
-	peek,
-	status,
-	post,
-	clear
+	hvmi_nop,
+	hvmi_globen,
+	hvmi_globdis,
+	hvmi_locen,
+	hvmi_locdis,
+	hvmi_affinity,
+	hvmi_get,
+	hvmi_peek,
+	hvmi_status,
+	hvmi_post,
+	hvmi_clear
 };
 
 extern void _K_VM_event_vector(void);
@@ -98,95 +98,95 @@ long __vmvpid(void);
 
 static inline long __vmcache_ickill(void)
 {
-	return __vmcache(ickill, 0, 0);
+	return __vmcache(hvmc_ickill, 0, 0);
 }
 
 static inline long __vmcache_dckill(void)
 {
-	return __vmcache(dckill, 0, 0);
+	return __vmcache(hvmc_dckill, 0, 0);
 }
 
 static inline long __vmcache_l2kill(void)
 {
-	return __vmcache(l2kill, 0, 0);
+	return __vmcache(hvmc_l2kill, 0, 0);
 }
 
 static inline long __vmcache_dccleaninva(unsigned long addr, unsigned long len)
 {
-	return __vmcache(dccleaninva, addr, len);
+	return __vmcache(hvmc_dccleaninva, addr, len);
 }
 
 static inline long __vmcache_icinva(unsigned long addr, unsigned long len)
 {
-	return __vmcache(icinva, addr, len);
+	return __vmcache(hvmc_icinva, addr, len);
 }
 
 static inline long __vmcache_idsync(unsigned long addr,
			unsigned long len)
 {
-	return __vmcache(idsync, addr, len);
+	return __vmcache(hvmc_idsync, addr, len);
 }
 
 static inline long __vmcache_fetch_cfg(unsigned long val)
 {
-	return __vmcache(fetch_cfg, val, 0);
+	return __vmcache(hvmc_fetch_cfg, val, 0);
 }
 
 /* interrupt operations */
 
 static inline long __vmintop_nop(void)
 {
-	return __vmintop(nop, 0, 0, 0, 0);
+	return __vmintop(hvmi_nop, 0, 0, 0, 0);
 }
 
 static inline long __vmintop_globen(long i)
 {
-	return __vmintop(globen, i, 0, 0, 0);
+	return __vmintop(hvmi_globen, i, 0, 0, 0);
 }
 
 static inline long __vmintop_globdis(long i)
 {
-	return __vmintop(globdis, i, 0, 0, 0);
+	return __vmintop(hvmi_globdis, i, 0, 0, 0);
 }
 
 static inline long __vmintop_locen(long i)
 {
-	return __vmintop(locen, i, 0, 0, 0);
+	return __vmintop(hvmi_locen, i, 0, 0, 0);
 }
 
 static inline long __vmintop_locdis(long i)
 {
-	return __vmintop(locdis, i, 0, 0, 0);
+	return __vmintop(hvmi_locdis, i, 0, 0, 0);
 }
 
 static inline long __vmintop_affinity(long i, long cpu)
 {
-	return __vmintop(locdis, i, cpu, 0, 0);
+	return __vmintop(hvmi_affinity, i, cpu, 0, 0);
 }
 
 static inline long __vmintop_get(void)
 {
-	return __vmintop(get, 0, 0, 0, 0);
+	return __vmintop(hvmi_get, 0, 0, 0, 0);
 }
 
 static inline long __vmintop_peek(void)
 {
-	return __vmintop(peek, 0, 0, 0, 0);
+	return __vmintop(hvmi_peek, 0, 0, 0, 0);
 }
 
 static inline long __vmintop_status(long i)
 {
-	return __vmintop(status, i, 0, 0, 0);
+	return __vmintop(hvmi_status, i, 0, 0, 0);
 }
 
 static inline long __vmintop_post(long i)
 {
-	return __vmintop(post, i, 0, 0, 0);
+	return __vmintop(hvmi_post, i, 0, 0, 0);
 }
 
 static inline long __vmintop_clear(long i)
 {
-	return __vmintop(clear, i, 0, 0, 0);
+	return __vmintop(hvmi_clear, i, 0, 0, 0);
 }
 
 #else /* Only assembly code should reference these */
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -189,6 +189,8 @@ static inline void writel(u32 data, volatile void __iomem *addr)
 #define writew_relaxed __raw_writew
 #define writel_relaxed __raw_writel
 
+#define mmiowb()
+
 /*
  * Need an mtype somewhere in here, for cache type deals?
  * This is probably too long for an inline.
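mmiowb() is what portable drivers call to keep an MMIO write ordered ahead of a following spin_unlock(); defining it (as a no-op on this architecture) lets such drivers build as modules under allmodconfig. A typical call site looks roughly like the sketch below; struct mydev, its fields and REG_CTRL are made-up names.

/* Sketch of the usual mmiowb() pattern a modular driver relies on;
 * "mydev", "regs" and REG_CTRL are hypothetical.  On Hexagon the macro
 * now simply expands to nothing. */
#include <linux/io.h>
#include <linux/spinlock.h>

#define REG_CTRL 0x10

struct mydev {
	spinlock_t lock;
	void __iomem *regs;
};

static void mydev_kick(struct mydev *dev, u32 val)
{
	spin_lock(&dev->lock);
	writel(val, dev->regs + REG_CTRL);
	mmiowb();		/* order the MMIO write before the unlock */
	spin_unlock(&dev->lock);
}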
--- a/arch/hexagon/include/asm/kgdb.h
+++ b/arch/hexagon/include/asm/kgdb.h
@@ -34,10 +34,11 @@ static inline void arch_kgdb_breakpoint(void)
  * 32 gpr + sa0/1 + lc0/1 + m0/1 + gp + ugp + pred + pc = 42 total.
  * vm regs = psp+elr+est+badva = 4
  * syscall+restart = 2 more
- * so 48 = 42 +4 + 2
+ * also add cs0/1 = 2
+ * so 48 = 42 + 4 + 2 + 2
  */
 #define DBG_USER_REGS 42
-#define DBG_MAX_REG_NUM (DBG_USER_REGS + 6)
+#define DBG_MAX_REG_NUM (DBG_USER_REGS + 8)
 #define NUMREGBYTES  (DBG_MAX_REG_NUM*4)
 
 #endif /* __HEXAGON_KGDB_H__ */
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	 * map with a copy of the kernel's persistent map.
 	 */
 
-	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t *));
+	memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));
 	mm->context.generation = kmap_generation;
 
 	/* Physical version is what is passed to virtual machine on switch */
--- a/arch/hexagon/include/asm/smp.h
+++ b/arch/hexagon/include/asm/smp.h
@@ -29,7 +29,6 @@ enum ipi_message_type {
 	IPI_NOP = 0,
 	IPI_RESCHEDULE = 1,
 	IPI_CALL_FUNC,
-	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 	IPI_TIMER,
 };
--- a/arch/hexagon/include/asm/registers.h
+++ b/arch/hexagon/include/asm/registers.h
@@ -6,8 +6,6 @@
 #ifndef _ASM_REGISTERS_H
 #define _ASM_REGISTERS_H
 
-#define SP r29
-
 #ifndef __ASSEMBLY__
 
 /* See kernel/entry.S for further documentation. */
@@ -215,7 +213,7 @@ struct pt_regs {
 #define pt_clr_singlestep(regs) ((regs)->hvmer.vmest &= ~(1<<HVM_VMEST_SS_SFT))
 
 #define pt_set_rte_sp(regs, sp) do {\
-	pt_psp(regs) = (regs)->SP = (sp);\
+	pt_psp(regs) = (regs)->r29 = (sp);\
 } while (0)
 
 #define pt_set_kmode(regs) \
--- a/arch/hexagon/include/uapi/asm/setup.h
+++ b/arch/hexagon/include/uapi/asm/setup.h
@@ -19,7 +19,12 @@
 #ifndef _ASM_SETUP_H
 #define _ASM_SETUP_H
 
+#ifdef __KERNEL__
 #include <linux/init.h>
+#else
+#define __init
+#endif
+
 #include <asm-generic/setup.h>
 
 extern char external_cmdline_buffer;
--- a/arch/hexagon/kernel/Makefile
+++ b/arch/hexagon/kernel/Makefile
@@ -15,3 +15,5 @@ obj-y += vm_vectors.o
 
 obj-$(CONFIG_HAS_DMA) += dma.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
+
+obj-$(CONFIG_VGA_CONSOLE) += screen_info.o
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -18,23 +18,39 @@
  * 02110-1301, USA.
  */
 
+#include <linux/dma-mapping.h>
 #include <asm/hexagon_vm.h>
+#include <asm/io.h>
 #include <asm/uaccess.h>
 
+/* Additional functions */
+EXPORT_SYMBOL(__clear_user_hexagon);
 EXPORT_SYMBOL(__copy_from_user_hexagon);
 EXPORT_SYMBOL(__copy_to_user_hexagon);
+EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__vmgetie);
 EXPORT_SYMBOL(__vmsetie);
+EXPORT_SYMBOL(__vmyield);
+EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(ioremap_nocache);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 
+/* Additional variables */
+EXPORT_SYMBOL(__phys_offset);
+EXPORT_SYMBOL(_dflt_cache_att);
+EXPORT_SYMBOL(bad_dma_address);
+
 #define DECLARE_EXPORT(name) \
 	extern void name(void); EXPORT_SYMBOL(name)
 
 /* Symbols found in libgcc that assorted kernel modules need */
 DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
-DECLARE_EXPORT(__hexagon_divsi3);
-DECLARE_EXPORT(__hexagon_modsi3);
-DECLARE_EXPORT(__hexagon_udivsi3);
-DECLARE_EXPORT(__hexagon_umodsi3);
+
+/* Additional functions */
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(csum_tcpudp_magic);
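On Hexagon the compiler lowers plain C integer division and modulo to libgcc helper calls, so any module containing such an expression needs these symbols exported to link under allmodconfig. For illustration only:

/* Illustration only: ordinary C like this in a module turns into a call
 * to the libgcc helper __divsi3 (and "%" into __modsi3), which is why
 * those helpers now have to be exported for module builds to link. */
static int average(int sum, int count)
{
	return sum / count;	/* becomes a __divsi3 call on Hexagon */
}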
--- a/arch/hexagon/kernel/kgdb.c
+++ b/arch/hexagon/kernel/kgdb.c
@@ -18,6 +18,8 @@
  * 02110-1301, USA.
  */
 
+#include <linux/irq.h>
+#include <linux/sched.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
 
--- a/arch/hexagon/kernel/ptrace.c
+++ b/arch/hexagon/kernel/ptrace.c
@@ -183,6 +183,7 @@ static const struct user_regset_view hexagon_user_view = {
 	.e_machine = ELF_ARCH,
 	.ei_osabi = ELF_OSABI,
 	.regsets = hexagon_regsets,
+	.e_flags = ELF_CORE_EFLAGS,
 	.n = ARRAY_SIZE(hexagon_regsets)
 };
 
--- a/arch/hexagon/kernel/reset.c
+++ b/arch/hexagon/kernel/reset.c
@@ -33,6 +33,5 @@ void machine_restart(char *cmd)
 {
 }
 
-void pm_power_off(void)
-{
-}
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
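Turning pm_power_off into an exported function pointer matches what the rest of the kernel expects: platform or driver code assigns its own handler and the reboot path calls through the pointer if it is non-NULL. A minimal sketch of a module hooking it, with made-up names:

/* Minimal sketch, not part of this series: a platform module registering
 * a power-off handler through the now-exported pointer.  The function and
 * module names are hypothetical. */
#include <linux/module.h>
#include <linux/pm.h>

static void example_power_off(void)
{
	/* platform-specific "turn the board off" sequence would go here */
}

static int __init example_poweroff_init(void)
{
	pm_power_off = example_power_off;
	return 0;
}

static void __exit example_poweroff_exit(void)
{
	if (pm_power_off == example_power_off)
		pm_power_off = NULL;
}

module_init(example_poweroff_init);
module_exit(example_poweroff_exit);
MODULE_LICENSE("GPL");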
--- /dev/null
+++ b/arch/hexagon/kernel/screen_info.c
@@ -0,0 +1,3 @@
+#include <linux/screen_info.h>
+
+struct screen_info screen_info;
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -64,10 +64,6 @@ static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
 			generic_smp_call_function_interrupt();
 			break;
 
-		case IPI_CALL_FUNC_SINGLE:
-			generic_smp_call_function_single_interrupt();
-			break;
-
 		case IPI_CPU_STOP:
 			/*
 			 * call vmstop()
@@ -248,7 +244,7 @@ void smp_send_stop(void)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
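With IPI_CALL_FUNC_SINGLE gone, single-CPU and multi-CPU cross calls both arrive as IPI_CALL_FUNC, and generic_smp_call_function_interrupt() drains both kinds of requests, so one IPI type is enough. From a caller's point of view nothing changes; a sketch, with my_func as a hypothetical callback:

/* Illustration only: both generic helpers below now end up raising
 * IPI_CALL_FUNC on Hexagon; my_func() is a hypothetical callback. */
#include <linux/printk.h>
#include <linux/smp.h>

static void my_func(void *info)
{
	pr_info("cross call on CPU %d\n", smp_processor_id());
}

static void kick_cpus(void)
{
	smp_call_function_single(1, my_func, NULL, 1);	/* one CPU */
	smp_call_function(my_func, NULL, 1);		/* all other CPUs */
}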
--- a/arch/hexagon/kernel/time.c
+++ b/arch/hexagon/kernel/time.c
@@ -191,9 +191,6 @@ void __init time_init_deferred(void)
 {
 	struct resource *resource = NULL;
 	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
-	struct device_node *dn;
-	struct resource r;
-	int err;
 
 	ce_dev->cpumask = cpu_all_mask;
 
@@ -232,6 +229,15 @@ void __init time_init(void)
 	late_time_init = time_init_deferred;
 }
 
+void __delay(unsigned long cycles)
+{
+	unsigned long long start = __vmgettime();
+
+	while ((__vmgettime() - start) < cycles)
+		cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
 /*
  * This could become parametric or perhaps even computed at run-time,
  * but for now we take the observed simulator jitter.
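__delay() busy-waits by polling the hypervisor cycle counter (__vmgettime()) until the requested number of cycles has elapsed; exporting it lets modular code that ends up in a cycle-based delay path link. A trivial usage sketch:

/* Illustration only: cycle-based busy waiting as a module might reach it;
 * the exported __delay() above spins on __vmgettime() until "cycles" pass. */
#include <linux/delay.h>

static void spin_a_bit(void)
{
	__delay(1000);		/* burn roughly 1000 thread cycles */
	udelay(10);		/* microsecond-based, via __udelay() */
}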