Commit 71f2c153 authored by Arnd Bergmann

Merge branch 'depends/rmk/devel-stable' into next/cleanup

parents 8efc59ad c825dda9
...@@ -29,6 +29,7 @@ config ARM ...@@ -29,6 +29,7 @@ config ARM
select HAVE_GENERIC_HARDIRQS select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ select HAVE_SPARSE_IRQ
select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW
select CPU_PM if (SUSPEND || CPU_IDLE)
help help
The ARM series is a line of low-power-consumption RISC chip designs The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and licensed by ARM Ltd and targeted at embedded applications and
...@@ -1794,6 +1795,38 @@ config ZBOOT_ROM_SH_MOBILE_SDHI ...@@ -1794,6 +1795,38 @@ config ZBOOT_ROM_SH_MOBILE_SDHI
endchoice endchoice
config ARM_APPENDED_DTB
bool "Use appended device tree blob to zImage (EXPERIMENTAL)"
depends on OF && !ZBOOT_ROM && EXPERIMENTAL
help
With this option, the boot code will look for a device tree binary
(DTB) appended to zImage
(e.g. cat zImage <filename>.dtb > zImage_w_dtb).
This is meant as a backward compatibility convenience for those
systems with a bootloader that can't be upgraded to accommodate
the documented boot protocol using a device tree.
Beware that there is very little in terms of protection against
this option being confused by leftover garbage in memory that might
look like a DTB header after a reboot if no actual DTB is appended
to zImage. Do not leave this option active in a production kernel
if you don't intend to always append a DTB. Having the bootloader
pass the location of its own DTB in r2 is always preferable to
this option.
config ARM_ATAG_DTB_COMPAT
bool "Supplement the appended DTB with traditional ATAG information"
depends on ARM_APPENDED_DTB
help
Some old bootloaders can't be updated to a DTB capable one, yet
they provide ATAGs with memory configuration, the ramdisk address,
the kernel cmdline string, etc. Such information is dynamically
provided by the bootloader and can't always be stored in a static
DTB. To allow a device tree enabled kernel to be used with such
bootloaders, this option allows zImage to extract the information
from the ATAG list and store it at run time into the appended DTB.
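For orientation, the ATAG-to-DTB folding this option enables in the decompressor amounts to roughly the C sketch below (the helper name and parameters are illustrative only; the real logic lives in the head.S and atags_to_fdt.c hunks later in this diff):
/* Hypothetical illustration only -- not part of the patch. */
extern int atags_to_fdt(void *atag_list, void *fdt, int total_space);
static int fold_atags_into_appended_dtb(void *dtb, void *atags,
					void *ram_start, int space)
{
	/* First try the ATAG pointer handed over by the bootloader (r8). */
	int ret = atags_to_fdt(atags, dtb, space);
	/*
	 * A return value of 1 means "no valid ATAG list there"; retry at
	 * the conventional 0x100 offset from the start of RAM.
	 */
	if (ret == 1)
		ret = atags_to_fdt((char *)ram_start + 0x100, dtb, space);
	return ret;	/* 0 on success, negative libfdt error otherwise */
}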
config CMDLINE config CMDLINE
string "Default kernel command string" string "Default kernel command string"
default "" default ""
......
...@@ -129,4 +129,10 @@ config DEBUG_S3C_UART ...@@ -129,4 +129,10 @@ config DEBUG_S3C_UART
The uncompressor code port configuration is now handled The uncompressor code port configuration is now handled
by CONFIG_S3C_LOWLEVEL_UART_PORT. by CONFIG_S3C_LOWLEVEL_UART_PORT.
config ARM_KPROBES_TEST
tristate "Kprobes test module"
depends on KPROBES && MODULES
help
Perform tests of kprobes API and instruction set simulation.
endmenu endmenu
...@@ -5,3 +5,12 @@ piggy.lzo ...@@ -5,3 +5,12 @@ piggy.lzo
piggy.lzma piggy.lzma
vmlinux vmlinux
vmlinux.lds vmlinux.lds
# borrowed libfdt files
fdt.c
fdt.h
fdt_ro.c
fdt_rw.c
fdt_wip.c
libfdt.h
libfdt_internal.h
...@@ -26,6 +26,10 @@ HEAD = head.o ...@@ -26,6 +26,10 @@ HEAD = head.o
OBJS += misc.o decompress.o OBJS += misc.o decompress.o
FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
# string library code (-Os is enforced to keep it much smaller)
OBJS += string.o
CFLAGS_string.o := -Os
# #
# Architecture dependencies # Architecture dependencies
# #
...@@ -89,21 +93,41 @@ suffix_$(CONFIG_KERNEL_GZIP) = gzip ...@@ -89,21 +93,41 @@ suffix_$(CONFIG_KERNEL_GZIP) = gzip
suffix_$(CONFIG_KERNEL_LZO) = lzo suffix_$(CONFIG_KERNEL_LZO) = lzo
suffix_$(CONFIG_KERNEL_LZMA) = lzma suffix_$(CONFIG_KERNEL_LZMA) = lzma
# Borrowed libfdt files for the ATAG compatibility mode
libfdt := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c
libfdt_hdrs := fdt.h libfdt.h libfdt_internal.h
libfdt_objs := $(addsuffix .o, $(basename $(libfdt)))
$(addprefix $(obj)/,$(libfdt) $(libfdt_hdrs)): $(obj)/%: $(srctree)/scripts/dtc/libfdt/%
$(call cmd,shipped)
$(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \
$(addprefix $(obj)/,$(libfdt_hdrs))
ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
OBJS += $(libfdt_objs) atags_to_fdt.o
endif
targets := vmlinux vmlinux.lds \ targets := vmlinux vmlinux.lds \
piggy.$(suffix_y) piggy.$(suffix_y).o \ piggy.$(suffix_y) piggy.$(suffix_y).o \
font.o font.c head.o misc.o $(OBJS) lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS)
# Make sure files are removed during clean # Make sure files are removed during clean
extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs)
ifeq ($(CONFIG_FUNCTION_TRACER),y) ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS) ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif endif
ccflags-y := -fpic -fno-builtin ccflags-y := -fpic -fno-builtin -I$(obj)
asflags-y := -Wa,-march=all asflags-y := -Wa,-march=all
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol. # Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y) ifneq ($(CONFIG_AUTO_ZRELADDR),y)
LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR) LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
...@@ -123,7 +147,7 @@ LDFLAGS_vmlinux += -T ...@@ -123,7 +147,7 @@ LDFLAGS_vmlinux += -T
# For __aeabi_uidivmod # For __aeabi_uidivmod
lib1funcs = $(obj)/lib1funcs.o lib1funcs = $(obj)/lib1funcs.o
$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S
$(call cmd,shipped) $(call cmd,shipped)
# We need to prevent any GOTOFF relocs being used with references # We need to prevent any GOTOFF relocs being used with references
......
#include <asm/setup.h>
#include <libfdt.h>
static int node_offset(void *fdt, const char *node_path)
{
int offset = fdt_path_offset(fdt, node_path);
if (offset == -FDT_ERR_NOTFOUND)
offset = fdt_add_subnode(fdt, 0, node_path);
return offset;
}
static int setprop(void *fdt, const char *node_path, const char *property,
uint32_t *val_array, int size)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop(fdt, offset, property, val_array, size);
}
static int setprop_string(void *fdt, const char *node_path,
const char *property, const char *string)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop_string(fdt, offset, property, string);
}
static int setprop_cell(void *fdt, const char *node_path,
const char *property, uint32_t val)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
return offset;
return fdt_setprop_cell(fdt, offset, property, val);
}
/*
* Convert and fold provided ATAGs into the provided FDT.
*
* Return values:
* = 0 -> pretend success
* = 1 -> bad ATAG (may retry with another possible ATAG pointer)
* < 0 -> error from libfdt
*/
int atags_to_fdt(void *atag_list, void *fdt, int total_space)
{
struct tag *atag = atag_list;
uint32_t mem_reg_property[2 * NR_BANKS];
int memcount = 0;
int ret;
/* make sure we've got an aligned pointer */
if ((u32)atag_list & 0x3)
return 1;
/* if we get a DTB here we're done already */
if (*(u32 *)atag_list == fdt32_to_cpu(FDT_MAGIC))
return 0;
/* validate the ATAG */
if (atag->hdr.tag != ATAG_CORE ||
(atag->hdr.size != tag_size(tag_core) &&
atag->hdr.size != 2))
return 1;
/* let's give it all the room it could need */
ret = fdt_open_into(fdt, fdt, total_space);
if (ret < 0)
return ret;
for_each_tag(atag, atag_list) {
if (atag->hdr.tag == ATAG_CMDLINE) {
setprop_string(fdt, "/chosen", "bootargs",
atag->u.cmdline.cmdline);
} else if (atag->hdr.tag == ATAG_MEM) {
if (memcount >= sizeof(mem_reg_property)/4)
continue;
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
} else if (atag->hdr.tag == ATAG_INITRD2) {
uint32_t initrd_start, initrd_size;
initrd_start = atag->u.initrd.start;
initrd_size = atag->u.initrd.size;
setprop_cell(fdt, "/chosen", "linux,initrd-start",
initrd_start);
setprop_cell(fdt, "/chosen", "linux,initrd-end",
initrd_start + initrd_size);
}
}
if (memcount)
setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
return fdt_pack(fdt);
}
...@@ -216,6 +216,103 @@ restart: adr r0, LC0 ...@@ -216,6 +216,103 @@ restart: adr r0, LC0
mov r10, r6 mov r10, r6
#endif #endif
mov r5, #0 @ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
* r0 = delta
* r2 = BSS start
* r3 = BSS end
* r4 = final kernel address
* r5 = appended dtb size (still unknown)
* r6 = _edata
* r7 = architecture ID
* r8 = atags/device tree pointer
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* r11 = GOT start
* r12 = GOT end
* sp = stack pointer
*
* if there are device trees (dtb) appended to zImage, advance r10 so that the
* dtb data will get relocated along with the kernel if necessary.
*/
ldr lr, [r6, #0]
#ifndef __ARMEB__
ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
#else
ldr r1, =0xd00dfeed
#endif
cmp lr, r1
bne dtb_check_done @ not found
#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
/*
* OK... Let's do some funky business here.
* If we do have a DTB appended to zImage, and we do have
* an ATAG list around, we want the latter to be translated
* and folded into the former here. To be on the safe side,
* let's temporarily move the stack away into the malloc
* area. No GOT fixup has occurred yet, but none of the
* code we're about to call uses any global variable.
*/
add sp, sp, #0x10000
stmfd sp!, {r0-r3, ip, lr}
mov r0, r8
mov r1, r6
sub r2, sp, r6
bl atags_to_fdt
/*
* If the returned value is 1, there is no ATAG at the location
* pointed to by r8. Try the typical 0x100 offset from the start
* of RAM and hope for the best.
*/
cmp r0, #1
sub r0, r4, #(TEXT_OFFSET - 0x100)
mov r1, r6
sub r2, sp, r6
blne atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr}
sub sp, sp, #0x10000
#endif
mov r8, r6 @ use the appended device tree
/*
* Make sure that the DTB doesn't end up in the final
* kernel's .bss area. To do so, we adjust the decompressed
* kernel size to compensate if that .bss size is larger
* than the relocated code.
*/
ldr r5, =_kernel_bss_size
adr r1, wont_overwrite
sub r1, r6, r1
subs r1, r5, r1
addhi r9, r9, r1
/* Get the dtb's size */
ldr r5, [r6, #4]
#ifndef __ARMEB__
/* convert r5 (dtb size) to little endian */
eor r1, r5, r5, ror #16
bic r1, r1, #0x00ff0000
mov r5, r5, ror #8
eor r5, r5, r1, lsr #8
#endif
/* preserve 64-bit alignment */
add r5, r5, #7
bic r5, r5, #7
/* relocate some pointers past the appended dtb */
add r6, r6, r5
add r10, r10, r5
add sp, sp, r5
dtb_check_done:
#endif
/* /*
* Check to see if we will overwrite ourselves. * Check to see if we will overwrite ourselves.
* r4 = final kernel address * r4 = final kernel address
...@@ -223,15 +320,14 @@ restart: adr r0, LC0 ...@@ -223,15 +320,14 @@ restart: adr r0, LC0
* r10 = end of this image, including bss/stack/malloc space if non XIP * r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want: * We basically want:
* r4 - 16k page directory >= r10 -> OK * r4 - 16k page directory >= r10 -> OK
* r4 + image length <= current position (pc) -> OK * r4 + image length <= address of wont_overwrite -> OK
*/ */
add r10, r10, #16384 add r10, r10, #16384
cmp r4, r10 cmp r4, r10
bhs wont_overwrite bhs wont_overwrite
add r10, r4, r9 add r10, r4, r9
ARM( cmp r10, pc ) adr r9, wont_overwrite
THUMB( mov lr, pc ) cmp r10, r9
THUMB( cmp r10, lr )
bls wont_overwrite bls wont_overwrite
/* /*
...@@ -285,14 +381,16 @@ wont_overwrite: ...@@ -285,14 +381,16 @@ wont_overwrite:
* r2 = BSS start * r2 = BSS start
* r3 = BSS end * r3 = BSS end
* r4 = kernel execution address * r4 = kernel execution address
* r5 = appended dtb size (0 if not present)
* r7 = architecture ID * r7 = architecture ID
* r8 = atags pointer * r8 = atags pointer
* r11 = GOT start * r11 = GOT start
* r12 = GOT end * r12 = GOT end
* sp = stack pointer * sp = stack pointer
*/ */
teq r0, #0 orrs r1, r0, r5
beq not_relocated beq not_relocated
add r11, r11, r0 add r11, r11, r0
add r12, r12, r0 add r12, r12, r0
...@@ -307,12 +405,21 @@ wont_overwrite: ...@@ -307,12 +405,21 @@ wont_overwrite:
/* /*
* Relocate all entries in the GOT table. * Relocate all entries in the GOT table.
* Bump bss entries to _edata + dtb size
*/ */
1: ldr r1, [r11, #0] @ relocate entries in the GOT 1: ldr r1, [r11, #0] @ relocate entries in the GOT
add r1, r1, r0 @ table. This fixes up the add r1, r1, r0 @ This fixes up C references
str r1, [r11], #4 @ C references. cmp r1, r2 @ if entry >= bss_start &&
cmphs r3, r1 @ bss_end > entry
addhi r1, r1, r5 @ entry += dtb size
str r1, [r11], #4 @ next entry
cmp r11, r12 cmp r11, r12
blo 1b blo 1b
/* bump our bss pointers too */
add r2, r2, r5
add r3, r3, r5
#else #else
/* /*
......
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#define fdt16_to_cpu(x) be16_to_cpu(x)
#define cpu_to_fdt16(x) cpu_to_be16(x)
#define fdt32_to_cpu(x) be32_to_cpu(x)
#define cpu_to_fdt32(x) cpu_to_be32(x)
#define fdt64_to_cpu(x) be64_to_cpu(x)
#define cpu_to_fdt64(x) cpu_to_be64(x)
#endif
...@@ -18,14 +18,9 @@ ...@@ -18,14 +18,9 @@
unsigned int __machine_arch_type; unsigned int __machine_arch_type;
#define _LINUX_STRING_H_
#include <linux/compiler.h> /* for inline */ #include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */ #include <linux/types.h>
#include <linux/stddef.h> /* for NULL */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/string.h>
static void putstr(const char *ptr); static void putstr(const char *ptr);
extern void error(char *x); extern void error(char *x);
...@@ -101,41 +96,6 @@ static void putstr(const char *ptr) ...@@ -101,41 +96,6 @@ static void putstr(const char *ptr)
flush(); flush();
} }
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
int i = 0;
unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
for (i = __n >> 3; i > 0; i--) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 2) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 1) {
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1)
*d++ = *s++;
return __dest;
}
/* /*
* gzip declarations * gzip declarations
*/ */
......
/*
* arch/arm/boot/compressed/string.c
*
* Small subset of simple string routines
*/
#include <linux/string.h>
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
int i = 0;
unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
for (i = __n >> 3; i > 0; i--) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 2) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1 << 1) {
*d++ = *s++;
*d++ = *s++;
}
if (__n & 1)
*d++ = *s++;
return __dest;
}
void *memmove(void *__dest, __const void *__src, size_t count)
{
unsigned char *d = __dest;
const unsigned char *s = __src;
if (__dest == __src)
return __dest;
if (__dest < __src)
return memcpy(__dest, __src, count);
while (count--)
d[count] = s[count];
return __dest;
}
size_t strlen(const char *s)
{
const char *sc = s;
while (*sc != '\0')
sc++;
return sc - s;
}
int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count;
int res = 0;
while (su1 < end) {
res = *su1++ - *su2++;
if (res)
break;
}
return res;
}
int strcmp(const char *cs, const char *ct)
{
unsigned char c1, c2;
int res = 0;
do {
c1 = *cs++;
c2 = *ct++;
res = c1 - c2;
if (res)
break;
} while (c1);
return res;
}
void *memchr(const void *s, int c, size_t count)
{
const unsigned char *p = s;
while (count--)
if ((unsigned char)c == *p++)
return (void *)(p - 1);
return NULL;
}
char *strchr(const char *s, int c)
{
while (*s != (char)c)
if (*s++ == '\0')
return NULL;
return (char *)s;
}
#undef memset
void *memset(void *s, int c, size_t count)
{
char *xs = s;
while (count--)
*xs++ = c;
return s;
}
void __memzero(void *s, size_t count)
{
memset(s, 0, count);
}
...@@ -51,6 +51,10 @@ SECTIONS ...@@ -51,6 +51,10 @@ SECTIONS
_got_start = .; _got_start = .;
.got : { *(.got) } .got : { *(.got) }
_got_end = .; _got_end = .;
/* ensure the zImage file size is always a multiple of 64 bits */
/* (without a dummy byte, ld just ignores the empty section) */
.pad : { BYTE(0); . = ALIGN(8); }
_edata = .; _edata = .;
. = BSS_START; . = BSS_START;
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/io.h> #include <linux/io.h>
...@@ -276,6 +277,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic, ...@@ -276,6 +277,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
if (gic_irqs > 1020) if (gic_irqs > 1020)
gic_irqs = 1020; gic_irqs = 1020;
gic->gic_irqs = gic_irqs;
/* /*
* Set all global interrupts to be level triggered, active low. * Set all global interrupts to be level triggered, active low.
*/ */
...@@ -343,6 +346,189 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) ...@@ -343,6 +346,189 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(1, base + GIC_CPU_CTRL); writel_relaxed(1, base + GIC_CPU_CTRL);
} }
#ifdef CONFIG_CPU_PM
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
* with interrupts disabled but before powering down the GIC. After calling
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
static void gic_dist_save(unsigned int gic_nr)
{
unsigned int gic_irqs;
void __iomem *dist_base;
int i;
if (gic_nr >= MAX_GIC_NR)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
if (!dist_base)
return;
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
gic_data[gic_nr].saved_spi_conf[i] =
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
gic_data[gic_nr].saved_spi_target[i] =
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
gic_data[gic_nr].saved_spi_enable[i] =
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
* Restores the GIC distributor registers during resume or when coming out of
* idle. Must be called before enabling interrupts. If a level interrupt
* that occurred while the GIC was suspended is still present, it will be
* handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
static void gic_dist_restore(unsigned int gic_nr)
{
unsigned int gic_irqs;
unsigned int i;
void __iomem *dist_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
if (!dist_base)
return;
writel_relaxed(0, dist_base + GIC_DIST_CTRL);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
writel_relaxed(0xa0a0a0a0,
dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
dist_base + GIC_DIST_ENABLE_SET + i * 4);
writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
static void gic_cpu_save(unsigned int gic_nr)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
if (!dist_base || !cpu_base)
return;
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
static void gic_cpu_restore(unsigned int gic_nr)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
if (!dist_base || !cpu_base)
return;
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
int i;
for (i = 0; i < MAX_GIC_NR; i++) {
switch (cmd) {
case CPU_PM_ENTER:
gic_cpu_save(i);
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
gic_cpu_restore(i);
break;
case CPU_CLUSTER_PM_ENTER:
gic_dist_save(i);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
gic_dist_restore(i);
break;
}
}
return NOTIFY_OK;
}
static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
static void __init gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
BUG_ON(!gic->saved_ppi_enable);
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
sizeof(u32));
BUG_ON(!gic->saved_ppi_conf);
cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
void __init gic_init(unsigned int gic_nr, unsigned int irq_start, void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
void __iomem *dist_base, void __iomem *cpu_base) void __iomem *dist_base, void __iomem *cpu_base)
{ {
...@@ -358,8 +544,10 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start, ...@@ -358,8 +544,10 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
if (gic_nr == 0) if (gic_nr == 0)
gic_cpu_base_addr = cpu_base; gic_cpu_base_addr = cpu_base;
gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic, irq_start); gic_dist_init(gic, irq_start);
gic_cpu_init(gic); gic_cpu_init(gic);
gic_pm_init(gic);
} }
void __cpuinit gic_secondary_init(unsigned int gic_nr) void __cpuinit gic_secondary_init(unsigned int gic_nr)
......
...@@ -205,6 +205,13 @@ extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, ...@@ -205,6 +205,13 @@ extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
int dma_mmap_writecombine(struct device *, struct vm_area_struct *, int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t); void *, dma_addr_t, size_t);
/*
* This can be called during boot to increase the size of the consistent
* DMA region above its default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
*/
extern void __init init_consistent_dma_size(unsigned long size);
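A minimal usage sketch, assuming a made-up board file (the hook name is hypothetical; this is the interface that replaces per-platform CONSISTENT_DMA_SIZE defines such as the AT91SAM9G45 one removed further down in this diff):
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <asm/sizes.h>
static void __init myboard_map_io(void)
{
	/* This board needs more coherent DMA memory than the default 2MB. */
	init_consistent_dma_size(SZ_4M);
}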
#ifdef CONFIG_DMABOUNCE #ifdef CONFIG_DMABOUNCE
/* /*
......
...@@ -46,6 +46,14 @@ struct gic_chip_data { ...@@ -46,6 +46,14 @@ struct gic_chip_data {
unsigned int irq_offset; unsigned int irq_offset;
void __iomem *dist_base; void __iomem *dist_base;
void __iomem *cpu_base; void __iomem *cpu_base;
#ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
u32 __percpu *saved_ppi_enable;
u32 __percpu *saved_ppi_conf;
#endif
unsigned int gic_irqs;
}; };
#endif #endif
......
...@@ -50,6 +50,7 @@ static inline void decode_ctrl_reg(u32 reg, ...@@ -50,6 +50,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V6_1 2 #define ARM_DEBUG_ARCH_V6_1 2
#define ARM_DEBUG_ARCH_V7_ECP14 3 #define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4 #define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
/* Breakpoint */ /* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0 #define ARM_BREAKPOINT_EXECUTE 0
...@@ -57,6 +58,7 @@ static inline void decode_ctrl_reg(u32 reg, ...@@ -57,6 +58,7 @@ static inline void decode_ctrl_reg(u32 reg,
/* Watchpoints */ /* Watchpoints */
#define ARM_BREAKPOINT_LOAD 1 #define ARM_BREAKPOINT_LOAD 1
#define ARM_BREAKPOINT_STORE 2 #define ARM_BREAKPOINT_STORE 2
#define ARM_FSR_ACCESS_MASK (1 << 11)
/* Privilege Levels */ /* Privilege Levels */
#define ARM_BREAKPOINT_PRIV 1 #define ARM_BREAKPOINT_PRIV 1
......
...@@ -17,7 +17,7 @@ struct sys_timer; ...@@ -17,7 +17,7 @@ struct sys_timer;
struct machine_desc { struct machine_desc {
unsigned int nr; /* architecture number */ unsigned int nr; /* architecture number */
const char *name; /* architecture name */ const char *name; /* architecture name */
unsigned long boot_params; /* tagged list */ unsigned long atag_offset; /* tagged list (relative) */
const char **dt_compat; /* array of device tree const char **dt_compat; /* array of device tree
* 'compatible' strings */ * 'compatible' strings */
......
...@@ -29,6 +29,7 @@ struct map_desc { ...@@ -29,6 +29,7 @@ struct map_desc {
#define MT_MEMORY_NONCACHED 11 #define MT_MEMORY_NONCACHED 11
#define MT_MEMORY_DTCM 12 #define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13 #define MT_MEMORY_ITCM 13
#define MT_MEMORY_SO 14
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int); extern void iotable_init(struct map_desc *, int);
......
...@@ -77,16 +77,7 @@ ...@@ -77,16 +77,7 @@
*/ */
#define IOREMAP_MAX_ORDER 24 #define IOREMAP_MAX_ORDER 24
/*
* Size of DMA-consistent memory region. Must be multiple of 2M,
* between 2MB and 14MB inclusive.
*/
#ifndef CONSISTENT_DMA_SIZE
#define CONSISTENT_DMA_SIZE SZ_2M
#endif
#define CONSISTENT_END (0xffe00000UL) #define CONSISTENT_END (0xffe00000UL)
#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE)
#else /* CONFIG_MMU */ #else /* CONFIG_MMU */
......
...@@ -232,6 +232,9 @@ extern pgprot_t pgprot_kernel; ...@@ -232,6 +232,9 @@ extern pgprot_t pgprot_kernel;
#define pgprot_writecombine(prot) \ #define pgprot_writecombine(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
#define pgprot_stronglyordered(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \ #define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
......
...@@ -13,7 +13,12 @@ ...@@ -13,7 +13,12 @@
#define __ARM_PMU_H__ #define __ARM_PMU_H__
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/perf_event.h>
/*
* Types of PMUs that can be accessed directly and require mutual
* exclusion between profiling tools.
*/
enum arm_pmu_type { enum arm_pmu_type {
ARM_PMU_DEVICE_CPU = 0, ARM_PMU_DEVICE_CPU = 0,
ARM_NUM_PMU_DEVICES, ARM_NUM_PMU_DEVICES,
...@@ -37,21 +42,17 @@ struct arm_pmu_platdata { ...@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
* reserve_pmu() - reserve the hardware performance counters * reserve_pmu() - reserve the hardware performance counters
* *
* Reserve the hardware performance counters in the system for exclusive use. * Reserve the hardware performance counters in the system for exclusive use.
* The platform_device for the system is returned on success, ERR_PTR() * Returns 0 on success or -EBUSY if the lock is already held.
* encoded error on failure.
*/ */
extern struct platform_device * extern int
reserve_pmu(enum arm_pmu_type type); reserve_pmu(enum arm_pmu_type type);
/** /**
* release_pmu() - Relinquish control of the performance counters * release_pmu() - Relinquish control of the performance counters
* *
* Release the performance counters and allow someone else to use them. * Release the performance counters and allow someone else to use them.
* Callers must have disabled the counters and released IRQs before calling
* this. The platform_device returned from reserve_pmu() must be passed as
* a cookie.
*/ */
extern int extern void
release_pmu(enum arm_pmu_type type); release_pmu(enum arm_pmu_type type);
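A hedged usage sketch of the reworked reservation API (the caller name is made up; per the comments above, reserve_pmu() now returns 0 or -EBUSY and release_pmu() returns nothing):
#include <linux/errno.h>
#include <asm/pmu.h>
static int example_claim_cpu_pmu(void)
{
	int err = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (err)
		return err;	/* -EBUSY: the counters are already in use */
	/* ... program and read the hardware counters ... */
	release_pmu(ARM_PMU_DEVICE_CPU);	/* void: no error to check */
	return 0;
}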
/** /**
...@@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type); ...@@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type);
#include <linux/err.h> #include <linux/err.h>
static inline struct platform_device *
reserve_pmu(enum arm_pmu_type type)
{
return ERR_PTR(-ENODEV);
}
static inline int static inline int
release_pmu(enum arm_pmu_type type) reserve_pmu(enum arm_pmu_type type)
{ {
return -ENODEV; return -ENODEV;
} }
static inline int static inline void
init_pmu(enum arm_pmu_type type) release_pmu(enum arm_pmu_type type) { }
{
return -ENODEV;
}
#endif /* CONFIG_CPU_HAS_PMU */ #endif /* CONFIG_CPU_HAS_PMU */
#ifdef CONFIG_HW_PERF_EVENTS
/* The events for a given PMU register set. */
struct pmu_hw_events {
/*
* The events that are active on the PMU for the given index.
*/
struct perf_event **events;
/*
* A 1 bit for an index indicates that the counter is being used for
* an event. A 0 means that the counter can be used.
*/
unsigned long *used_mask;
/*
* Hardware lock to serialize accesses to PMU registers. Needed for the
* read/modify/write sequences.
*/
raw_spinlock_t pmu_lock;
};
struct arm_pmu {
struct pmu pmu;
enum arm_perf_pmu_ids id;
enum arm_pmu_type type;
cpumask_t active_irqs;
const char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
struct hw_perf_event *hwc);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
u32 (*read_counter)(int idx);
void (*write_counter)(int idx, u32 val);
void (*start)(void);
void (*stop)(void);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
struct platform_device *plat_device;
struct pmu_hw_events *(*get_hw_events)(void);
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx, int overflow);
int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx);
#endif /* CONFIG_HW_PERF_EVENTS */
#endif /* __ARM_PMU_H__ */ #endif /* __ARM_PMU_H__ */
...@@ -81,6 +81,10 @@ extern void cpu_dcache_clean_area(void *, int); ...@@ -81,6 +81,10 @@ extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
/* These three are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else #else
#define cpu_proc_init processor._proc_init #define cpu_proc_init processor._proc_init
#define cpu_proc_fin processor._proc_fin #define cpu_proc_fin processor._proc_fin
...@@ -89,6 +93,10 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); ...@@ -89,6 +93,10 @@ extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#define cpu_dcache_clean_area processor.dcache_clean_area #define cpu_dcache_clean_area processor.dcache_clean_area
#define cpu_set_pte_ext processor.set_pte_ext #define cpu_set_pte_ext processor.set_pte_ext
#define cpu_do_switch_mm processor.switch_mm #define cpu_do_switch_mm processor.switch_mm
/* These three are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend processor.do_suspend
#define cpu_do_resume processor.do_resume
#endif #endif
extern void cpu_resume(void); extern void cpu_resume(void);
......
#ifndef __ASM_ARM_SUSPEND_H #ifndef __ASM_ARM_SUSPEND_H
#define __ASM_ARM_SUSPEND_H #define __ASM_ARM_SUSPEND_H
#include <asm/memory.h>
#include <asm/tlbflush.h>
extern void cpu_resume(void); extern void cpu_resume(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
/*
* Hide the first two arguments to __cpu_suspend - these are an implementation
* detail which platform code shouldn't have to know about.
*/
static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
extern int __cpu_suspend(int, long, unsigned long,
int (*)(unsigned long));
int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
flush_tlb_all();
return ret;
}
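As a rough illustration of how platform code uses this wrapper (the platform names and the power-off hook are hypothetical):
#include <asm/suspend.h>
extern void myplat_cut_power(void);	/* hypothetical SoC-specific hook */
static int myplat_finisher(unsigned long arg)
{
	/* Last code run with CPU state saved; ideally power is cut here. */
	myplat_cut_power();
	/* Returning aborts the suspend; the value propagates to the caller. */
	return 1;
}
static int myplat_enter_lowpower(void)
{
	/* Returns 0 after a successful suspend/resume cycle. */
	return cpu_suspend(0, myplat_finisher);
}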
#endif #endif
...@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o ...@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_PM_SLEEP) += sleep.o obj-$(CONFIG_PM_SLEEP) += sleep.o suspend.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
...@@ -43,6 +43,13 @@ obj-$(CONFIG_KPROBES) += kprobes-thumb.o ...@@ -43,6 +43,13 @@ obj-$(CONFIG_KPROBES) += kprobes-thumb.o
else else
obj-$(CONFIG_KPROBES) += kprobes-arm.o obj-$(CONFIG_KPROBES) += kprobes-arm.o
endif endif
obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
test-kprobes-objs := kprobes-test.o
ifdef CONFIG_THUMB2_KERNEL
test-kprobes-objs += kprobes-test-thumb.o
else
test-kprobes-objs += kprobes-test-arm.o
endif
obj-$(CONFIG_ATAGS_PROC) += atags.o obj-$(CONFIG_ATAGS_PROC) += atags.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_ARM_THUMBEE) += thumbee.o obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
......
...@@ -60,6 +60,7 @@ ...@@ -60,6 +60,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/module.h>
#include "kprobes.h" #include "kprobes.h"
...@@ -971,6 +972,9 @@ const union decode_item kprobe_decode_arm_table[] = { ...@@ -971,6 +972,9 @@ const union decode_item kprobe_decode_arm_table[] = {
DECODE_END DECODE_END
}; };
#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
EXPORT_SYMBOL_GPL(kprobe_decode_arm_table);
#endif
static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs) static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
{ {
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/module.h>
#include "kprobes.h" #include "kprobes.h"
...@@ -943,6 +944,9 @@ const union decode_item kprobe_decode_thumb32_table[] = { ...@@ -943,6 +944,9 @@ const union decode_item kprobe_decode_thumb32_table[] = {
*/ */
DECODE_END DECODE_END
}; };
#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
EXPORT_SYMBOL_GPL(kprobe_decode_thumb32_table);
#endif
static void __kprobes static void __kprobes
t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs) t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)
...@@ -1423,6 +1427,9 @@ const union decode_item kprobe_decode_thumb16_table[] = { ...@@ -1423,6 +1427,9 @@ const union decode_item kprobe_decode_thumb16_table[] = {
DECODE_END DECODE_END
}; };
#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
EXPORT_SYMBOL_GPL(kprobe_decode_thumb16_table);
#endif
static unsigned long __kprobes thumb_check_cc(unsigned long cpsr) static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
{ {
......
...@@ -413,6 +413,14 @@ struct decode_reject { ...@@ -413,6 +413,14 @@ struct decode_reject {
DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0) DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)
#ifdef CONFIG_THUMB2_KERNEL
extern const union decode_item kprobe_decode_thumb16_table[];
extern const union decode_item kprobe_decode_thumb32_table[];
#else
extern const union decode_item kprobe_decode_arm_table[];
#endif
int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
const union decode_item *table, bool thumb16); const union decode_item *table, bool thumb16);
......
...@@ -54,7 +54,7 @@ enum armv6_perf_types { ...@@ -54,7 +54,7 @@ enum armv6_perf_types {
}; };
enum armv6_counters { enum armv6_counters {
ARMV6_CYCLE_COUNTER = 1, ARMV6_CYCLE_COUNTER = 0,
ARMV6_COUNTER0, ARMV6_COUNTER0,
ARMV6_COUNTER1, ARMV6_COUNTER1,
}; };
...@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, ...@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx) int idx)
{ {
unsigned long val, mask, evt, flags; unsigned long val, mask, evt, flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) { if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0; mask = 0;
...@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, ...@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
* Mask out the current event and set the counter to count the event * Mask out the current event and set the counter to count the event
* that we're interested in. * that we're interested in.
*/ */
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read(); val = armv6_pmcr_read();
val &= ~mask; val &= ~mask;
val |= evt; val |= evt;
armv6_pmcr_write(val); armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int counter_is_active(unsigned long pmcr, int idx)
{
unsigned long mask = 0;
if (idx == ARMV6_CYCLE_COUNTER)
mask = ARMV6_PMCR_CCOUNT_IEN;
else if (idx == ARMV6_COUNTER0)
mask = ARMV6_PMCR_COUNT0_IEN;
else if (idx == ARMV6_COUNTER1)
mask = ARMV6_PMCR_COUNT1_IEN;
if (mask)
return pmcr & mask;
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return 0;
} }
static irqreturn_t static irqreturn_t
...@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num, ...@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
{ {
unsigned long pmcr = armv6_pmcr_read(); unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data; struct perf_sample_data data;
struct cpu_hw_events *cpuc; struct pmu_hw_events *cpuc;
struct pt_regs *regs; struct pt_regs *regs;
int idx; int idx;
...@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num, ...@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
perf_sample_data_init(&data, 0); perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events); cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) { for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx]; struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc; struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask)) if (!counter_is_active(pmcr, idx))
continue; continue;
/* /*
...@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num, ...@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
continue; continue;
if (perf_event_overflow(event, &data, regs)) if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx); cpu_pmu->disable(hwc, idx);
} }
/* /*
...@@ -527,28 +545,30 @@ static void ...@@ -527,28 +545,30 @@ static void
armv6pmu_start(void) armv6pmu_start(void)
{ {
unsigned long flags, val; unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read(); val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE; val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val); armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static void static void
armv6pmu_stop(void) armv6pmu_stop(void)
{ {
unsigned long flags, val; unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read(); val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE; val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val); armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static int static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event) struct hw_perf_event *event)
{ {
/* Always place a cycle counter into the cycle counter. */ /* Always place a cycle counter into the cycle counter. */
...@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, ...@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
int idx) int idx)
{ {
unsigned long val, mask, evt, flags; unsigned long val, mask, evt, flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) { if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN; mask = ARMV6_PMCR_CCOUNT_IEN;
...@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, ...@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
* of ETM bus signal assertion cycles. The external reporting should * of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment. * be disabled and so this should never increment.
*/ */
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read(); val = armv6_pmcr_read();
val &= ~mask; val &= ~mask;
val |= evt; val |= evt;
armv6_pmcr_write(val); armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static void static void
...@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, ...@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
int idx) int idx)
{ {
unsigned long val, mask, flags, evt = 0; unsigned long val, mask, flags, evt = 0;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) { if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN; mask = ARMV6_PMCR_CCOUNT_IEN;
...@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, ...@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
* Unlike UP ARMv6, we don't have a way of stopping the counters. We * Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting. * simply disable the interrupt reporting.
*/ */
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read(); val = armv6_pmcr_read();
val &= ~mask; val &= ~mask;
val |= evt; val |= evt;
armv6_pmcr_write(val); armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv6_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv6_perf_map,
&armv6_perf_cache_map, 0xFF);
} }
static const struct arm_pmu armv6pmu = { static struct arm_pmu armv6pmu = {
.id = ARM_PERF_PMU_ID_V6, .id = ARM_PERF_PMU_ID_V6,
.name = "v6", .name = "v6",
.handle_irq = armv6pmu_handle_irq, .handle_irq = armv6pmu_handle_irq,
...@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = { ...@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
.get_event_idx = armv6pmu_get_event_idx, .get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start, .start = armv6pmu_start,
.stop = armv6pmu_stop, .stop = armv6pmu_stop,
.cache_map = &armv6_perf_cache_map, .map_event = armv6_map_event,
.event_map = &armv6_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3, .num_events = 3,
.max_period = (1LLU << 32) - 1, .max_period = (1LLU << 32) - 1,
}; };
static const struct arm_pmu *__init armv6pmu_init(void) static struct arm_pmu *__init armv6pmu_init(void)
{ {
return &armv6pmu; return &armv6pmu;
} }
...@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void) ...@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
* disable the interrupt reporting and update the event. When unthrottling we * disable the interrupt reporting and update the event. When unthrottling we
* reset the period and enable the interrupt reporting. * reset the period and enable the interrupt reporting.
*/ */
static const struct arm_pmu armv6mpcore_pmu = {
static int armv6mpcore_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv6mpcore_perf_map,
&armv6mpcore_perf_cache_map, 0xFF);
}
static struct arm_pmu armv6mpcore_pmu = {
.id = ARM_PERF_PMU_ID_V6MP, .id = ARM_PERF_PMU_ID_V6MP,
.name = "v6mpcore", .name = "v6mpcore",
.handle_irq = armv6pmu_handle_irq, .handle_irq = armv6pmu_handle_irq,
...@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = { ...@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
.get_event_idx = armv6pmu_get_event_idx, .get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start, .start = armv6pmu_start,
.stop = armv6pmu_stop, .stop = armv6pmu_stop,
.cache_map = &armv6mpcore_perf_cache_map, .map_event = armv6mpcore_map_event,
.event_map = &armv6mpcore_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3, .num_events = 3,
.max_period = (1LLU << 32) - 1, .max_period = (1LLU << 32) - 1,
}; };
static const struct arm_pmu *__init armv6mpcore_pmu_init(void) static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{ {
return &armv6mpcore_pmu; return &armv6mpcore_pmu;
} }
#else #else
static const struct arm_pmu *__init armv6pmu_init(void) static struct arm_pmu *__init armv6pmu_init(void)
{ {
return NULL; return NULL;
} }
static const struct arm_pmu *__init armv6mpcore_pmu_init(void) static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{ {
return NULL; return NULL;
} }
......
...@@ -40,7 +40,7 @@ enum xscale_perf_types { ...@@ -40,7 +40,7 @@ enum xscale_perf_types {
}; };
enum xscale_counters { enum xscale_counters {
XSCALE_CYCLE_COUNTER = 1, XSCALE_CYCLE_COUNTER = 0,
XSCALE_COUNTER0, XSCALE_COUNTER0,
XSCALE_COUNTER1, XSCALE_COUNTER1,
XSCALE_COUNTER2, XSCALE_COUNTER2,
...@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) ...@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
{ {
unsigned long pmnc; unsigned long pmnc;
struct perf_sample_data data; struct perf_sample_data data;
struct cpu_hw_events *cpuc; struct pmu_hw_events *cpuc;
struct pt_regs *regs; struct pt_regs *regs;
int idx; int idx;
...@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev) ...@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init(&data, 0); perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events); cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) { for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx]; struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc; struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue; continue;
...@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) ...@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
continue; continue;
if (perf_event_overflow(event, &data, regs)) if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx); cpu_pmu->disable(hwc, idx);
} }
irq_work_run(); irq_work_run();
...@@ -284,6 +281,7 @@ static void ...@@ -284,6 +281,7 @@ static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{ {
unsigned long val, mask, evt, flags; unsigned long val, mask, evt, flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
switch (idx) { switch (idx) {
case XSCALE_CYCLE_COUNTER: case XSCALE_CYCLE_COUNTER:
...@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) ...@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
return; return;
} }
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc(); val = xscale1pmu_read_pmnc();
val &= ~mask; val &= ~mask;
val |= evt; val |= evt;
xscale1pmu_write_pmnc(val); xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static void static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{ {
unsigned long val, mask, evt, flags; unsigned long val, mask, evt, flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
switch (idx) { switch (idx) {
case XSCALE_CYCLE_COUNTER: case XSCALE_CYCLE_COUNTER:
...@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) ...@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
return; return;
} }
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc(); val = xscale1pmu_read_pmnc();
val &= ~mask; val &= ~mask;
val |= evt; val |= evt;
xscale1pmu_write_pmnc(val); xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static int static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event) struct hw_perf_event *event)
{ {
if (XSCALE_PERFCTR_CCNT == event->config_base) { if (XSCALE_PERFCTR_CCNT == event->config_base) {
...@@ -368,24 +367,26 @@ static void ...@@ -368,24 +367,26 @@ static void
xscale1pmu_start(void) xscale1pmu_start(void)
{ {
unsigned long flags, val; unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc(); val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE; val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val); xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static void static void
xscale1pmu_stop(void) xscale1pmu_stop(void)
{ {
unsigned long flags, val; unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
raw_spin_lock_irqsave(&pmu_lock, flags); raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc(); val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE; val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val); xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
} }
static inline u32 static inline u32
...@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val) ...@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
} }
} }
static const struct arm_pmu xscale1pmu = { static int xscale_map_event(struct perf_event *event)
{
return map_cpu_event(event, &xscale_perf_map,
&xscale_perf_cache_map, 0xFF);
}
static struct arm_pmu xscale1pmu = {
.id = ARM_PERF_PMU_ID_XSCALE1, .id = ARM_PERF_PMU_ID_XSCALE1,
.name = "xscale1", .name = "xscale1",
.handle_irq = xscale1pmu_handle_irq, .handle_irq = xscale1pmu_handle_irq,
...@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = { ...@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
.get_event_idx = xscale1pmu_get_event_idx, .get_event_idx = xscale1pmu_get_event_idx,
.start = xscale1pmu_start, .start = xscale1pmu_start,
.stop = xscale1pmu_stop, .stop = xscale1pmu_stop,
.cache_map = &xscale_perf_cache_map, .map_event = xscale_map_event,
.event_map = &xscale_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3, .num_events = 3,
.max_period = (1LLU << 32) - 1, .max_period = (1LLU << 32) - 1,
}; };
static const struct arm_pmu *__init xscale1pmu_init(void) static struct arm_pmu *__init xscale1pmu_init(void)
{ {
 	return &xscale1pmu;
 }
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 	irq_work_run();
@@ -616,6 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			 struct hw_perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -718,24 +722,26 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 static inline u32
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
 	}
 }
-static const struct arm_pmu xscale2pmu = {
+static struct arm_pmu xscale2pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE2,
 	.name		= "xscale2",
 	.handle_irq	= xscale2pmu_handle_irq,
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
 	.get_event_idx	= xscale2pmu_get_event_idx,
 	.start		= xscale2pmu_start,
 	.stop		= xscale2pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 5,
 	.max_period	= (1LLU << 32) - 1,
 };
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return &xscale2pmu;
 }
 #else
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
 {
 	return NULL;
 }
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
 {
 	return NULL;
 }
...
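The xscale2 hunks above all make the same structural move: the file-scope pmu_lock and the shared armpmu pointer give way to per-PMU state, with each operation fetching a struct pmu_hw_events through cpu_pmu->get_hw_events() and serialising on events->pmu_lock. The snippet below is a minimal userspace model of that ownership change, not kernel code: the struct layouts are simplified stand-ins for the real arm_pmu/pmu_hw_events definitions, and a pthread mutex takes the place of the kernel's raw_spinlock_t.

#include <pthread.h>
#include <stdio.h>

#define MAX_COUNTERS 5

struct pmu_hw_events {                       /* per-PMU accounting state */
	void *events[MAX_COUNTERS];          /* events scheduled on hardware counters */
	pthread_mutex_t pmu_lock;            /* stand-in for raw_spinlock_t */
};

struct arm_pmu {
	const char *name;
	int num_events;
	/* each PMU hands out its own state instead of a file-scope global */
	struct pmu_hw_events *(*get_hw_events)(void);
};

static struct pmu_hw_events xscale2_hw_events = {
	.pmu_lock = PTHREAD_MUTEX_INITIALIZER,
};

static struct pmu_hw_events *xscale2_get_hw_events(void)
{
	return &xscale2_hw_events;
}

static struct arm_pmu xscale2pmu_model = {
	.name          = "xscale2",
	.num_events    = MAX_COUNTERS,
	.get_hw_events = xscale2_get_hw_events,
};

static struct arm_pmu *cpu_pmu = &xscale2pmu_model;

/* mirrors the shape of xscale2pmu_start() after the change: lock the
 * state owned by this PMU rather than a shared global pmu_lock. */
static void model_pmu_start(void)
{
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	pthread_mutex_lock(&events->pmu_lock);
	printf("%s: enabling %d counters\n", cpu_pmu->name, cpu_pmu->num_events);
	pthread_mutex_unlock(&events->pmu_lock);
}

int main(void)
{
	model_pmu_start();
	return 0;
}

The PMU descriptor itself is consolidated in the same spirit: the .cache_map, .event_map and .raw_event_mask fields are folded into the single .map_event callback (xscale_map_event), and the descriptor loses its const qualifier so the core code can fill in common fields at init time.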
@@ -128,8 +128,6 @@
 #define AT91SAM9G45_EHCI_BASE	0x00800000	/* USB Host controller (EHCI) */
 #define AT91SAM9G45_VDEC_BASE	0x00900000	/* Video Decoder Controller */
-#define CONSISTENT_DMA_SIZE	SZ_4M
 /*
  * DMA peripheral identifiers
  * for hardware handshaking interface
...