Commit b00560f2 authored by Ingo Molnar

Merge branch 'perf/urgent' into perf/core

Merge reason: we need to queue up a dependent patch
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parents bf1af3a8 4fe757dd
......@@ -73,8 +73,8 @@
services.
</para>
<para>
The core of every DRM driver is struct drm_device. Drivers
will typically statically initialize a drm_device structure,
The core of every DRM driver is struct drm_driver. Drivers
will typically statically initialize a drm_driver structure,
then pass it to drm_init() at load time.
</para>
......@@ -84,7 +84,7 @@
<title>Driver initialization</title>
<para>
Before calling the DRM initialization routines, the driver must
first create and fill out a struct drm_device structure.
first create and fill out a struct drm_driver structure.
</para>
<programlisting>
static struct drm_driver driver = {
......
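The documentation text above describes the load path of this era: a driver fills in a static struct drm_driver and hands it to drm_init(). A minimal sketch of that pattern, assuming the 2.6.38-era interface documented here; the "example" identifiers and field values are illustrative, not taken from any real driver:

    #include "drmP.h"

    static struct drm_driver example_driver = {
            .driver_features = DRIVER_GEM,          /* assumed capability flag */
            .name            = "example",
            .desc            = "Illustrative DRM driver",
            .date            = "20110214",
            .major           = 1,
            .minor           = 0,
    };

    static int __init example_drm_module_init(void)
    {
            /* Register the statically initialized drm_driver at load time. */
            return drm_init(&example_driver);
    }
    module_init(example_drm_module_init);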
......@@ -2126,6 +2126,7 @@ S: Supported
F: fs/dlm/
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
S: Supported
F: drivers/dma/
......@@ -2774,6 +2775,15 @@ F: Documentation/isdn/README.gigaset
F: drivers/isdn/gigaset/
F: include/linux/gigaset_dev.h
GPIO SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.secretlab.ca/git/linux-2.6.git
F: Documentation/gpio/gpio.txt
F: drivers/gpio/
F: include/linux/gpio*
GRETH 10/100/1G Ethernet MAC device driver
M: Kristoffer Glembo <kristoffer@gaisler.com>
L: netdev@vger.kernel.org
......@@ -4591,7 +4601,7 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely <grant.likely@secretlab.ca>
L: devicetree-discuss@lists.ozlabs.org
L: devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
W: http://fdt.secretlab.ca
T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
......
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
......
......@@ -1391,7 +1391,7 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
depends on AEABI && EXPERIMENTAL
depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
default y
help
This option preserves the old syscall interface along with the
......
......@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
__INIT
__fixup_smp:
and r3, r9, #0x000f0000 @ architecture version
teq r3, #0x000f0000 @ CPU ID supported?
......@@ -415,18 +416,7 @@ __fixup_smp_on_up:
sub r3, r0, r3
add r4, r4, r3
add r5, r5, r3
2: cmp r4, r5
movhs pc, lr
ldmia r4!, {r0, r6}
ARM( str r6, [r0, r3] )
THUMB( add r0, r0, r3 )
#ifdef __ARMEB__
THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
THUMB( strh r6, [r0] )
b 2b
b __do_fixup_smp_on_up
ENDPROC(__fixup_smp)
.align
......@@ -440,7 +430,31 @@ smp_on_up:
ALT_SMP(.long 1)
ALT_UP(.long 0)
.popsection
#endif
.text
__do_fixup_smp_on_up:
cmp r4, r5
movhs pc, lr
ldmia r4!, {r0, r6}
ARM( str r6, [r0, r3] )
THUMB( add r0, r0, r3 )
#ifdef __ARMEB__
THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
THUMB( strh r6, [r0] )
b __do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)
ENTRY(fixup_smp)
stmfd sp!, {r4 - r6, lr}
mov r4, r0
add r5, r0, r1
mov r3, #0
bl __do_fixup_smp_on_up
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
#include "head-common.S"
......@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
u32 didr;
/* Do we implement the extended CPUID interface? */
if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
pr_warning("CPUID feature registers not supported. "
"Assuming v6 debug is present.\n");
if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
"CPUID feature registers not supported. "
"Assuming v6 debug is present.\n"))
return ARM_DEBUG_ARCH_V6;
}
ARM_DBG_READ(c0, 0, didr);
return (didr >> 16) & 0xf;
......@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
return debug_arch;
}
static int debug_arch_supported(void)
{
u8 arch = get_debug_arch();
return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
}
/* Determine number of BRP register available. */
static int get_num_brp_resources(void)
{
......@@ -268,6 +273,9 @@ static int enable_monitor_mode(void)
int hw_breakpoint_slots(int type)
{
if (!debug_arch_supported())
return 0;
/*
* We can be called early, so don't rely on
* our static variables being initialised.
......@@ -834,11 +842,11 @@ static void reset_ctrl_regs(void *unused)
/*
* v7 debug contains save and restore registers so that debug state
* can be maintained across low-power modes without leaving
* the debug logic powered up. It is IMPLEMENTATION DEFINED whether
* we can write to the debug registers out of reset, so we must
* unlock the OS Lock Access Register to avoid taking undefined
* instruction exceptions later on.
* can be maintained across low-power modes without leaving the debug
* logic powered up. It is IMPLEMENTATION DEFINED whether we can access
* the debug registers out of reset, so we must unlock the OS Lock
* Access Register to avoid taking undefined instruction exceptions
* later on.
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
/*
......@@ -882,7 +890,7 @@ static int __init arch_hw_breakpoint_init(void)
debug_arch = get_debug_arch();
if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
if (!debug_arch_supported()) {
pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
return 0;
}
......@@ -899,18 +907,18 @@ static int __init arch_hw_breakpoint_init(void)
pr_info("%d breakpoint(s) reserved for watchpoint "
"single-step.\n", core_num_reserved_brps);
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {
pr_warning("halting debug mode enabled. Assuming maximum "
"watchpoint size of 4 bytes.");
} else {
/*
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
smp_call_function(reset_ctrl_regs, NULL, 1);
reset_ctrl_regs(NULL);
on_each_cpu(reset_ctrl_regs, NULL, 1);
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {
max_watchpoint_len = 4;
pr_warning("halting debug mode enabled. Assuming maximum "
"watchpoint size of %u bytes.", max_watchpoint_len);
} else {
/* Work out the maximum supported watchpoint length. */
max_watchpoint_len = get_max_wp_len();
pr_info("maximum watchpoint size is %u bytes.\n",
......
......@@ -22,6 +22,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
#ifdef CONFIG_XIP_KERNEL
......@@ -268,12 +269,28 @@ struct mod_unwind_map {
const Elf_Shdr *txt_sec;
};
static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
const Elf_Shdr *sechdrs, const char *name)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
if (strcmp(name, secstrs + s->sh_name) == 0)
return s;
return NULL;
}
extern void fixup_smp(const void *, unsigned long);
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
const Elf_Shdr * __maybe_unused s = NULL;
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
struct mod_unwind_map maps[ARM_SEC_MAX];
int i;
......@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif
s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
if (s && !is_smp())
fixup_smp((void *)s->sh_addr, s->sh_size);
return 0;
}
......
......@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail >= buftail.fp)
if (tail + 1 >= buftail.fp)
return NULL;
return buftail.fp - 1;
......
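A hedged illustration of why the bound above becomes tail + 1 rather than tail: the frame record being read occupies tail[0], so the saved frame pointer must point strictly past the end of the current record for the walk to keep moving towards higher addresses. The struct layout below mirrors the ARM frame_tail, but the snippet is a standalone sketch, not the kernel code:

    struct frame_tail {
            struct frame_tail *fp;
            unsigned long sp;
            unsigned long lr;
    };

    static struct frame_tail *next_frame(struct frame_tail *tail)
    {
            struct frame_tail buftail = *tail;      /* stands in for the copied-from-user record */

            /* With the old "tail >= buftail.fp" check, an fp equal to tail + 1
             * passes, and the next record (fp - 1) is the current one again,
             * so the walk never makes progress. */
            if (tail + 1 >= buftail.fp)
                    return NULL;
            return buftail.fp - 1;
    }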
......@@ -50,7 +50,7 @@ static void __init colibri_mmc_init(void)
GPIO0_COLIBRI_PXA270_SD_DETECT;
if (machine_is_colibri300()) /* PXA300 Colibri */
colibri_mci_platform_data.gpio_card_detect =
GPIO39_COLIBRI_PXA300_SD_DETECT;
GPIO13_COLIBRI_PXA300_SD_DETECT;
else /* PXA320 Colibri */
colibri_mci_platform_data.gpio_card_detect =
GPIO28_COLIBRI_PXA320_SD_DETECT;
......
......@@ -41,7 +41,7 @@ static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {
GPIO4_MMC1_DAT1,
GPIO5_MMC1_DAT2,
GPIO6_MMC1_DAT3,
GPIO39_GPIO, /* SD detect */
GPIO13_GPIO, /* GPIO13_COLIBRI_PXA300_SD_DETECT */
/* UHC */
GPIO0_2_USBH_PEN,
......
......@@ -60,7 +60,7 @@ static inline void colibri_pxa3xx_init_nand(void) {}
#define GPIO113_COLIBRI_PXA270_TS_IRQ 113
/* GPIO definitions for Colibri PXA300/310 */
#define GPIO39_COLIBRI_PXA300_SD_DETECT 39
#define GPIO13_COLIBRI_PXA300_SD_DETECT 13
/* GPIO definitions for Colibri PXA320 */
#define GPIO28_COLIBRI_PXA320_SD_DETECT 28
......
......@@ -323,7 +323,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = {
.pwm_id = 0,
.max_brightness = 0xfe,
.dft_brightness = 0x7e,
.pwm_period_ns = 3500,
.pwm_period_ns = 3500 * 1024,
.init = palm27x_backlight_init,
.notify = palm27x_backlight_notify,
.exit = palm27x_backlight_exit,
......
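For scale, simple arithmetic on the two values in the hunk above: a 3500 ns period corresponds to a PWM frequency of about 1 / 3.5 µs ≈ 286 kHz, while the new 3500 * 1024 = 3,584,000 ns ≈ 3.58 ms gives roughly 279 Hz, a far more plausible backlight PWM rate, which is presumably the point of the change.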
......@@ -33,7 +33,7 @@ int pxa_pm_enter(suspend_state_t state)
#endif
/* skip registers saving for standby */
if (state != PM_SUSPEND_STANDBY) {
if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) {
pxa_cpu_pm_fns->save(sleep_save);
/* before sleeping, calculate and save a checksum */
for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
......@@ -44,7 +44,7 @@ int pxa_pm_enter(suspend_state_t state)
pxa_cpu_pm_fns->enter(state);
cpu_init();
if (state != PM_SUSPEND_STANDBY) {
if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
/* after sleeping, validate the checksum */
for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
checksum += sleep_save[i];
......
......@@ -122,6 +122,7 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select S5PV310_DEV_PD
select S5PV310_DEV_SYSMMU
select S5PV310_SETUP_I2C1
select S5PV310_SETUP_SDHCI
help
......
......@@ -124,8 +124,6 @@
#define S5PV310_PA_SYSMMU_TV 0x12E20000
#define S5PV310_PA_SYSMMU_MFC_L 0x13620000
#define S5PV310_PA_SYSMMU_MFC_R 0x13630000
#define S5PV310_SYSMMU_TOTAL_IPNUM 16
#define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM
/* compatibiltiy defines. */
#define S3C_PA_UART S5PV310_PA_UART
......
......@@ -13,6 +13,9 @@
#ifndef __ASM_ARM_ARCH_SYSMMU_H
#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
#define S5PV310_SYSMMU_TOTAL_IPNUM 16
#define S5P_SYSMMU_TOTAL_IPNUM S5PV310_SYSMMU_TOTAL_IPNUM
enum s5pv310_sysmmu_ips {
SYSMMU_MDMA,
SYSMMU_SSS,
......@@ -32,7 +35,7 @@ enum s5pv310_sysmmu_ips {
SYSMMU_MFC_R,
};
static char *sysmmu_ips_name[S5P_SYSMMU_TOTAL_IPNUM] = {
static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = {
"SYSMMU_MDMA" ,
"SYSMMU_SSS" ,
"SYSMMU_FIMC0" ,
......
......@@ -241,6 +241,9 @@ static struct locomo_platform_data locomo_info = {
struct platform_device collie_locomo_device = {
.name = "locomo",
.id = 0,
.dev = {
.platform_data = &locomo_info,
},
.num_resources = ARRAY_SIZE(locomo_resources),
.resource = locomo_resources,
};
......
......@@ -405,7 +405,7 @@ config CPU_V6
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
depends on CPU_V6 || CPU_V7
default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
default y if SMP
help
Say Y here if your ARMv6 processor supports the 'K' extension.
This enables the kernel to use some instructions not present
......@@ -416,7 +416,7 @@ config CPU_32v6K
# ARMv7
config CPU_V7
bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
select CPU_32v6K if !ARCH_OMAP2
select CPU_32v6K
select CPU_32v7
select CPU_ABRT_EV7
select CPU_PABRT_V7
......@@ -644,7 +644,7 @@ config ARM_THUMBEE
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
depends on CPU_V7 && !CPU_V6
depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
select HAVE_PROC_CPU if PROC_FS
default y if SMP
help
......
......@@ -10,8 +10,6 @@
*/
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/oprofile.h>
......@@ -46,6 +44,7 @@ char *op_name_from_perf_id(void)
return NULL;
}
}
#endif
static int report_trace(struct stackframe *frame, void *d)
{
......@@ -85,7 +84,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail)
/* frame pointers should strictly progress back up the stack
* (towards higher addresses) */
if (tail >= buftail[0].fp)
if (tail + 1 >= buftail[0].fp)
return NULL;
return buftail[0].fp-1;
......@@ -111,6 +110,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
/* provide backtrace support also in timer mode: */
ops->backtrace = arm_backtrace;
return oprofile_perf_init(ops);
......@@ -120,11 +120,3 @@ void __exit oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
#else
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
pr_info("oprofile: hardware counters not available\n");
return -ENODEV;
}
void __exit oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
......@@ -139,10 +139,11 @@ static const unsigned long mfpr_edge[] = {
#define mfp_configured(p) ((p)->config != -1)
/*
* perform a read-back of any MFPR register to make sure the
* perform a read-back of any valid MFPR register to make sure the
* previous writings are finished
*/
#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + 0)
static unsigned long mfpr_off_readback;
#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
static inline void __mfp_config_run(struct mfp_pin *p)
{
......@@ -248,6 +249,9 @@ void __init mfp_init_addr(struct mfp_addr_map *map)
spin_lock_irqsave(&mfp_spin_lock, flags);
/* mfp offset for readback */
mfpr_off_readback = map[0].offset;
for (p = map; p->start != MFP_PIN_INVALID; p++) {
offset = p->offset;
i = p->start;
......
......@@ -37,6 +37,14 @@ config S5P_GPIO_INT
help
Common code for the GPIO interrupts (other than external interrupts.)
comment "System MMU"
config S5P_SYSTEM_MMU
bool "S5P SYSTEM MMU"
depends on ARCH_S5PV310
help
Say Y here if you want to enable System MMU
config S5P_DEV_FIMC0
bool
help
......@@ -66,19 +74,3 @@ config S5P_DEV_CSIS1
bool
help
Compile in platform device definitions for MIPI-CSIS channel 1
menuconfig S5P_SYSMMU
bool "SYSMMU support"
depends on ARCH_S5PV310
help
This is a System MMU driver for Samsung ARM based Soc.
if S5P_SYSMMU
config S5P_SYSMMU_DEBUG
bool "Enables debug messages"
depends on S5P_SYSMMU
help
This enables SYSMMU driver debug massages.
endif
......@@ -19,6 +19,7 @@ obj-y += clock.o
obj-y += irq.o
obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o
obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM) += irq-pm.o
......@@ -30,4 +31,3 @@ obj-$(CONFIG_S5P_DEV_FIMC2) += dev-fimc2.o
obj-$(CONFIG_S5P_DEV_ONENAND) += dev-onenand.o
obj-$(CONFIG_S5P_DEV_CSIS0) += dev-csis0.o
obj-$(CONFIG_S5P_DEV_CSIS1) += dev-csis1.o
obj-$(CONFIG_S5P_SYSMMU) += sysmmu.o
/* linux/arch/arm/plat-s5p/include/plat/sysmmu.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Samsung sysmmu driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PLAT_S5P_SYSMMU_H
#define __ASM_PLAT_S5P_SYSMMU_H __FILE__
/* debug macro */
#ifdef CONFIG_S5P_SYSMMU_DEBUG
#define sysmmu_debug(fmt, arg...) printk(KERN_INFO "[%s] " fmt, __func__, ## arg)
#else
#define sysmmu_debug(fmt, arg...) do { } while (0)
#endif
#endif /* __ASM_PLAT_S5P_SYSMMU_H */
......@@ -16,8 +16,6 @@
#include <mach/regs-sysmmu.h>
#include <mach/sysmmu.h>
#include <plat/sysmmu.h>
struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM];
void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp)
......@@ -123,7 +121,7 @@ static int s5p_sysmmu_set_tablebase(sysmmu_ips ips)
: "=r" (pg) : : "cc"); \
pg &= ~0x3fff;
sysmmu_debug("CP15 TTBR0 : 0x%x\n", pg);
printk(KERN_INFO "%s: CP15 TTBR0 : 0x%x\n", __func__, pg);
/* Set sysmmu page table base address */
__raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR);
......
......@@ -17,6 +17,8 @@
#include <linux/irq.h>
struct sys_device;
#ifdef CONFIG_PM
extern __init int s3c_pm_init(void);
......
......@@ -10,6 +10,7 @@
#define __BFIN_ASM_SERIAL_H__
#include <linux/serial_core.h>
#include <linux/spinlock.h>
#include <mach/anomaly.h>
#include <mach/bfin_serial.h>
......@@ -41,6 +42,7 @@ struct bfin_serial_port {
struct circ_buf rx_dma_buf;
struct timer_list rx_dma_timer;
int rx_dma_nrows;
spinlock_t rx_lock;
unsigned int tx_dma_channel;
unsigned int rx_dma_channel;
struct work_struct tx_dma_workqueue;
......
......@@ -99,14 +99,12 @@ static inline int strcmp(const char *cs, const char *ct)
: "+a" (cs), "+a" (ct), "=d" (res));
return res;
}
#endif /* CONFIG_COLDFIRE */
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCMP
extern int memcmp(const void *, const void *, __kernel_size_t);
#define memcmp(d, s, n) __builtin_memcmp(d, s, n)
#endif /* CONFIG_COLDFIRE */
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
......
......@@ -243,14 +243,3 @@ void *memmove(void *dest, const void *src, size_t n)
return xdest;
}
EXPORT_SYMBOL(memmove);
int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--)
if (*su1 != *su2)
return *su1 < *su2 ? -1 : +1;
return 0;
}
EXPORT_SYMBOL(memcmp);
......@@ -141,6 +141,12 @@ SECTIONS {
*(__param)
__stop___param = .;
/* Built-in module versions */
. = ALIGN(4) ;
__start___modver = .;
*(__modver)
__stop___modver = .;
. = ALIGN(4) ;
_etext = . ;
} > TEXT
......
......@@ -4,4 +4,4 @@
lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
checksum.o memcpy.o memset.o delay.o
checksum.o memcpy.o memmove.o memset.o delay.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#define __IN_STRING_C
#include <linux/module.h>
#include <linux/string.h>
void *memmove(void *dest, const void *src, size_t n)
{
void *xdest = dest;
size_t temp;
if (!n)
return xdest;
if (dest < src) {
if ((long)dest & 1) {
char *cdest = dest;
const char *csrc = src;
*cdest++ = *csrc++;
dest = cdest;
src = csrc;
n--;
}
if (n > 2 && (long)dest & 2) {
short *sdest = dest;
const short *ssrc = src;
*sdest++ = *ssrc++;
dest = sdest;
src = ssrc;
n -= 2;
}
temp = n >> 2;
if (temp) {
long *ldest = dest;
const long *lsrc = src;
temp--;
do
*ldest++ = *lsrc++;
while (temp--);
dest = ldest;
src = lsrc;
}
if (n & 2) {
short *sdest = dest;
const short *ssrc = src;
*sdest++ = *ssrc++;
dest = sdest;
src = ssrc;
}
if (n & 1) {
char *cdest = dest;
const char *csrc = src;
*cdest = *csrc;
}
} else {
dest = (char *)dest + n;
src = (const char *)src + n;
if ((long)dest & 1) {
char *cdest = dest;
const char *csrc = src;
*--cdest = *--csrc;
dest = cdest;
src = csrc;
n--;
}
if (n > 2 && (long)dest & 2) {
short *sdest = dest;
const short *ssrc = src;
*--sdest = *--ssrc;
dest = sdest;
src = ssrc;
n -= 2;
}
temp = n >> 2;
if (temp) {
long *ldest = dest;
const long *lsrc = src;
temp--;
do
*--ldest = *--lsrc;
while (temp--);
dest = ldest;
src = lsrc;
}
if (n & 2) {
short *sdest = dest;
const short *ssrc = src;
*--sdest = *--ssrc;
dest = sdest;
src = ssrc;
}
if (n & 1) {
char *cdest = dest;
const char *csrc = src;
*--cdest = *--csrc;
}
}
return xdest;
}
EXPORT_SYMBOL(memmove);
......@@ -50,8 +50,10 @@ static int __init mcf_intc2_init(void)
int irq;
/* GPIO interrupt sources */
for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++)
for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
irq_desc[irq].chip = &intc2_irq_gpio_chip;
set_irq_handler(irq, handle_edge_irq);
}
return 0;
}
......
......@@ -108,7 +108,6 @@ Luser_return:
movel %d1,%a2
1:
move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
andl #_TIF_WORK_MASK,%d1
jne Lwork_to_do
RESTORE_ALL
......
......@@ -210,7 +210,7 @@ void
cpm_install_handler(int vec, void (*handler)(), void *dev_id)
{
request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id);
request_irq(vec, handler, 0, "timer", dev_id);
/* if (cpm_vecs[vec].handler != 0) */
/* printk(KERN_INFO "CPM interrupt %x replacing %x\n", */
......
......@@ -75,7 +75,7 @@ void hw_timer_init(void)
/* Set compare register 32Khz / 32 / 10 = 100 */
TCMP = 10;
request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL);
#endif
/* General purpose quicc timers: MC68360UM p7-20 */
......
......@@ -104,7 +104,6 @@ Luser_return:
movel %d1,%a2
1:
move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
andl #_TIF_WORK_MASK,%d1
jne Lwork_to_do
RESTORE_ALL
......
......@@ -132,8 +132,8 @@ void init_IRQ(void)
pquicc->intr_cimr = 0x00000000;
for (i = 0; (i < NR_IRQS); i++) {
set_irq_chip(irq, &intc_irq_chip);
set_irq_handler(irq, handle_level_irq);
set_irq_chip(i, &intc_irq_chip);
set_irq_handler(i, handle_level_irq);
}
}
......@@ -138,7 +138,6 @@ Luser_return:
andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
movel %d1,%a0
movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
andl #0xefff,%d1
jne Lwork_to_do /* still work to do */
Lreturn:
......
......@@ -12,7 +12,7 @@
#include <linux/types.h>
#include <asm/registers.h>
#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
static inline unsigned long arch_local_irq_save(void)
{
......
......@@ -411,21 +411,20 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
unsigned long set)
{
unsigned long old, tmp, msr;
__asm__ __volatile__("\
msrclr %2, 0x2\n\
nop\n\
lw %0, %4, r0\n\
andn %1, %0, %5\n\
or %1, %1, %6\n\
sw %1, %4, r0\n\
mts rmsr, %2\n\
nop"
: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
unsigned long flags, old, tmp;
raw_local_irq_save(flags);
__asm__ __volatile__( "lw %0, %2, r0 \n"
"andn %1, %0, %3 \n"
"or %1, %1, %4 \n"
"sw %1, %2, r0 \n"
: "=&r" (old), "=&r" (tmp)
: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
: "cc");
raw_local_irq_restore(flags);
return old;
}
......
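A hedged C-level restatement of the rewritten pte_update() above, only to make the new locking scheme explicit: the msrclr/mts interrupt masking that used to live inside the asm block becomes raw_local_irq_save()/restore() around the same read-modify-write of the PTE word. Sketch only, not the kernel code; the real version keeps the lw/andn/or/sw asm sequence:

    static inline unsigned long pte_update_sketch(unsigned long *word,
                                                  unsigned long clr,
                                                  unsigned long set)
    {
            unsigned long flags, old;

            raw_local_irq_save(flags);              /* replaces msrclr/mts in the old asm */
            old = *word;
            *word = (old & ~clr) | set;             /* lw / andn / or / sw */
            raw_local_irq_restore(flags);
            return old;
    }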
(The remaining file diffs in this merge are collapsed in the web view and not reproduced here.)