Commit f8c72e9e authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk

into ppc970.osdl.org:/home/torvalds/v2.5/linux
parents c07ac043 f01324b3
@@ -31,6 +31,7 @@ comma = ,
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
 # testing for a specific architecture or later rather impossible.
+arch-$(CONFIG_CPU_32v6)		:=-D__LINUX_ARM_ARCH__=6 -march=armv5t -Wa,-march=armv6
 arch-$(CONFIG_CPU_32v5)		:=-D__LINUX_ARM_ARCH__=5 $(call check_gcc,-march=armv5te,-march=armv4)
 arch-$(CONFIG_CPU_32v4)		:=-D__LINUX_ARM_ARCH__=4 -march=armv4
 arch-$(CONFIG_CPU_32v3)		:=-D__LINUX_ARM_ARCH__=3 -march=armv3
@@ -45,6 +46,7 @@ tune-$(CONFIG_CPU_ARM926T)	:=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_SA110)	:=-mtune=strongarm110
 tune-$(CONFIG_CPU_SA1100)	:=-mtune=strongarm1100
 tune-$(CONFIG_CPU_XSCALE)	:=$(call check_gcc,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
+tune-$(CONFIG_CPU_V6)		:=-mtune=strongarm
 # Need -Uarm for gcc < 3.x
 CFLAGS_BOOT	:=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
......
@@ -585,7 +585,7 @@ __armv3_cache_off:
  * On entry,
  *  r6 = processor ID
  * On exit,
- *  r1, r2, r3, r12 corrupted
+ *  r1, r2, r3, r11, r12 corrupted
  * This routine must preserve:
  *  r0, r4, r5, r6, r7
  */
@@ -595,9 +595,25 @@ cache_clean_flush:
 		b	call_cache_fn

 __armv4_cache_flush:
-		bic	r1, pc, #31
-		add	r2, r1, #65536		@ 2x the largest dcache size
-1:		ldr	r3, [r1], #32		@ s/w flush D cache
+		mov	r2, #64*1024		@ default: 32K dcache size (*2)
+		mov	r11, #32		@ default: 32 byte line size
+		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
+		teq	r3, r6			@ cache ID register present?
+		beq	no_cache_id
+		mov	r1, r3, lsr #18
+		and	r1, r1, #7
+		mov	r2, #1024
+		mov	r2, r2, lsl r1		@ base dcache size *2
+		tst	r3, #1 << 14		@ test M bit
+		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
+		mov	r3, r3, lsr #12
+		and	r3, r3, #3
+		mov	r11, #8
+		mov	r11, r11, lsl r3	@ cache line size in bytes
+no_cache_id:
+		bic	r1, pc, #63		@ align to longest cache line
+		add	r2, r1, r2
+1:		ldr	r3, [r1], r11		@ s/w flush D cache
 		teq	r1, r2
 		bne	1b
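As an aside, the cache-type read above packs the dcache geometry into bit fields. A minimal host-side sketch of the same decoding, using a hypothetical example register value and assuming the pre-ARMv7 cache type register layout (size in bits 20:18, M bit 14, line length in bits 13:12):

#include <stdio.h>

int main(void)
{
	unsigned int ctype = 0x1d152152;	/* example value only, not taken from this patch */
	unsigned int size  = (ctype >> 18) & 7;	/* dcache size field */
	unsigned int m     = (ctype >> 14) & 1;	/* multiplier (M) bit */
	unsigned int len   = (ctype >> 12) & 3;	/* dcache line length field */

	unsigned int dcache_x2 = 1024u << size;	/* "base dcache size *2", as in the asm */
	if (m)
		dcache_x2 += dcache_x2 >> 1;	/* +1/2 size if M == 1 */

	printf("dcache = %u bytes, line = %u bytes\n", dcache_x2 / 2, 8u << len);
	return 0;
}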
......
@@ -5,6 +5,6 @@
 obj-y			+= platform.o
 obj-$(CONFIG_ARM_AMBA)	+= amba.o
 obj-$(CONFIG_ICST525)	+= icst525.o
-obj-$(CONFIG_SA1111)	+= sa1111.o sa1111-pcibuf.o sa1111-pcipool.o
+obj-$(CONFIG_SA1111)	+= sa1111.o sa1111-pcibuf.o
 obj-$(CONFIG_PCI_HOST_PLX90X0)	+= plx90x0.o
 obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
@@ -315,16 +315,30 @@ amba_find_device(const char *busid, struct device *parent, unsigned int id,
 	return data.dev;
 }

+/**
+ *	amba_request_regions - request all mem regions associated with device
+ *	@dev: amba_device structure for device
+ *	@name: name, or NULL to use driver name
+ */
 int amba_request_regions(struct amba_device *dev, const char *name)
 {
 	int ret = 0;

+	if (!name)
+		name = dev->dev.driver->name;
+
 	if (!request_mem_region(dev->res.start, SZ_4K, name))
 		ret = -EBUSY;

 	return ret;
 }

+/**
+ *	amba_release_regions - release mem regions associated with device
+ *	@dev: amba_device structure for device
+ *
+ *	Release regions claimed by a successful call to amba_request_regions.
+ */
 void amba_release_regions(struct amba_device *dev)
 {
 	release_mem_region(dev->res.start, SZ_4K);
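For context, a hypothetical probe routine pairing these two helpers might look like the sketch below; the probe signature, names and error handling are assumptions for illustration, not part of this patch:

static int mydrv_probe(struct amba_device *dev, void *id)
{
	void *base;
	int ret;

	ret = amba_request_regions(dev, NULL);	/* NULL: fall back to the driver name */
	if (ret)
		return ret;

	base = ioremap(dev->res.start, SZ_4K);
	if (!base) {
		amba_release_regions(dev);
		return -ENOMEM;
	}

	/* ... register the device with its subsystem ... */
	return 0;
}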
......
@@ -11,7 +11,6 @@ obj-y		:= arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
 		   time.o traps.o
 obj-$(CONFIG_APM)	+= apm.o
-obj-$(CONFIG_PM)	+= pm.o
 obj-$(CONFIG_ARCH_ACORN)	+= ecard.o time-acorn.o
 obj-$(CONFIG_ARCH_CLPS7500)	+= time-acorn.o
 obj-$(CONFIG_FOOTBRIDGE)	+= isa.o
......
@@ -47,6 +47,10 @@ int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
   BLANK();
+#if __LINUX_ARM_ARCH__ >= 6
+  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
+#endif
+  BLANK();
   DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
   DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
   BLANK();
......
@@ -66,6 +66,15 @@
 		msr	cpsr_c, \mode
 		.endm

+#if __LINUX_ARM_ARCH__ >= 6
+		.macro	disable_irq, temp
+		cpsid	i
+		.endm
+
+		.macro	enable_irq, temp
+		cpsie	i
+		.endm
+#else
 		.macro	disable_irq, temp
 		set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
 		.endm
@@ -73,6 +82,7 @@
 		.macro	enable_irq, temp
 		set_cpsr_c \temp, #MODE_SVC
 		.endm
+#endif

 		.macro	save_user_regs
 		sub	sp, sp, #S_FRAME_SIZE
......
@@ -31,6 +31,7 @@
 #include <linux/seq_file.h>
 #include <linux/errno.h>
 #include <linux/list.h>
+#include <linux/kallsyms.h>

 #include <asm/irq.h>
 #include <asm/system.h>
@@ -225,6 +226,34 @@ static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
 }

 static void
+report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
+{
+	static int count = 100;
+	struct irqaction *action;
+
+	if (!count)
+		return;
+
+	count--;
+
+	if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
+		printk("irq%u: bogus retval mask %x\n", irq, ret);
+	} else {
+		printk("irq%u: nobody cared\n", irq);
+	}
+	show_regs(regs);
+	dump_stack();
+	printk(KERN_ERR "handlers:");
+	action = desc->action;
+	do {
+		printk("\n" KERN_ERR "[<%p>]", action->handler);
+		print_symbol(" (%s)", (unsigned long)action->handler);
+		action = action->next;
+	} while (action);
+	printk("\n");
+}
+
+static int
 __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
 {
 	unsigned int status;
@@ -247,18 +276,7 @@ __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)

 	spin_lock_irq(&irq_controller_lock);

-	if (retval != 1) {
-		static int count = 100;
-		if (count) {
-			count--;
-			if (retval) {
-				printk("irq event %d: bogus retval mask %x\n",
-					irq, retval);
-			} else {
-				printk("irq %d: nobody cared\n", irq);
-			}
-		}
-	}
+	return retval;
 }

 /*
@@ -276,8 +294,11 @@ do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 	kstat_cpu(cpu).irqs[irq]++;

 	action = desc->action;
-	if (action)
-		__do_irq(irq, desc->action, regs);
+	if (action) {
+		int ret = __do_irq(irq, action, regs);
+		if (ret != IRQ_HANDLED)
+			report_bad_irq(irq, regs, desc, ret);
+	}
 }

 /*
@@ -313,6 +334,7 @@ do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 	do {
 		struct irqaction *action;
+		int ret;

 		action = desc->action;
 		if (!action)
@@ -323,7 +345,9 @@ do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 			desc->chip->unmask(irq);
 		}

-		__do_irq(irq, action, regs);
+		ret = __do_irq(irq, action, regs);
+		if (ret != IRQ_HANDLED)
+			report_bad_irq(irq, regs, desc, ret);
 	} while (desc->pending && !desc->disable_depth);

 	desc->running = 0;
@@ -368,7 +392,10 @@ do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
 	 */
 	action = desc->action;
 	if (action) {
-		__do_irq(irq, desc->action, regs);
+		int ret = __do_irq(irq, desc->action, regs);
+		if (ret != IRQ_HANDLED)
+			report_bad_irq(irq, regs, desc, ret);

 		if (likely(!desc->disable_depth &&
 			   !check_irq_lock(desc, irq, regs)))
......
/*
* linux/arch/arm/kernel/suspend.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*
* This is the common support code for suspending an ARM machine.
* pm_do_suspend() is responsible for actually putting the CPU to
* sleep.
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/pm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#ifdef CONFIG_SYSCTL
/*
* We really want this to die. It's a disgusting hack using unallocated
* sysctl numbers. We should be using a real interface.
*/
static int
pm_sysctl_proc_handler(ctl_table *ctl, int write, struct file *filp,
void *buffer, size_t *lenp)
{
int ret = -EIO;
printk("PM: task %s (pid %d) uses deprecated sysctl PM interface\n",
current->comm, current->pid);
if (write)
ret = pm_suspend(PM_SUSPEND_MEM);
return ret;
}
/*
* This came from arch/arm/mach-sa1100/pm.c:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
* with modifications by Nicolas Pitre and Russell King.
*
* ARGH! ACPI people defined CTL_ACPI in linux/acpi.h rather than
* linux/sysctl.h.
*
* This means our interface here won't survive long - it needs a new
* interface. Quick hack to get this working - use sysctl id 9999.
*/
#warning ACPI broke the kernel, this interface needs to be fixed up.
#define CTL_ACPI 9999
#define ACPI_S1_SLP_TYP 19
static struct ctl_table pm_table[] =
{
{
.ctl_name = ACPI_S1_SLP_TYP,
.procname = "suspend",
.mode = 0200,
.proc_handler = pm_sysctl_proc_handler,
},
{0}
};
static struct ctl_table pm_dir_table[] =
{
{
.ctl_name = CTL_ACPI,
.procname = "pm",
.mode = 0555,
.child = pm_table,
},
{0}
};
/*
* Initialize power interface
*/
static int __init pm_init(void)
{
register_sysctl_table(pm_dir_table, 1);
return 0;
}
fs_initcall(pm_init);
#endif
@@ -118,21 +118,21 @@ static struct resource io_res[] = {
 #define lp2 io_res[2]

 static const char *cache_types[16] = {
-	"write-through",
-	"write-back",
-	"write-back",
+	"VIVT write-through",
+	"VIVT write-back",
+	"VIVT write-back",
 	"undefined 3",
 	"undefined 4",
 	"undefined 5",
-	"write-back",
-	"write-back",
+	"VIVT write-back",
+	"VIVT write-back",
 	"undefined 8",
 	"undefined 9",
 	"undefined 10",
 	"undefined 11",
 	"undefined 12",
 	"undefined 13",
-	"undefined 14",
+	"VIPT write-back",
 	"undefined 15",
 };
@@ -151,7 +151,7 @@ static const char *cache_clean[16] = {
 	"undefined 11",
 	"undefined 12",
 	"undefined 13",
-	"undefined 14",
+	"cp15 c7 ops",
 	"undefined 15",
 };
@@ -170,7 +170,7 @@ static const char *cache_lockdown[16] = {
 	"undefined 11",
 	"undefined 12",
 	"undefined 13",
-	"undefined 14",
+	"format C",
 	"undefined 15",
 };
@@ -183,7 +183,7 @@ static const char *proc_arch[] = {
 	"5T",
 	"5TE",
 	"5TEJ",
-	"?(9)",
+	"6TEJ",
 	"?(10)",
 	"?(11)",
 	"?(12)",
......
@@ -19,10 +19,24 @@ ENTRY(__raw_readsl)
 		ands	ip, r1, #3
 		bne	2f

-1:		ldr	r3, [r0]
-		str	r3, [r1], #4
-		subs	r2, r2, #1
-		bne	1b
+		subs	r2, r2, #4
+		bmi	1001f
+		stmfd	sp!, {r4, lr}
+1000:		ldr	r3, [r0, #0]
+		ldr	r4, [r0, #0]
+		ldr	ip, [r0, #0]
+		ldr	lr, [r0, #0]
+		subs	r2, r2, #4
+		stmia	r1!, {r3, r4, ip, lr}
+		bpl	1000b
+		ldmfd	sp!, {r4, lr}
+1001:		tst	r2, #2
+		ldrne	r3, [r0, #0]
+		ldrne	ip, [r0, #0]
+		stmneia	r1!, {r3, ip}
+		tst	r2, #1
+		ldrne	r3, [r0, #0]
+		strne	r3, [r1, #0]
 		mov	pc, lr
2:		cmp	ip, #2
......
@@ -18,6 +18,7 @@
 #include <linux/cpufreq.h>
 #include <linux/ioport.h>

+#include <asm/div64.h>
 #include <asm/hardware.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -111,6 +112,21 @@ unsigned int cpufreq_get(unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get);
 #endif
/*
* This is the SA11x0 sched_clock implementation. This has
* a resolution of 271ns, and a maximum value of 1165s.
* ( * 1E9 / 3686400 => * 78125 / 288)
*/
unsigned long long sched_clock(void)
{
unsigned long long v;
v = (unsigned long long)OSCR * 78125;
do_div(v, 288);
return v;
}
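As a sanity check on the comment above: 78125/288 is simply 10^9/3686400 reduced to lowest terms. A small host-side sketch (not kernel code) reproducing the quoted resolution and wrap interval:

#include <stdio.h>

int main(void)
{
	/* OSCR ticks at 3.6864 MHz, so ns/tick = 1e9 / 3686400 = 78125 / 288 */
	double ns_per_tick = 78125.0 / 288.0;			/* ~271.3 ns resolution */
	double wrap_s = 4294967296.0 * ns_per_tick / 1e9;	/* 32-bit OSCR rollover */

	printf("%.1f ns per tick, counter wraps after ~%.0f s\n",
	       ns_per_tick, wrap_s);
	return 0;
}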
 /*
  * Default power-off for SA1100
  */
@@ -151,6 +167,36 @@ static struct platform_device sa11x0udc_device = {
 	.resource	= sa11x0udc_resources,
 };
static struct resource sa11x0uart1_resources[] = {
[0] = {
.start = 0x80010000,
.end = 0x8001ffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device sa11x0uart1_device = {
.name = "sa11x0-uart",
.id = 1,
.num_resources = ARRAY_SIZE(sa11x0uart1_resources),
.resource = sa11x0uart1_resources,
};
static struct resource sa11x0uart3_resources[] = {
[0] = {
.start = 0x80050000,
.end = 0x8005ffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device sa11x0uart3_device = {
.name = "sa11x0-uart",
.id = 3,
.num_resources = ARRAY_SIZE(sa11x0uart3_resources),
.resource = sa11x0uart3_resources,
};
 static struct resource sa11x0mcp_resources[] = {
 	[0] = {
 		.start	= 0x80060000,
@@ -218,6 +264,8 @@ static struct platform_device sa11x0pcmcia_device = {

 static struct platform_device *sa11x0_devices[] __initdata = {
 	&sa11x0udc_device,
+	&sa11x0uart1_device,
+	&sa11x0uart3_device,
 	&sa11x0mcp_device,
 	&sa11x0ssp_device,
 	&sa11x0pcmcia_device,
......
@@ -223,6 +223,17 @@ config CPU_XSCALE
 	select CPU_TLB_V4WBI
 	select CPU_MINICACHE

+# ARMv6
+config CPU_V6
+	bool "Support ARM V6 processor"
+	depends on ARCH_INTEGRATOR
+	select CPU_32v6
+	select CPU_ABRT_EV6
+	select CPU_CACHE_V6
+	select CPU_COPY_V6
+	select CPU_TLB_V6
+
+# Figure out what processor architecture version we should be using.
 # This defines the compiler instruction set which depends on the machine type.
 config CPU_32v3
 	bool
@@ -233,6 +244,9 @@ config CPU_32v4
 config CPU_32v5
 	bool

+config CPU_32v6
+	bool
+
 # The abort model
 config CPU_ABRT_EV4
 	bool
@@ -249,6 +263,9 @@ config CPU_ABRT_EV5T
 config CPU_ABRT_EV5TJ
 	bool

+config CPU_ABRT_EV6
+	bool
+
 # The cache model
 config CPU_CACHE_V3
 	bool
@@ -262,6 +279,9 @@ config CPU_CACHE_V4WT
 config CPU_CACHE_V4WB
 	bool

+config CPU_CACHE_V6
+	bool
+
 # The copy-page model
 config CPU_COPY_V3
 	bool
@@ -272,6 +292,9 @@ config CPU_COPY_V4WT
 config CPU_COPY_V4WB
 	bool

+config CPU_COPY_V6
+	bool
+
 # This selects the TLB model
 config CPU_TLB_V3
 	bool
@@ -306,7 +329,7 @@ comment "Processor Features"

 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE
+	depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_V6
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
......
@@ -15,15 +15,18 @@ obj-$(CONFIG_CPU_ABRT_EV4T)	+= abort-ev4t.o
 obj-$(CONFIG_CPU_ABRT_LV4T)	+= abort-lv4t.o
 obj-$(CONFIG_CPU_ABRT_EV5T)	+= abort-ev5t.o
 obj-$(CONFIG_CPU_ABRT_EV5TJ)	+= abort-ev5tj.o
+obj-$(CONFIG_CPU_ABRT_EV6)	+= abort-ev6.o
 obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o
 obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o
 obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o
 obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o
+obj-$(CONFIG_CPU_CACHE_V6)	+= cache-v6.o
 obj-$(CONFIG_CPU_COPY_V3)	+= copypage-v3.o
 obj-$(CONFIG_CPU_COPY_V4WT)	+= copypage-v4wt.o
 obj-$(CONFIG_CPU_COPY_V4WB)	+= copypage-v4wb.o
+obj-$(CONFIG_CPU_COPY_V6)	+= copypage-v6.o mmu.o
 obj-$(CONFIG_CPU_SA1100)	+= copypage-v4mc.o
 obj-$(CONFIG_CPU_XSCALE)	+= copypage-xscale.o
@@ -33,6 +36,7 @@ obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
 obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
 obj-$(CONFIG_CPU_TLB_V4WB)	+= tlb-v4wb.o
 obj-$(CONFIG_CPU_TLB_V4WBI)	+= tlb-v4wbi.o
+obj-$(CONFIG_CPU_TLB_V6)	+= tlb-v6.o

 obj-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o
 obj-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o
@@ -48,3 +52,4 @@ obj-$(CONFIG_CPU_ARM1026)	+= proc-arm1026.o
 obj-$(CONFIG_CPU_SA110)		+= proc-sa110.o
 obj-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
+obj-$(CONFIG_CPU_V6)		+= proc-v6.o blockops.o
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Function: v6_early_abort
*
* Params : r2 = address of aborted instruction
* : r3 = saved SPSR
*
* Returns : r0 = address of abort
* : r1 = FSR, bit 11 = write
* : r2-r8 = corrupted
* : r9 = preserved
* : sp = pointer to registers
*
* Purpose : obtain information about current aborted instruction.
*/
.align 5
ENTRY(v6_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
mov pc, lr
@@ -188,38 +188,33 @@ ENTRY(v4t_late_abort)
 .data_thumb_pushpop:
 		tst	r8, #1 << 10
 		beq	.data_unknown
-		mov	r7, #0x11
-		and	r6, r8, r7
-		and	r2, r8, r7, lsl #1
+		and	r6, r8, #0x55		@ hweight8(r8) + R bit
+		and	r2, r8, #0xaa
 		add	r6, r6, r2, lsr #1
-		and	r2, r8, r7, lsl #2
+		and	r2, r6, #0xcc
+		and	r6, r6, #0x33
 		add	r6, r6, r2, lsr #2
-		and	r2, r8, r7, lsl #3
-		add	r6, r6, r2, lsr #3
-		add	r6, r6, r6, lsr #4
-		and	r2, r8, #0x0100		@ catch 'R' bit for push/pop
-		add	r6, r6, r2, lsr #8
+		movs	r7, r8, lsr #9		@ C = r8 bit 8 (R bit)
+		adc	r6, r6, r6, lsr #4	@ high + low nibble + R bit
 		and	r6, r6, #15		@ number of regs to transfer
 		ldr	r7, [sp, #13 << 2]
 		tst	r8, #1 << 11
-		addne	r7, r7, r6, lsl #2	@ increment SP if PUSH
-		subeq	r7, r7, r6, lsl #2	@ decrement SP if POP
+		addeq	r7, r7, r6, lsl #2	@ increment SP if PUSH
+		subne	r7, r7, r6, lsl #2	@ decrement SP if POP
 		str	r7, [sp, #13 << 2]
 		mov	pc, lr

 .data_thumb_ldmstm:
-		mov	r7, #0x11
-		and	r6, r8, r7
-		and	r2, r8, r7, lsl #1
+		and	r6, r8, #0x55		@ hweight8(r8)
+		and	r2, r8, #0xaa
 		add	r6, r6, r2, lsr #1
-		and	r2, r8, r7, lsl #2
+		and	r2, r6, #0xcc
+		and	r6, r6, #0x33
 		add	r6, r6, r2, lsr #2
-		and	r2, r8, r7, lsl #3
-		add	r6, r6, r2, lsr #3
 		add	r6, r6, r6, lsr #4
+		and	r6, r6, #15		@ number of regs to transfer
 		and	r5, r8, #7 << 8
 		ldr	r7, [sp, r5, lsr #6]
-		and	r6, r6, #15		@ number of regs to transfer
 		sub	r7, r7, r6, lsl #2	@ always decrement
 		str	r7, [sp, r5, lsr #6]
 		mov	pc, lr
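The mask trick above is a straight SWAR popcount of the Thumb register list; a rough C equivalent of the push/pop path, for illustration only (not part of the patch):

static unsigned int thumb_pushpop_reg_count(unsigned int insn)
{
	unsigned int n;

	n  = insn & 0x55;			/* count bits in even positions      */
	n += (insn & 0xaa) >> 1;		/* ... plus odd positions -> pairs   */
	n  = (n & 0x33) + ((n & 0xcc) >> 2);	/* pair counts -> nibble counts      */
	n  = (n + (n >> 4)) & 15;		/* nibble counts -> byte bit count   */
	n += (insn >> 8) & 1;			/* add the 'R' bit (PC/LR in list)   */

	return n;				/* number of registers transferred   */
}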
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
extern struct cpu_cache_fns blk_cache_fns;
#define HARVARD_CACHE
/*
* blk_flush_kern_dcache_page(kaddr)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
*/
static void __attribute__((naked))
blk_flush_kern_dcache_page(void *kaddr)
{
asm(
"add r1, r0, %0 \n\
1: .word 0xec401f0e @ mcrr p15, 0, r0, r1, c14, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c5, 0 \n\
mcr p15, 0, r0, c7, c10, 4 \n\
mov pc, lr"
:
: "I" (PAGE_SIZE));
}
/*
* blk_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_inv_range_unified(unsigned long start, unsigned long end)
{
asm(
"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line\n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
:
: "I" (L1_CACHE_BYTES - 1));
}
static void __attribute__((naked))
blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
{
asm(
"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c10, 1 @ clean D line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line \n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
:
: "I" (L1_CACHE_BYTES - 1));
}
/*
* blk_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
asm(
".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr");
}
/*
* blk_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
asm(
".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0 @ blocking \n\
mov pc, lr");
}
static int blockops_trap(struct pt_regs *regs, unsigned int instr)
{
regs->ARM_r4 |= regs->ARM_r2;
regs->ARM_pc += 4;
return 0;
}
static char *func[] = {
"Prefetch data range",
"Clean+Invalidate data range",
"Clean data range",
"Invalidate data range",
"Invalidate instr range"
};
static struct undef_hook blockops_hook __initdata = {
.instr_mask = 0x0fffffd0,
.instr_val = 0x0c401f00,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = blockops_trap,
};
static int __init blockops_check(void)
{
register unsigned int err asm("r4") = 0;
unsigned int cache_type;
int i;
asm("mcr p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
printk("Checking V6 block cache operations:\n");
register_undef_hook(&blockops_hook);
__asm__ ("mov r0, %0\n\t"
"mov r1, %1\n\t"
"mov r2, #1\n\t"
".word 0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
"mov r2, #2\n\t"
".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
"mov r2, #4\n\t"
".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
"mov r2, #8\n\t"
".word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
"mov r2, #16\n\t"
".word 0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
:
: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
: "r0", "r1", "r2");
unregister_undef_hook(&blockops_hook);
for (i = 0; i < ARRAY_SIZE(func); i++, err >>= 1)
printk("%30s: %ssupported\n", func[i], err & 1 ? "not " : "");
if ((err & 8) == 0) {
printk(" --> Using %s block cache invalidate\n",
cache_type & (1 << 24) ? "harvard" : "unified");
if (cache_type & (1 << 24))
cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
else
cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
}
if ((err & 4) == 0) {
printk(" --> Using block cache clean\n");
cpu_cache.dma_clean_range = blk_dma_clean_range;
}
if ((err & 2) == 0) {
printk(" --> Using block cache clean+invalidate\n");
cpu_cache.dma_flush_range = blk_dma_flush_range;
cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
}
return 0;
}
__initcall(blockops_check);
/*
* linux/arch/arm/mm/cache-v6.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include "proc-macros.S"
#define HARVARD_CACHE
#define CACHE_LINE_SIZE 32
#define D_CACHE_LINE_SIZE 32
/*
* v6_flush_cache_all()
*
* Flush the entire cache.
*
* It is assumed that:
*/
ENTRY(v6_flush_kern_cache_all)
mov r0, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
#else
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
#endif
mov pc, lr
/*
* v6_flush_cache_all()
*
* Flush all TLB entries in a particular address space
*
* - mm - mm_struct describing address space
*/
ENTRY(v6_flush_user_cache_all)
/*FALLTHROUGH*/
/*
* v6_flush_cache_range(start, end, flags)
*
* Flush a range of TLB entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - flags - vm_area_struct flags describing address space
*
* It is assumed that:
* - we have a VIPT cache.
*/
ENTRY(v6_flush_user_cache_range)
mov pc, lr
/*
* v6_coherent_kern_range(start,end)
*
* Ensure that the I and D caches are coherent within specified
* region. This is typically used when code has been written to
* a memory region, and will be executed.
*
* - start - virtual start address of region
* - end - virtual end address of region
*
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
ENTRY(v6_coherent_kern_range)
bic r0, r0, #CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
#endif
mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
add r0, r0, #CACHE_LINE_SIZE
cmp r0, r1
blo 1b
#ifdef HARVARD_CACHE
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#endif
mov pc, lr
/*
* v6_flush_kern_dcache_page(kaddr)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
*/
ENTRY(v6_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
#ifdef HARVARD_CACHE
mov r0, #0
mcr p15, 0, r0, c7, c10, 4
#endif
mov pc, lr
/*
* v6_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_inv_range)
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
#else
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
mcr p15, 0, r0, c7, c7, 1 @ invalidate unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* v6_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_clean_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcr p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* v6_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
__INITDATA
.type v6_cache_fns, #object
ENTRY(v6_cache_fns)
.long v6_flush_kern_cache_all
.long v6_flush_user_cache_all
.long v6_flush_user_cache_range
.long v6_coherent_kern_range
.long v6_flush_kern_dcache_page
.long v6_dma_inv_range
.long v6_dma_clean_range
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
/*
* linux/arch/arm/mm/copypage-v6.c
*
* Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#if SHMLBA > 16384
#error FIX ME
#endif
#define from_address (0xffff8000)
#define from_pgprot PAGE_KERNEL
#define to_address (0xffffc000)
#define to_pgprot PAGE_KERNEL
static pte_t *from_pte;
static pte_t *to_pte;
static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;
#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* Copy the page, taking account of the cache colour.
*/
void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long from, to;
spin_lock(&v6_lock);
set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
flush_tlb_kernel_page(from);
flush_tlb_kernel_page(to);
copy_page((void *)to, (void *)from);
spin_unlock(&v6_lock);
}
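To illustrate the colour arithmetic above (assuming the 16K SHMLBA and 4K pages used in this file), the user virtual address selects one of four alias colours:

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHMLBA		(4 * (1 << PAGE_SHIFT))	/* 16K, as assumed above */
#define DCACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long vaddr;

	/* four consecutive user pages cycle through colours 1, 2, 3, 0 */
	for (vaddr = 0x40001000UL; vaddr <= 0x40004000UL; vaddr += 0x1000)
		printf("vaddr %#lx -> colour %lu\n", vaddr, DCACHE_COLOUR(vaddr));
	return 0;
}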
void v6_clear_user_page(void *kaddr, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
spin_lock(&v6_lock);
set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
flush_tlb_kernel_page(to);
clear_page((void *)to);
spin_unlock(&v6_lock);
}
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_page = v6_clear_user_page,
.cpu_copy_user_page = v6_copy_user_page,
};
static int __init v6_userpage_init(void)
{
pgd_t *pgd;
pmd_t *pmd;
pgd = pgd_offset_k(from_address);
pmd = pmd_alloc(&init_mm, pgd, from_address);
if (!pmd)
BUG();
from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
if (!from_pte)
BUG();
to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
if (!to_pte)
BUG();
return 0;
}
__initcall(v6_userpage_init);
@@ -585,20 +585,31 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }

-static inline void free_memmap(int node, unsigned long start, unsigned long end)
+static inline void
+free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
 {
+	struct page *start_pg, *end_pg;
 	unsigned long pg, pgend;

-	start = __phys_to_virt(start);
-	end = __phys_to_virt(end);
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn);
+	end_pg = pfn_to_page(end_pfn);

-	pg = PAGE_ALIGN((unsigned long)(virt_to_page(start)));
-	pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK;
+	/*
+	 * Convert to physical addresses, and
+	 * round start upwards and end downwards.
+	 */
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;

-	start = __virt_to_phys(pg);
-	end = __virt_to_phys(pgend);
-
-	free_bootmem_node(NODE_DATA(node), start, end - start);
+	/*
+	 * If there are free pages between these,
+	 * free the section of the memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
 }

 static inline void free_unused_memmap_node(int node, struct meminfo *mi)
@@ -615,7 +626,12 @@ static inline void free_unused_memmap_node(int node, struct meminfo *mi)
 		if (mi->bank[i].size == 0 || mi->bank[i].node != node)
 			continue;

-		bank_start = mi->bank[i].start & PAGE_MASK;
+		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		if (bank_start < prev_bank_end) {
+			printk(KERN_ERR "MEM: unordered memory banks.  "
+				"Not freeing memmap.\n");
+			break;
+		}

 		/*
 		 * If we had a previous bank, and there is a space
@@ -625,7 +641,7 @@ static inline void free_unused_memmap_node(int node, struct meminfo *mi)
 			free_memmap(node, prev_bank_end, bank_start);

 		prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
-					   mi->bank[i].size);
+					   mi->bank[i].size) >> PAGE_SHIFT;
 	}
 }
......
@@ -35,3 +35,17 @@
 	ldr	\rd, [\rd, #TI_TASK]
 	ldr	\rd, [\rd, #TSK_ACTIVE_MM]
 	.endm
/*
* mmid - get context id from mm pointer (mm->context.id)
*/
.macro mmid, rd, rn
ldr \rd, [\rn, #MM_CONTEXT_ID]
.endm
/*
* mask_asid - mask the ASID from the context ID
*/
.macro asid, rd, rn
and \rd, \rn, #255
.endm
/*
* linux/arch/arm/mm/proc-v6.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/pgtable.h>
#include "proc-macros.S"
#define D_CACHE_LINE_SIZE 32
.macro cpsie, flags
.ifc \flags, f
.long 0xf1080040
.exitm
.endif
.ifc \flags, i
.long 0xf1080080
.exitm
.endif
.ifc \flags, if
.long 0xf10800c0
.exitm
.endif
.err
.endm
.macro cpsid, flags
.ifc \flags, f
.long 0xf10c0040
.exitm
.endif
.ifc \flags, i
.long 0xf10c0080
.exitm
.endif
.ifc \flags, if
.long 0xf10c00c0
.exitm
.endif
.err
.endm
ENTRY(cpu_v6_proc_init)
mov pc, lr
ENTRY(cpu_v6_proc_fin)
mov pc, lr
/*
* cpu_v6_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
*
* It is assumed that:
*/
.align 5
ENTRY(cpu_v6_reset)
mov pc, r0
/*
* cpu_v6_do_idle()
*
* Idle the processor (eg, wait for interrupt).
*
* IRQs are already disabled.
*/
ENTRY(cpu_v6_do_idle)
mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
mov pc, lr
ENTRY(cpu_v6_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #D_CACHE_LINE_SIZE
subs r1, r1, #D_CACHE_LINE_SIZE
bhi 1b
#endif
mov pc, lr
/*
 * cpu_v6_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys
*
* - pgd_phys - physical address of new TTB
*
* It is assumed that:
* - we are not using split page tables
*/
ENTRY(cpu_v6_switch_mm)
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
mcr p15, 0, r1, c13, c0, 1 @ set context ID
mov pc, lr
#define nG (1 << 11)
#define APX (1 << 9)
#define AP1 (1 << 5)
#define AP0 (1 << 4)
#define XN (1 << 0)
/*
* cpu_v6_set_pte(ptep, pte)
*
* Set a level 2 translation table entry.
*
* - ptep - pointer to level 2 translation table entry
* (hardware version is stored at -1024 bytes)
* - pte - PTE value to store
*
* Permissions:
* YUWD APX AP1 AP0 SVC User
* 0xxx 0 0 0 no acc no acc
* 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc
* 110x 1 1 0 r/o r/o
* 11x0 1 1 0 r/o r/o
* 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v6_set_pte)
str r1, [r0], #-2048 @ linux version
bic r2, r1, #0x00000ff0
bic r2, r2, #0x00000003
orr r2, r2, #AP0 | 2
tst r1, #L_PTE_WRITE
tstne r1, #L_PTE_DIRTY
orreq r2, r2, #APX
tst r1, #L_PTE_USER
orrne r2, r2, #AP1 | nG
tstne r2, #APX
eorne r2, r2, #AP0
tst r1, #L_PTE_YOUNG
biceq r2, r2, #APX | AP1 | AP0
@ tst r1, #L_PTE_EXEC
@ orreq r2, r2, #XN
tst r1, #L_PTE_PRESENT
moveq r2, #0
str r2, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
mov pc, lr
cpu_v6_name:
.asciz "Some Random V6 Processor"
.align
.section ".text.init", #alloc, #execinstr
/*
* __v6_setup
*
* Initialise TLB, Caches, and MMU state ready to switch the MMU
* on. Return in r0 the new CP15 C1 control register setting.
*
* We automatically detect if we have a Harvard cache, and use the
 *	Harvard cache control instructions instead of the unified cache
* control instructions.
*
* This should be able to cover all ARMv6 cores.
*
* It is assumed that:
* - cache type register is implemented
*/
__v6_setup:
mrc p15, 0, r10, c0, c0, 1 @ read cache type register
tst r10, #1 << 24 @ Harvard cache?
mov r10, #0
mcrne p15, 0, r10, c7, c14, 0 @ clean+invalidate D cache
mcrne p15, 0, r10, c7, c5, 0 @ invalidate I cache
mcreq p15, 0, r10, c7, c15, 0 @ clean+invalidate cache
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
mcr p15, 0, r4, c2, c0, 0 @ load TTB0
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
mrc p15, 0, r0, c1, c0, 0 @ read control register
ldr r10, cr1_clear @ get mask for bits to clear
bic r0, r0, r10 @ clear bits them
ldr r10, cr1_set @ get mask for bits to set
orr r0, r0, r10 @ set them
mov pc, lr @ return to head.S:__ret
/*
* V X F I D LR
* .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
* rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 0 110 0011 1.00 .111 1101 < we want
*/
.type cr1_clear, #object
.type cr1_set, #object
cr1_clear:
.word 0x0120c302
cr1_set:
.word 0x00c0387d
.type v6_processor_functions, #object
ENTRY(v6_processor_functions)
.word v6_early_abort
.word cpu_v6_proc_init
.word cpu_v6_proc_fin
.word cpu_v6_reset
.word cpu_v6_do_idle
.word cpu_v6_dcache_clean_area
.word cpu_v6_switch_mm
.word cpu_v6_set_pte
.size v6_processor_functions, . - v6_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv6"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v6"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
/*
* Match any ARMv6 processor core.
*/
.type __v6_proc_info, #object
__v6_proc_info:
.long 0x00070000
.long 0x00ff0000
.long 0x00000c0e
b __v6_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_FAST_MULT | HWCAP_VFP
.long cpu_v6_name
.long v6_processor_functions
.long v6wbi_tlb_fns
.long v6_user_fns
.long v6_cache_fns
.size __v6_proc_info, . - __v6_proc_info
/*
* linux/arch/arm/mm/tlb-v6.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 6 TLB handling functions.
* These assume a split I/D TLB.
*/
#include <linux/linkage.h>
#include <asm/constants.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
#define HARVARD_TLB
/*
* v6wbi_flush_user_tlb_range(start, end, vma)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
ENTRY(v6wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mov ip, #0
mmid r3, r3 @ get vm_mm->context.id
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
asid r3, r3 @ mask ASID
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
vma_vm_flags r2, r2 @ get vma->vm_flags
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA (was 1)
tst r2, #VM_EXEC @ Executable area ?
mcrne p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA (was 1)
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA (was 1)
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
/*
* v6wbi_flush_kern_tlb_range(start,end)
*
* Invalidate a range of kernel TLB entries
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
ENTRY(v6wbi_flush_kern_tlb_range)
mov r2, #0
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA
mcr p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
.section ".text.init", #alloc, #execinstr
.type v6wbi_tlb_fns, #object
ENTRY(v6wbi_tlb_fns)
.long v6wbi_flush_user_tlb_range
.long v6wbi_flush_kern_tlb_range
.long v6wbi_tlb_flags
.size v6wbi_tlb_fns, . - v6wbi_tlb_fns
@@ -50,11 +50,6 @@
 #ifdef MODULE
 void fp_send_sig(unsigned long sig, struct task_struct *p, int priv);

-#if LINUX_VERSION_CODE > 0x20115
-MODULE_AUTHOR("Scott Bambrough <scottb@rebel.com>");
-MODULE_DESCRIPTION("NWFPE floating point emulator (" NWFPE_BITS " precision)");
-#endif
-
 #else
 #define fp_send_sig	send_sig
 #define kern_fp_enter	fp_enter
@@ -172,3 +167,7 @@ void float_raise(signed char flags)

 module_init(fpe_init);
 module_exit(fpe_exit);
+
+MODULE_AUTHOR("Scott Bambrough <scottb@rebel.com>");
+MODULE_DESCRIPTION("NWFPE floating point emulator (" NWFPE_BITS " precision)");
+MODULE_LICENSE("GPL");
@@ -6,7 +6,7 @@
 # To add an entry into this database, please see Documentation/arm/README,
 # or contact rmk@arm.linux.org.uk
 #
-# Last update: Thu Sep 18 17:15:55 2003
+# Last update: Tue Feb 24 17:17:50 2004
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -202,7 +202,7 @@ karo			ARCH_KARO		KARO			190
 fester			SA1100_FESTER		FESTER			191
 gpi			ARCH_GPI		GPI			192
 smdk2410		ARCH_SMDK2410		SMDK2410		193
-premium			ARCH_PREMIUM		PREMIUM			194
+i519			ARCH_I519		I519			194
 nexio			SA1100_NEXIO		NEXIO			195
 bitbox			SA1100_BITBOX		BITBOX			196
 g200			SA1100_G200		G200			197
@@ -259,7 +259,7 @@ stork_nest		ARCH_STORK_NEST		STORK_NEST		247
 stork_egg		ARCH_STORK_EGG		STORK_EGG		248
 wismo			SA1100_WISMO		WISMO			249
 ezlinx			ARCH_EZLINX		EZLINX			250
-at91rm9200		ARCH_AT91		AT91			251
+at91rm9200		ARCH_AT91RM9200		AT91RM9200		251
 orion			ARCH_ORION		ORION			252
 neptune			ARCH_NEPTUNE		NEPTUNE			253
 hackkit			SA1100_HACKKIT		HACKKIT			254
@@ -295,7 +295,7 @@ viper			ARCH_VIPER		VIPER			283
 adsbitsyplus		SA1100_ADSBITSYPLUS	ADSBITSYPLUS		284
 adsagc			SA1100_ADSAGC		ADSAGC			285
 stp7312			ARCH_STP7312		STP7312			286
-nx_phnx			ARCH_PXA255		PXA255			287
+nx_phnx			MACH_NX_PHNX		NX_PHNX			287
 wep_ep250		ARCH_WEP_EP250		WEP_EP250		288
 inhandelf3		ARCH_INHANDELF3		INHANDELF3		289
 adi_coyote		ARCH_ADI_COYOTE		ADI_COYOTE		290
@@ -364,7 +364,7 @@ ixrd425			ARCH_IXRD425		IXRD425			352
 iq80315			ARCH_IQ80315		IQ80315			353
 nmp7312			ARCH_NMP7312		NMP7312			354
 cx861xx			ARCH_CX861XX		CX861XX			355
-ixp2000			ARCH_IXP2000		IXP2000			356
+enp2611			ARCH_ENP2611		ENP2611			356
 xda			SA1100_XDA		XDA			357
 csir_ims		ARCH_CSIR_IMS		CSIR_IMS		358
 ixp421_dnaeeth		ARCH_IXP421_DNAEETH	IXP421_DNAEETH		359
@@ -385,3 +385,92 @@ gumstik			ARCH_GUMSTIK		GUMSTIK			373
 rcube			ARCH_RCUBE		RCUBE			374
 rea_olv			ARCH_REA_OLV		REA_OLV			375
 pxa_iphone		ARCH_PXA_IPHONE		PXA_IPHONE		376
s3c3410 ARCH_S3C3410 S3C3410 377
espd_4510b ARCH_ESPD_4510B ESPD_4510B 378
mp1x ARCH_MP1X MP1X 379
at91rm9200tb ARCH_AT91RM9200TB AT91RM9200TB 380
adsvgx ARCH_ADSVGX ADSVGX 381
omap1610 ARCH_OMAP1610 OMAP1610 382
pelee ARCH_PELEE PELEE 383
e7xx ARCH_E7XX E7XX 384
iq80331 ARCH_IQ80331 IQ80331 385
versatile_pb ARCH_VERSATILE_PB VERSATILE_PB 387
kev7a400 MACH_KEV7A400 KEV7A400 388
lpd7a400 MACH_LPD7A400 LPD7A400 389
lpd7a404 MACH_LPD7A404 LPD7A404 390
fujitsu_camelot ARCH_FUJITSU_CAMELOT FUJITSU_CAMELOT 391
janus2m ARCH_JANUS2M JANUS2M 392
embtf MACH_EMBTF EMBTF 393
hpm MACH_HPM HPM 394
smdk2410tk MACH_SMDK2410TK SMDK2410TK 395
smdk2410aj MACH_SMDK2410AJ SMDK2410AJ 396
streetracer MACH_STREETRACER STREETRACER 397
eframe MACH_EFRAME EFRAME 398
csb337 MACH_CSB337 CSB337 399
pxa_lark MACH_PXA_LARK PXA_LARK 400
pxa_pnp2110 MACH_PNP2110 PNP2110 401
tcc72x MACH_TCC72X TCC72X 402
altair MACH_ALTAIR ALTAIR 403
kc3 MACH_KC3 KC3 404
sinteftd MACH_SINTEFTD SINTEFTD 405
mainstone MACH_MAINSTONE MAINSTONE 406
aday4x MACH_ADAY4X ADAY4X 407
lite300 MACH_LITE300 LITE300 408
s5c7376 MACH_S5C7376 S5C7376 409
mt02 MACH_MT02 MT02 410
mport3s MACH_MPORT3S MPORT3S 411
ra_alpha MACH_RA_ALPHA RA_ALPHA 412
xcep MACH_XCEP XCEP 413
arcom_mercury MACH_ARCOM_MERCURY ARCOM_MERCURY 414
stargate MACH_STARGATE STARGATE 415
armadilloj MACH_ARMADILLOJ ARMADILLOJ 416
elroy_jack MACH_ELROY_JACK ELROY_JACK 417
backend MACH_BACKEND BACKEND 418
s5linbox MACH_S5LINBOX S5LINBOX 419
nomadik MACH_NOMADIK NOMADIK 420
ia_cpu_9200 MACH_IA_CPU_9200 IA_CPU_9200 421
at91_bja1 MACH_AT91_BJA1 AT91_BJA1 422
corgi MACH_CORGI CORGI 423
poodle MACH_POODLE POODLE 424
ten MACH_TEN TEN 425
roverp5p MACH_ROVERP5P ROVERP5P 426
sc2700 MACH_SC2700 SC2700 427
ex_eagle MACH_EX_EAGLE EX_EAGLE 428
nx_pxa12 MACH_NX_PXA12 NX_PXA12 429
nx_pxa5 MACH_NX_PXA5 NX_PXA5 430
blackboard2 MACH_BLACKBOARD2 BLACKBOARD2 431
i819 MACH_I819 I819 432
ixmb995e MACH_IXMB995E IXMB995E 433
skyrider MACH_SKYRIDER SKYRIDER 434
skyhawk MACH_SKYHAWK SKYHAWK 435
enterprise MACH_ENTERPRISE ENTERPRISE 436
dep2410 MACH_DEP2410 DEP2410 437
armcore MACH_ARMCORE ARMCORE 438
hobbit MACH_HOBBIT HOBBIT 439
h7210 MACH_H7210 H7210 440
pxa_netdcu5 MACH_PXA_NETDCU5 PXA_NETDCU5 441
acc MACH_ACC ACC 442
esl_sarva MACH_ESL_SARVA ESL_SARVA 443
xm250 MACH_XM250 XM250 444
t6tc1xb MACH_T6TC1XB T6TC1XB 445
ess710 MACH_ESS710 ESS710 446
mx3ads MACH_MX3ADS MX3ADS 447
himalaya MACH_HIMALAYA HIMALAYA 448
bolfenk MACH_BOLFENK BOLFENK 449
at91rm9200kr MACH_AT91RM9200KR AT91RM9200KR 450
edb9312 MACH_EDB9312 EDB9312 451
omap_generic MACH_OMAP_GENERIC OMAP_GENERIC 452
aximx3 MACH_AXIMX3 AXIMX3 453
eb67xdip MACH_EB67XDIP EB67XDIP 454
webtxs MACH_WEBTXS WEBTXS 455
hawk MACH_HAWK HAWK 456
ccat91sbc001 MACH_CCAT91SBC001 CCAT91SBC001 457
expresso MACH_EXPRESSO EXPRESSO 458
h4000 MACH_H4000 H4000 459
dino MACH_DINO DINO 460
ml675k MACH_ML675K ML675K 461
edb9301 MACH_EDB9301 EDB9301 462
edb9315 MACH_EDB9315 EDB9315 463
reciva_tt MACH_RECIVA_TT RECIVA_TT 464
cstcb01 MACH_CSTCB01 CSTCB01 465
cstcb1 MACH_CSTCB1 CSTCB1 466
 /*
  *  linux/include/asm-arm/atomic.h
  *
- *  Copyright (c) 1996 Russell King.
+ *  Copyright (C) 1996 Russell King.
+ *  Copyright (C) 2002 Deep Blue Solutions Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- *  Changelog:
- *   27-06-1996	RMK	Created
- *   13-04-1997	RMK	Made functions atomic!
- *   07-12-1997	RMK	Upgraded for v2.1.
- *   26-08-1998	PJB	Added #ifdef __KERNEL__
  */
 #ifndef __ASM_ARM_ATOMIC_H
 #define __ASM_ARM_ATOMIC_H

 #include <linux/config.h>

-#ifdef CONFIG_SMP
-#error SMP not supported
-#endif
-
 typedef struct { volatile int counter; } atomic_t;

 #define ATOMIC_INIT(i)	{ (i) }

 #ifdef __KERNEL__
-#include <asm/system.h>
-
 #define atomic_read(v)	((v)->counter)
#if __LINUX_ARM_ARCH__ >= 6
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens. Writing to 'v->counter'
* without using the following operations WILL break the atomic
* nature of these ops.
*/
static inline void atomic_set(atomic_t *v, int i)
{
unsigned long tmp;
__asm__ __volatile__("@ atomic_set\n"
"1: ldrex %0, [%1]\n"
" strex %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
}
static inline void atomic_add(int i, volatile atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
static inline void atomic_sub(int i, volatile atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
static inline int atomic_dec_and_test(volatile atomic_t *v)
{
unsigned long tmp;
int result;
__asm__ __volatile__("@ atomic_dec_and_test\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=r" (tmp)
: "r" (&v->counter)
: "cc");
return result == 0;
}
static inline int atomic_add_negative(int i, volatile atomic_t *v)
{
unsigned long tmp;
int result;
__asm__ __volatile__("@ atomic_add_negative\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
return result < 0;
}
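The same ldrex/strex retry loop described above extends naturally to operations that return the new value; a hypothetical atomic_add_return-style helper written in the same pattern (illustrative sketch, not part of this patch):

static inline int atomic_add_return_sketch(int i, volatile atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add_return_sketch\n"
"1:	ldrex	%0, [%2]\n"		/* load-exclusive the counter   */
"	add	%0, %0, %3\n"		/* apply the update             */
"	strex	%1, %0, [%2]\n"		/* try to store it back         */
"	teq	%1, #0\n"		/* lost the exclusive monitor?  */
"	bne	1b"			/* then retry                   */
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}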
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_clear_mask\n"
"1: ldrex %0, %2\n"
" bic %0, %0, %3\n"
" strex %1, %0, %2\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (addr), "Ir" (mask)
: "cc");
}
#else /* ARM_ARCH_6 */
#include <asm/system.h>
#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
 #define atomic_set(v,i)	(((v)->counter) = (i))

 static inline void atomic_add(int i, volatile atomic_t *v)
@@ -103,6 +209,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	local_irq_restore(flags);
 }

+#endif /* __LINUX_ARM_ARCH__ */
+
 /* Atomic operations are already serializing on ARM */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
......
...@@ -69,6 +69,14 @@ ...@@ -69,6 +69,14 @@
# endif # endif
#endif #endif
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE) #if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model #error Unknown cache maintenance model
#endif #endif
......
...@@ -22,12 +22,12 @@ extern void consistent_sync(void *kaddr, size_t size, int rw); ...@@ -22,12 +22,12 @@ extern void consistent_sync(void *kaddr, size_t size, int rw);
* For SA-1111 these functions are "magic" and utilize bounce * For SA-1111 these functions are "magic" and utilize bounce
* buffers as needed to work around SA-1111 DMA bugs. * buffers as needed to work around SA-1111 DMA bugs.
*/ */
dma_addr_t sa1111_map_single(void *, size_t, int); dma_addr_t sa1111_map_single(struct device *dev, void *, size_t, enum dma_data_direction);
void sa1111_unmap_single(dma_addr_t, size_t, int); void sa1111_unmap_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
int sa1111_map_sg(struct scatterlist *, int, int); int sa1111_map_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_unmap_sg(struct scatterlist *, int, int); void sa1111_unmap_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_dma_sync_single(dma_addr_t, size_t, int); void sa1111_dma_sync_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
void sa1111_dma_sync_sg(struct scatterlist *, int, int); void sa1111_dma_sync_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
#ifdef CONFIG_SA1111 #ifdef CONFIG_SA1111
...@@ -122,7 +122,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, ...@@ -122,7 +122,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev)) if (dmadev_is_sa1111(dev))
return sa1111_map_single(cpu_addr, size, dir); return sa1111_map_single(dev, cpu_addr, size, dir);
consistent_sync(cpu_addr, size, dir); consistent_sync(cpu_addr, size, dir);
return __virt_to_bus((unsigned long)cpu_addr); return __virt_to_bus((unsigned long)cpu_addr);
...@@ -169,7 +169,7 @@ dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, ...@@ -169,7 +169,7 @@ dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev)) if (dmadev_is_sa1111(dev))
sa1111_unmap_single(handle, size, dir); sa1111_unmap_single(dev, handle, size, dir);
/* nothing to do */ /* nothing to do */
} }
...@@ -224,7 +224,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -224,7 +224,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
int i; int i;
if (dmadev_is_sa1111(dev)) if (dmadev_is_sa1111(dev))
return sa1111_map_sg(sg, nents, dir); return sa1111_map_sg(dev, sg, nents, dir);
for (i = 0; i < nents; i++, sg++) { for (i = 0; i < nents; i++, sg++) {
char *virt; char *virt;
...@@ -253,7 +253,7 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -253,7 +253,7 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev)) { if (dmadev_is_sa1111(dev)) {
sa1111_unmap_sg(sg, nents, dir); sa1111_unmap_sg(dev, sg, nents, dir);
return; return;
} }
...@@ -281,7 +281,7 @@ dma_sync_single(struct device *dev, dma_addr_t handle, size_t size, ...@@ -281,7 +281,7 @@ dma_sync_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
if (dmadev_is_sa1111(dev)) { if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single(handle, size, dir); sa1111_dma_sync_single(dev, handle, size, dir);
return; return;
} }
...@@ -308,7 +308,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -308,7 +308,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nents,
int i; int i;
if (dmadev_is_sa1111(dev)) { if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg(sg, nents, dir); sa1111_dma_sync_sg(dev, sg, nents, dir);
return; return;
} }
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
* v4t_early - ARMv4 with Thumb early abort handler * v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler * v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions * xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
*/ */
#undef CPU_ABORT_HANDLER #undef CPU_ABORT_HANDLER
#undef MULTI_ABORT #undef MULTI_ABORT
...@@ -98,6 +99,14 @@ ...@@ -98,6 +99,14 @@
# endif # endif
#endif #endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
# define CPU_ABORT_HANDLER v6_early_abort
# endif
#endif
#ifndef CPU_ABORT_HANDLER #ifndef CPU_ABORT_HANDLER
#error Unknown data abort handler type #error Unknown data abort handler type
#endif #endif
......
...@@ -12,6 +12,127 @@ ...@@ -12,6 +12,127 @@
#ifndef __ASM_PROC_LOCKS_H #ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H #define __ASM_PROC_LOCKS_H
#if __LINUX_ARM_ARCH__ >= 6
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op\n" \
"1: ldrex lr, [%0]\n" \
" sub lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movmi ip, %0\n" \
" blmi " #fail \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
#define __down_op_ret(ptr,fail) \
({ \
unsigned int ret; \
__asm__ __volatile__( \
"@ down_op_ret\n" \
"1: ldrex lr, [%1]\n" \
" sub lr, lr, %2\n" \
" strex ip, lr, [%1]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " #fail "\n" \
" mov %0, ip" \
: "=&r" (ret) \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
ret; \
})
#define __up_op(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movle ip, %0\n" \
" blle " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
/*
* The value 0x01000000 supports up to 128 processors and
* lots of processes. BIAS must be chosen such that sub'ing
* BIAS once per CPU will result in the long remaining
* negative.
*/
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
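/*
 * Worked example (editorial note, not part of this patch): with
 * BIAS = 0x01000000, 128 CPUs each subtracting BIAS once gives
 * 128 * 0x01000000 = 0x80000000, which is negative when read as a
 * signed 32-bit value, so the "went negative" checks below still
 * fire at the stated processor limit.
 */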
#define __down_op_write(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op_write\n" \
"1: ldrex lr, [%0]\n" \
" sub lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movne ip, %0\n" \
" blne " #fail \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc", "memory"); \
})
#define __up_op_write(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" movcs ip, %0\n" \
" blcs " #wake \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc", "memory"); \
})
#define __down_op_read(ptr,fail) \
__down_op(ptr, fail)
#define __up_op_read(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" moveq ip, %0\n" \
" bleq " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
#else
#define __down_op(ptr,fail) \ #define __down_op(ptr,fail) \
({ \ ({ \
__asm__ __volatile__( \ __asm__ __volatile__( \
...@@ -137,3 +258,5 @@ ...@@ -137,3 +258,5 @@
}) })
#endif #endif
#endif
...@@ -84,6 +84,14 @@ ...@@ -84,6 +84,14 @@
# endif # endif
#endif #endif
#ifdef CONFIG_CPU_COPY_V6
# ifdef _USER
# define MULTI_USER 1
# else
# define _USER v6
# endif
#endif
#ifndef _USER #ifndef _USER
#error Unknown user operations model #error Unknown user operations model
#endif #endif
......
...@@ -130,6 +130,14 @@ ...@@ -130,6 +130,14 @@
# define CPU_NAME cpu_xscale # define CPU_NAME cpu_xscale
# endif # endif
# endif # endif
# ifdef CONFIG_CPU_V6
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
# endif
#endif #endif
#ifndef MULTI_CPU #ifndef MULTI_CPU
......
...@@ -6,6 +6,10 @@ ...@@ -6,6 +6,10 @@
* or page size, whichever is greater since the cache aliases * or page size, whichever is greater since the cache aliases
* every size/ways bytes. * every size/ways bytes.
*/ */
#if __LINUX_ARM_ARCH__ > 5
#define SHMLBA (4 * PAGE_SIZE)
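/*
 * Editorial note (assumption, not part of this patch): with 4K pages
 * this yields 16K alignment, which covers the per-way span of the
 * 4-way ARMv6 caches (up to 64K total) and so keeps shared mappings
 * from aliasing in the cache.
 */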
#else
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#endif
#endif /* _ASMARM_SHMPARAM_H */ #endif /* _ASMARM_SHMPARAM_H */
#ifndef __ASM_SPINLOCK_H #ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H
#error ARM architecture does not support SMP spin locks #if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
* ARMv6 Spin-locking.
*
* We exclusively load the lock value. If it is zero (unlocked), we
* attempt to exclusively store 1 to take the lock. If the lock was
* already held, or the exclusive store fails, we loop and retry.
*
* Unlocked value: 0
* Locked value: 1
*/
typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
static inline void _raw_spin_lock(spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
: "cc", "memory");
}
static inline int _raw_spin_trylock(spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
: "cc", "memory");
return tmp == 0;
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
__asm__ __volatile__(
" str %1, [%0]"
:
: "r" (&lock->lock), "r" (0)
: "cc", "memory");
}
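/*
 * Illustrative sketch only (editorial addition, not part of this patch):
 * a hypothetical critical section built on the primitives above.
 */
static inline void example_locked_inc(spinlock_t *lock, volatile unsigned int *counter)
{
_raw_spin_lock(lock);
(*counter)++;
_raw_spin_unlock(lock);
}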
/*
* RWLOCKS
*/
typedef struct {
volatile unsigned int lock;
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
/*
* Write locks are easy - we just set bit 31. When unlocking, we can
* just write zero since the lock is exclusively held.
*/
static inline void _raw_write_lock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
}
static inline void _raw_write_unlock(rwlock_t *rw)
{
__asm__ __volatile__(
"str %1, [%0]"
:
: "r" (&rw->lock), "r" (0)
: "cc", "memory");
}
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
* - Increment it.
* - Store new lock value if positive, and we still own this location.
* If the value is negative, we've already failed.
* - If we failed to store the value, we want a negative result.
* - If we failed, try again.
* Unlocking is similarly hairy. We may have multiple read locks
* currently active. However, we know we won't have any write
* locks.
*/
static inline void _raw_read_lock(rwlock_t *rw)
{
unsigned long tmp, tmp2;
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc", "memory");
}
static inline void _raw_read_unlock(rwlock_t *rw)
{
unsigned long tmp, tmp2;
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc", "memory");
}
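/*
 * Editorial walkthrough (not part of this patch): the lock word is 0
 * when unlocked, N > 0 with N readers, and 0x80000000 when write
 * locked.  A reader whose increment yields a negative value, or whose
 * exclusive store fails, takes the "bmi 1b" path above and retries.
 */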
static inline int _raw_write_trylock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
return tmp == 0;
}
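/*
 * Illustrative sketch only (editorial addition, not part of this patch):
 * trylock-style use of the write-lock primitives above.
 */
static inline int example_try_set(rwlock_t *rw, volatile unsigned int *word, unsigned int value)
{
if (!_raw_write_trylock(rw))
return 0;
*word = value;
_raw_write_unlock(rw);
return 1;
}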
#endif /* __ASM_SPINLOCK_H */ #endif /* __ASM_SPINLOCK_H */
...@@ -12,7 +12,8 @@ ...@@ -12,7 +12,8 @@
#define CPU_ARCH_ARMv5 4 #define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5 #define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6 #define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv6 7 #define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
/* /*
* CR1 bits (CP#15 CR1) * CR1 bits (CP#15 CR1)
...@@ -123,6 +124,26 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info ...@@ -123,6 +124,26 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
mb(); \ mb(); \
} while (0) } while (0)
/*
* CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6
#define local_irq_save(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_save\n" \
"cpsid i" \
: "=r" (x) : : "memory", "cc"); \
})
#define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
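/*
 * Illustrative use only (editorial sketch, not part of this patch):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// mrs + cpsid i
 *	...critical section...
 *	local_irq_restore(flags);	// defined later in this file
 */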
#else
/* /*
* Save the current interrupt enable state & disable IRQs * Save the current interrupt enable state & disable IRQs
*/ */
...@@ -199,6 +220,8 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info ...@@ -199,6 +220,8 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
: "memory", "cc"); \ : "memory", "cc"); \
}) })
#endif
/* /*
* Save the current interrupt enable state. * Save the current interrupt enable state.
*/ */
......