Commit 32f6e431 authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 2cd24267 eb2e58de
@@ -55,8 +55,8 @@ tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110)
tune-$(CONFIG_CPU_V6) :=-mtune=strongarm
# Need -Uarm for gcc < 3.x
-CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) $(call cc-option,-malignment-traps,-mshort-load-bytes) -msoft-float -Wa,-mno-fpu -Uarm
+CFLAGS +=-mapcs-32 $(arch-y) $(tune-y) $(call cc-option,-malignment-traps,-mshort-load-bytes) -msoft-float -Uarm
-AFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
+AFLAGS +=-mapcs-32 $(arch-y) $(tune-y) -msoft-float
CHECKFLAGS += -D__arm__=1
......
@@ -381,7 +381,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
{
struct vm_area_struct *vma;
-if (end < start)
+if (end < start || flags)
return;
vma = find_vma(current->active_mm, start);
@@ -391,7 +391,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
if (end > vma->vm_end)
end = vma->vm_end;
-flush_cache_range(vma, start, end);
+flush_cache_user_range(vma, start, end);
}
}
......
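do_cache_op() above backs the ARM-private sys_cacheflush() call: with this change a non-zero flags argument is rejected, and the flush goes through the new user-range coherency operation instead of the full flush_cache_range(). A minimal user-space sketch of how the call is typically issued after writing code into a buffer at run time; the helper name is hypothetical and __ARM_NR_cacheflush comes from the ARM <asm/unistd.h>:

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

/* Make code just written into [start, start+len) visible to the I-cache. */
static void sync_icache(void *start, unsigned long len)
{
	/* the flags (third) argument must now be zero or the kernel ignores the request */
	syscall(__ARM_NR_cacheflush,
		(unsigned long)start,
		(unsigned long)start + len,
		0);
}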
@@ -25,34 +25,58 @@
/*
- * This is for IRQs known as PXA_IRQ([8...31]).
+ * This is for peripheral IRQs internal to the PXA chip.
 */
-static void pxa_mask_irq(unsigned int irq)
+static void pxa_mask_low_irq(unsigned int irq)
{
ICMR &= ~(1 << (irq + PXA_IRQ_SKIP));
}
-static void pxa_unmask_irq(unsigned int irq)
+static void pxa_unmask_low_irq(unsigned int irq)
{
ICMR |= (1 << (irq + PXA_IRQ_SKIP));
}
-static struct irqchip pxa_internal_chip = {
-.ack = pxa_mask_irq,
-.mask = pxa_mask_irq,
-.unmask = pxa_unmask_irq,
+static struct irqchip pxa_internal_chip_low = {
+.ack = pxa_mask_low_irq,
+.mask = pxa_mask_low_irq,
+.unmask = pxa_unmask_low_irq,
};
#if PXA_INTERNAL_IRQS > 32
/*
* This is for the second set of internal IRQs as found on the PXA27x.
*/
static void pxa_mask_high_irq(unsigned int irq)
{
ICMR2 &= ~(1 << (irq - 32 + PXA_IRQ_SKIP));
}
static void pxa_unmask_high_irq(unsigned int irq)
{
ICMR2 |= (1 << (irq - 32 + PXA_IRQ_SKIP));
}
static struct irqchip pxa_internal_chip_high = {
.ack = pxa_mask_high_irq,
.mask = pxa_mask_high_irq,
.unmask = pxa_unmask_high_irq,
};
#endif
/*
* PXA GPIO edge detection for IRQs:
* IRQs are generated on Falling-Edge, Rising-Edge, or both.
* Use this instead of directly setting GRER/GFER.
*/
-static long GPIO_IRQ_rising_edge[3];
-static long GPIO_IRQ_falling_edge[3];
-static long GPIO_IRQ_mask[3];
+static long GPIO_IRQ_rising_edge[4];
+static long GPIO_IRQ_falling_edge[4];
+static long GPIO_IRQ_mask[4];
static int pxa_gpio_irq_type(unsigned int irq, unsigned int type)
{
@@ -106,13 +130,13 @@ static void pxa_ack_low_gpio(unsigned int irq)
static struct irqchip pxa_low_gpio_chip = {
.ack = pxa_ack_low_gpio,
-.mask = pxa_mask_irq,
-.unmask = pxa_unmask_irq,
+.mask = pxa_mask_low_irq,
+.unmask = pxa_unmask_low_irq,
.type = pxa_gpio_irq_type,
};
/*
- * Demux handler for GPIO 2-80 edge detect interrupts
+ * Demux handler for GPIO>=2 edge detect interrupts
 */
static void pxa_gpio_demux_handler(unsigned int irq, struct irqdesc *desc,
@@ -169,6 +193,23 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irqdesc *desc,
} while (mask);
loop = 1;
}
#if PXA_LAST_GPIO >= 96
mask = GEDR3;
if (mask) {
GEDR3 = mask;
irq = IRQ_GPIO(96);
desc = irq_desc + irq;
do {
if (mask & 1)
desc->handle(irq, desc, regs);
irq++;
desc++;
mask >>= 1;
} while (mask);
loop = 1;
}
#endif
} while (loop);
}
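The demux loop above, including the new GEDR3 block for GPIOs 96 and up, uses the usual walk-the-status-register pattern: latch the edge-detect bits, acknowledge them, then dispatch a handler for every set bit. A standalone sketch of that pattern with hypothetical names, to make the shift-and-dispatch bookkeeping explicit:

#include <stdint.h>

typedef void (*irq_handler_t)(unsigned int irq);

/* Dispatch one handler per set bit in 'status'; bit 0 maps to 'base_irq'. */
static void demux_status(uint32_t status, unsigned int base_irq,
                         irq_handler_t handlers[])
{
	unsigned int irq = base_irq;

	while (status) {
		if (status & 1)
			handlers[irq](irq);
		irq++;
		status >>= 1;
	}
}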
@@ -208,17 +249,18 @@ void __init pxa_init_irq(void)
int irq;
/* disable all IRQs */
-ICMR = 0;
+ICMR = ICMR2 = 0;
/* all IRQs are IRQ, not FIQ */
-ICLR = 0;
+ICLR = ICLR2 = 0;
/* clear all GPIO edge detects */
-GFER0 = GFER1 = GFER2 = 0;
-GRER0 = GRER1 = GRER2 = 0;
+GFER0 = GFER1 = GFER2 = GFER3 = 0;
+GRER0 = GRER1 = GRER2 = GRER3 = 0;
GEDR0 = GEDR0;
GEDR1 = GEDR1;
GEDR2 = GEDR2;
GEDR3 = GEDR3;
/* only unmasked interrupts kick us out of idle */
ICCR = 1;
@@ -227,10 +269,18 @@ void __init pxa_init_irq(void)
GPIO_IRQ_mask[0] = 3;
for (irq = PXA_IRQ(PXA_IRQ_SKIP); irq <= PXA_IRQ(31); irq++) {
-set_irq_chip(irq, &pxa_internal_chip);
+set_irq_chip(irq, &pxa_internal_chip_low);
set_irq_handler(irq, do_level_IRQ);
set_irq_flags(irq, IRQF_VALID);
}
#if PXA_INTERNAL_IRQS > 32
for (irq = PXA_IRQ(32); irq < PXA_IRQ(PXA_INTERNAL_IRQS); irq++) {
set_irq_chip(irq, &pxa_internal_chip_high);
set_irq_handler(irq, do_level_IRQ);
set_irq_flags(irq, IRQF_VALID);
}
#endif
for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) {
set_irq_chip(irq, &pxa_low_gpio_chip);
@@ -238,13 +288,13 @@ void __init pxa_init_irq(void)
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
-for (irq = IRQ_GPIO(2); irq <= IRQ_GPIO(80); irq++) {
+for (irq = IRQ_GPIO(2); irq <= IRQ_GPIO(PXA_LAST_GPIO); irq++) {
set_irq_chip(irq, &pxa_muxed_gpio_chip);
set_irq_handler(irq, do_edge_IRQ);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
-/* Install handler for GPIO 2-80 edge detect interrupts */
-set_irq_chip(IRQ_GPIO_2_80, &pxa_internal_chip);
-set_irq_chained_handler(IRQ_GPIO_2_80, pxa_gpio_demux_handler);
+/* Install handler for GPIO>=2 edge detect interrupts */
+set_irq_chip(IRQ_GPIO_2_x, &pxa_internal_chip_low);
+set_irq_chained_handler(IRQ_GPIO_2_x, pxa_gpio_demux_handler);
}
@@ -3,7 +3,8 @@
#
obj-y := consistent.o extable.o fault-armv.o \
-fault.o init.o ioremap.o mmap.o mm-armv.o
+fault.o flush.o init.o ioremap.o mmap.o \
+mm-armv.o
obj-$(CONFIG_MODULES) += proc-syms.o
......
@@ -57,6 +57,19 @@ ENTRY(v3_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(v3_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_coherent_user_range)
mov pc, lr
/*
@@ -116,6 +129,7 @@ ENTRY(v3_cache_fns)
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_coherent_user_range
.long v3_flush_kern_dcache_page
.long v3_dma_inv_range
.long v3_dma_clean_range
......
@@ -59,6 +59,19 @@ ENTRY(v4_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(v4_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_coherent_user_range)
mov pc, lr
/*
@@ -118,6 +131,7 @@ ENTRY(v4_cache_fns)
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_coherent_user_range
.long v4_flush_kern_dcache_page
.long v4_dma_inv_range
.long v4_dma_clean_range
......
@@ -121,6 +121,19 @@ ENTRY(v4wb_flush_kern_dcache_page)
* - end - virtual end address
*/
ENTRY(v4wb_coherent_kern_range)
/* fall through */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
@@ -195,6 +208,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_coherent_user_range
.long v4wb_flush_kern_dcache_page
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
......
@@ -97,6 +97,19 @@ ENTRY(v4wt_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(v4wt_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
@@ -167,6 +180,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_coherent_user_range
.long v4wt_flush_kern_dcache_page
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
......
@@ -75,6 +75,22 @@ ENTRY(v6_flush_user_cache_range)
* - the Icache does not read data from the write buffer
*/
ENTRY(v6_coherent_kern_range)
/* FALLTHROUGH */
/*
* v6_coherent_user_range(start,end)
*
* Ensure that the I and D caches are coherent within specified
* region. This is typically used when code has been written to
* a memory region, and will be executed.
*
* - start - virtual start address of region
* - end - virtual end address of region
*
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
ENTRY(v6_coherent_user_range)
bic r0, r0, #CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
@@ -203,6 +219,7 @@ ENTRY(v6_cache_fns)
.long v6_flush_user_cache_all
.long v6_flush_user_cache_range
.long v6_coherent_kern_range
.long v6_coherent_user_range
.long v6_flush_kern_dcache_page
.long v6_dma_inv_range
.long v6_dma_clean_range
......
@@ -31,14 +31,46 @@ static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;
#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
copy_page(kto, kfrom);
}
/*
* Clear the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of this page.
*/
void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
/*
* Copy the page, taking account of the cache colour.
*/
-void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long from, to;
/*
* Discard data in the kernel mapping for the new page.
* FIXME: needs this MCRR to be supported.
*/
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
"r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
/*
* Now copy the page using the same cache colour as the
* page's ultimate destination.
*/
spin_lock(&v6_lock);
set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
@@ -55,11 +87,30 @@ void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
spin_unlock(&v6_lock);
}
-void v6_clear_user_page(void *kaddr, unsigned long vaddr)
+/*
* Clear the user page. We need to deal with the aliasing issues,
* so remap the kernel page into the same cache colour as the user
* page.
*/
void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
/*
* Discard data in the kernel mapping for the new page
* FIXME: needs this MCRR to be supported.
*/
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kaddr),
"r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
/*
* Now clear the page using the same cache colour as
* the page's ultimate destination.
*/
spin_lock(&v6_lock);
set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
@@ -70,26 +121,31 @@ void v6_clear_user_page(void *kaddr, unsigned long vaddr)
}
struct cpu_user_fns v6_user_fns __initdata = {
-.cpu_clear_user_page = v6_clear_user_page,
-.cpu_copy_user_page = v6_copy_user_page,
+.cpu_clear_user_page = v6_clear_user_page_nonaliasing,
+.cpu_copy_user_page = v6_copy_user_page_nonaliasing,
};
static int __init v6_userpage_init(void)
{
-pgd_t *pgd;
-pmd_t *pmd;
-pgd = pgd_offset_k(from_address);
-pmd = pmd_alloc(&init_mm, pgd, from_address);
-if (!pmd)
-BUG();
-from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-if (!from_pte)
-BUG();
-to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-if (!to_pte)
-BUG();
+if (cache_is_vipt_aliasing()) {
+pgd_t *pgd;
+pmd_t *pmd;
+pgd = pgd_offset_k(from_address);
+pmd = pmd_alloc(&init_mm, pgd, from_address);
+if (!pmd)
+BUG();
+from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
+if (!from_pte)
+BUG();
+to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
+if (!to_pte)
+BUG();
+v6_user_fns.cpu_clear_user_page = v6_clear_user_page_aliasing;
+v6_user_fns.cpu_copy_user_page = v6_copy_user_page_aliasing;
+}
return 0;
}
......
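The aliasing copy/clear paths in copypage-v6.c key everything off DCACHE_COLOUR(vaddr), i.e. which of the SHMLBA-sized cache colours the user address falls in. A small worked sketch of that arithmetic, with typical values assumed (4 KB pages, SHMLBA of 16 KB, so four colours):

#define PAGE_SHIFT 12
#define SHMLBA (4 << PAGE_SHIFT)      /* assumed: 16 KB window, 4 colours */
#define DCACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

/* 0x40003000 & 0x3fff = 0x3000, >> 12 = 3: the user page has colour 3,
 * so the temporary kernel alias is mapped at to_address + (3 << PAGE_SHIFT). */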
@@ -76,52 +76,6 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
return 0;
}
static void __flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct mm_struct *mm = current->active_mm;
struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
unsigned long offset;
pgoff_t pgoff;
__cpuc_flush_dcache_page(page_address(page));
if (!mapping)
return;
/*
* With a VIVT cache, we need to also write back
* and invalidate any user data.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
flush_dcache_mmap_lock(mapping);
vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
/*
* If this VMA is not in our MM, we can ignore it.
*/
if (mpnt->vm_mm != mm)
continue;
if (!(mpnt->vm_flags & VM_MAYSHARE))
continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
flush_cache_page(mpnt, mpnt->vm_start + offset);
}
flush_dcache_mmap_unlock(mapping);
}
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(page);
}
EXPORT_SYMBOL(flush_dcache_page);
static void
make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
{
@@ -188,10 +142,21 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
if (page_mapping(page)) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-if (dirty)
+if (dirty) {
/*
* This is our first userspace mapping of this page.
* Ensure that the physical page is coherent with
* the kernel mapping.
*
* FIXME: only need to do this on VIVT and aliasing
* VIPT cache architectures. We can do that
* by choosing whether to set this bit...
*/
__cpuc_flush_dcache_page(page_address(page));
}
-make_coherent(vma, addr, page, dirty);
+if (cache_is_vivt())
+make_coherent(vma, addr, page, dirty);
}
}
......
/*
* linux/arch/arm/mm/flush.c
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
static void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
struct mm_struct *mm = current->active_mm;
struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
pgoff_t pgoff;
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
 * coherent with the kernel's mapping.
*/
__cpuc_flush_dcache_page(page_address(page));
/*
* If there's no mapping pointer here, then this page isn't
* visible to userspace yet, so there are no cache lines
* associated with any other aliases.
*/
if (!mapping)
return;
/*
* There are possible user space mappings of this page:
* - VIVT cache: we need to also write back and invalidate all user
* data in the current VM view associated with this page.
* - aliasing VIPT: we only need to find one mapping of this page.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
flush_dcache_mmap_lock(mapping);
vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
unsigned long offset;
/*
* If this VMA is not in our MM, we can ignore it.
*/
if (mpnt->vm_mm != mm)
continue;
if (!(mpnt->vm_flags & VM_MAYSHARE))
continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
flush_cache_page(mpnt, mpnt->vm_start + offset);
if (cache_is_vipt())
break;
}
flush_dcache_mmap_unlock(mapping);
}
/*
* Ensure cache coherency between kernel mapping and userspace mapping
* of this page.
*
* We have three cases to consider:
* - VIPT non-aliasing cache: fully coherent so nothing required.
* - VIVT: fully aliasing, so we need to handle every alias in our
* current VM view.
* - VIPT aliasing: need to handle one alias in our current VM view.
*
* If we need to handle aliasing:
* If the page only exists in the page cache and there are no user
* space mappings, we can be lazy and remember that we may have dirty
* kernel cache lines for later. Otherwise, we assume we have
* aliasing mappings.
*/
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
if (cache_is_vipt_nonaliasing())
return;
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else
__flush_dcache_page(mapping, page);
}
EXPORT_SYMBOL(flush_dcache_page);
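flush_dcache_page() is the hook drivers and filesystems call after writing to a page-cache page through its kernel mapping; on VIPT non-aliasing hardware it is now a no-op, otherwise it either defers via PG_dcache_dirty or flushes the aliases as above. A hedged sketch of the typical calling pattern, using generic kernel APIs that are not part of this commit and a hypothetical helper name:

#include <linux/highmem.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Fill a page-cache page from a driver buffer, then keep caches coherent. */
static void fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap(page);		/* kernel mapping of the page */

	memcpy(dst, src, len);
	flush_dcache_page(page);	/* user mappings/aliases see the new data */
	kunmap(page);
}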
@@ -196,6 +196,19 @@ ENTRY(arm1020_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm1020_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
mcr p15, 0, ip, c7, c10, 4
@@ -317,6 +330,7 @@ ENTRY(arm1020_cache_fns)
.long arm1020_flush_user_cache_all
.long arm1020_flush_user_cache_range
.long arm1020_coherent_kern_range
.long arm1020_coherent_user_range
.long arm1020_flush_kern_dcache_page
.long arm1020_dma_inv_range
.long arm1020_dma_clean_range
......
@@ -193,6 +193,18 @@ ENTRY(arm1020e_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm1020e_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -304,6 +316,7 @@ ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_user_cache_all
.long arm1020e_flush_user_cache_range
.long arm1020e_coherent_kern_range
.long arm1020e_coherent_user_range
.long arm1020e_flush_kern_dcache_page
.long arm1020e_dma_inv_range
.long arm1020e_dma_clean_range
......
@@ -180,6 +180,19 @@ ENTRY(arm1022_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm1022_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1022_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -291,6 +304,7 @@ ENTRY(arm1022_cache_fns)
.long arm1022_flush_user_cache_all
.long arm1022_flush_user_cache_range
.long arm1022_coherent_kern_range
.long arm1022_coherent_user_range
.long arm1022_flush_kern_dcache_page
.long arm1022_dma_inv_range
.long arm1022_dma_clean_range
......
@@ -175,6 +175,18 @@ ENTRY(arm1026_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm1026_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1026_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
@@ -286,6 +298,7 @@ ENTRY(arm1026_cache_fns)
.long arm1026_flush_user_cache_all
.long arm1026_flush_user_cache_range
.long arm1026_coherent_kern_range
.long arm1026_coherent_user_range
.long arm1026_flush_kern_dcache_page
.long arm1026_dma_inv_range
.long arm1026_dma_clean_range
......
@@ -182,6 +182,19 @@ ENTRY(arm920_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm920_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm920_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -277,6 +290,7 @@ ENTRY(arm920_cache_fns)
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
.long arm920_coherent_kern_range
.long arm920_coherent_user_range
.long arm920_flush_kern_dcache_page
.long arm920_dma_inv_range
.long arm920_dma_clean_range
......
@@ -184,6 +184,19 @@ ENTRY(arm922_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm922_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm922_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -279,6 +292,7 @@ ENTRY(arm922_cache_fns)
.long arm922_flush_user_cache_all
.long arm922_flush_user_cache_range
.long arm922_coherent_kern_range
.long arm922_coherent_user_range
.long arm922_flush_kern_dcache_page
.long arm922_dma_inv_range
.long arm922_dma_clean_range
......
@@ -225,6 +225,19 @@ ENTRY(arm925_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm925_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm925_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -329,6 +342,7 @@ ENTRY(arm925_cache_fns)
.long arm925_flush_user_cache_all
.long arm925_flush_user_cache_range
.long arm925_coherent_kern_range
.long arm925_coherent_user_range
.long arm925_flush_kern_dcache_page
.long arm925_dma_inv_range
.long arm925_dma_clean_range
......
@@ -185,6 +185,19 @@ ENTRY(arm926_flush_user_cache_range)
* - end - virtual end address
*/
ENTRY(arm926_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm926_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
@@ -289,6 +302,7 @@ ENTRY(arm926_cache_fns)
.long arm926_flush_user_cache_all
.long arm926_flush_user_cache_range
.long arm926_coherent_kern_range
.long arm926_coherent_user_range
.long arm926_flush_kern_dcache_page
.long arm926_dma_inv_range
.long arm926_dma_clean_range
......
@@ -241,6 +241,22 @@ ENTRY(xscale_flush_user_cache_range)
* it also trashes the mini I-cache used by JTAG debuggers.
*/
ENTRY(xscale_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
ENTRY(xscale_coherent_user_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
@@ -341,6 +357,7 @@ ENTRY(xscale_cache_fns)
.long xscale_flush_user_cache_all
.long xscale_flush_user_cache_range
.long xscale_coherent_kern_range
.long xscale_coherent_user_range
.long xscale_flush_kern_dcache_page
.long xscale_dma_inv_range
.long xscale_dma_clean_range
......
/*
* linux/include/asm-arm/arch-ebsa110/param.h
*/
-#define __KERNEL_HZ 200
+#define HZ 200
@@ -16,4 +16,4 @@
/*
* See 'time.h' for how the RTC HZ rate is set
*/
-#define __KERNEL_HZ 128
+#define HZ 128
@@ -59,10 +59,9 @@ typedef struct { volatile u32 offset[4096]; } __regbase;
# define __REG(x) __REGP(io_p2v(x))
#endif
-/* Let's kick gcc's ass again... */
-# define __REG2(x,y) \
-( __builtin_constant_p(y) ? (__REG((x) + (y))) \
-: (*(volatile u32 *)((u32)&__REG(x) + (y))) )
+/* With indexed regs we don't want to feed the index through io_p2v()
+   especially if it is a variable, otherwise horrible code will result. */
+# define __REG2(x,y) (*(volatile u32 *)((u32)&__REG(x) + (y)))
# define __PREG(x) (io_v2p((u32)&(x)))
......
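The reworked __REG2() translates the fixed register address once (via __REG(), i.e. io_p2v() on a constant) and only then adds the byte offset, so a variable index no longer drags the address translation into the generated code. A hedged, self-contained sketch of the same fixed-base-plus-offset idiom; the names and the translation formula below are hypothetical stand-ins, not the PXA definitions:

#include <stdint.h>

/* Hypothetical physical-to-virtual translation for a fixed register address;
 * stands in for the PXA io_p2v()/__REG() pair. */
#define REG_VBASE(phys)  ((uintptr_t)0xf2000000u + ((phys) & 0x01ffffffu))

/* The variable index stays outside the address translation. */
#define REG2(phys, off)  (*(volatile uint32_t *)(REG_VBASE(phys) + (off)))

static inline uint32_t gpio_bank_word(unsigned int gpio)
{
	/* e.g. select the 0/4/8 byte bank offset from a base register */
	return REG2(0x40E00000u, (gpio & 0x60) >> 3);
}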
@@ -12,19 +12,34 @@
#include <linux/config.h>
-#define PXA_IRQ_SKIP 7 /* The first 7 IRQs are not yet used */
+#ifdef CONFIG_PXA27x
+#define PXA_IRQ_SKIP 0
+#else
+#define PXA_IRQ_SKIP 7
+#endif
#define PXA_IRQ(x) ((x) - PXA_IRQ_SKIP)
-#define IRQ_HWUART PXA_IRQ(7) /* HWUART Transmit/Receive/Error */
+#define IRQ_SSP3 PXA_IRQ(0) /* SSP3 service request */
#define IRQ_MSL PXA_IRQ(1) /* MSL Interface interrupt */
#define IRQ_USBH2 PXA_IRQ(2) /* USB Host interrupt 1 (OHCI) */
#define IRQ_USBH1 PXA_IRQ(3) /* USB Host interrupt 2 (non-OHCI) */
#define IRQ_KEYPAD PXA_IRQ(4) /* Key pad controller */
#define IRQ_MEMSTK PXA_IRQ(5) /* Memory Stick interrupt */
#define IRQ_PWRI2C PXA_IRQ(6) /* Power I2C interrupt */
#define IRQ_HWUART PXA_IRQ(7) /* HWUART Transmit/Receive/Error (PXA26x) */
#define IRQ_OST_4_11 PXA_IRQ(7) /* OS timer 4-11 matches (PXA27x) */
#define IRQ_GPIO0 PXA_IRQ(8) /* GPIO0 Edge Detect */
#define IRQ_GPIO1 PXA_IRQ(9) /* GPIO1 Edge Detect */
-#define IRQ_GPIO_2_80 PXA_IRQ(10) /* GPIO[2-80] Edge Detect */
+#define IRQ_GPIO_2_x PXA_IRQ(10) /* GPIO[2-x] Edge Detect */
#define IRQ_USB PXA_IRQ(11) /* USB Service */
#define IRQ_PMU PXA_IRQ(12) /* Performance Monitoring Unit */
#define IRQ_I2S PXA_IRQ(13) /* I2S Interrupt */
#define IRQ_AC97 PXA_IRQ(14) /* AC97 Interrupt */
-#define IRQ_ASSP PXA_IRQ(15) /* Audio SSP Service Request */
-#define IRQ_NSSP PXA_IRQ(16) /* Network SSP Service Request */
+#define IRQ_ASSP PXA_IRQ(15) /* Audio SSP Service Request (PXA25x) */
+#define IRQ_USIM PXA_IRQ(15) /* Smart Card interface interrupt (PXA27x) */
+#define IRQ_NSSP PXA_IRQ(16) /* Network SSP Service Request (PXA25x) */
+#define IRQ_SSP2 PXA_IRQ(16) /* SSP2 interrupt (PXA27x) */
#define IRQ_LCD PXA_IRQ(17) /* LCD Controller Service Request */
#define IRQ_I2C PXA_IRQ(18) /* I2C Service Request */
#define IRQ_ICP PXA_IRQ(19) /* ICP Transmit/Receive/Error */
@@ -41,13 +56,28 @@
#define IRQ_RTC1Hz PXA_IRQ(30) /* RTC HZ Clock Tick */
#define IRQ_RTCAlrm PXA_IRQ(31) /* RTC Alarm */
-#define GPIO_2_80_TO_IRQ(x) \
-PXA_IRQ((x) - 2 + 32)
-#define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_80_TO_IRQ(x))
-#define IRQ_TO_GPIO_2_80(i) \
-((i) - PXA_IRQ(32) + 2)
-#define IRQ_TO_GPIO(i) ((i) - (((i) > IRQ_GPIO1) ? IRQ_GPIO(2) - 2 : IRQ_GPIO(0)))
+#ifdef CONFIG_PXA27x
+#define IRQ_TPM PXA_IRQ(32) /* TPM interrupt */
+#define IRQ_CAMERA PXA_IRQ(33) /* Camera Interface */
+#define PXA_INTERNAL_IRQS 34
+#else
+#define PXA_INTERNAL_IRQS 32
+#endif
#define GPIO_2_x_TO_IRQ(x) \
PXA_IRQ((x) - 2 + PXA_INTERNAL_IRQS)
#define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
#define IRQ_TO_GPIO_2_x(i) \
((i) - IRQ_GPIO(2) + 2)
#define IRQ_TO_GPIO(i) (((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i))
#if defined(CONFIG_PXA25x)
#define PXA_LAST_GPIO 80
#elif defined(CONFIG_PXA27x)
#define PXA_LAST_GPIO 127
#endif
/*
* The next 16 interrupts are for board specific purposes. Since
@@ -55,7 +85,7 @@
* these. If you need more, increase IRQ_BOARD_END, but keep it
* within sensible limits.
*/
-#define IRQ_BOARD_START (IRQ_GPIO(80) + 1)
+#define IRQ_BOARD_START (IRQ_GPIO(PXA_LAST_GPIO) + 1)
#define IRQ_BOARD_END (IRQ_BOARD_START + 16)
#define IRQ_SA1111_START (IRQ_BOARD_END)
......
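To see how the reshaped GPIO/IRQ mapping works, here is a worked check with PXA25x numbers taken from the definitions above (PXA_IRQ_SKIP = 7, PXA_INTERNAL_IRQS = 32): GPIO 5 is muxed, so IRQ_GPIO(5) = PXA_IRQ(5 - 2 + 32) = 35 - 7 = 28, and IRQ_TO_GPIO_2_x(28) = 28 - IRQ_GPIO(2) + 2 = 28 - 25 + 2 = 5, so the macros round-trip. A small self-contained C sketch of the same arithmetic:

#include <assert.h>

/* PXA25x values assumed, mirroring the macros above. */
enum { PXA_IRQ_SKIP = 7, PXA_INTERNAL_IRQS = 32 };

#define PXA_IRQ(x)          ((x) - PXA_IRQ_SKIP)
#define GPIO_2_x_TO_IRQ(x)  PXA_IRQ((x) - 2 + PXA_INTERNAL_IRQS)
#define IRQ_GPIO(x)         (((x) < 2) ? (PXA_IRQ(8) + (x)) : GPIO_2_x_TO_IRQ(x))
#define IRQ_TO_GPIO_2_x(i)  ((i) - IRQ_GPIO(2) + 2)

int main(void)
{
	assert(IRQ_GPIO(5) == 28);          /* PXA_IRQ(5 - 2 + 32) = 35 - 7 */
	assert(IRQ_TO_GPIO_2_x(28) == 5);   /* round-trips back to GPIO 5   */
	return 0;
}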
@@ -1134,15 +1134,15 @@ typedef void (*ExcpHndlr) (void) ;
#define _GEDR(x) __REG2(0x40E00048, ((x) & 0x60) >> 3)
#define _GAFR(x) __REG2(0x40E00054, ((x) & 0x70) >> 2)
-#define GPLR(x) ((((x) & 0x7f) < 96) ? _GPLR(x) : GPLR3)
-#define GPDR(x) ((((x) & 0x7f) < 96) ? _GPDR(x) : GPDR3)
-#define GPSR(x) ((((x) & 0x7f) < 96) ? _GPSR(x) : GPSR3)
-#define GPCR(x) ((((x) & 0x7f) < 96) ? _GPCR(x) : GPCR3)
-#define GRER(x) ((((x) & 0x7f) < 96) ? _GRER(x) : GRER3)
-#define GFER(x) ((((x) & 0x7f) < 96) ? _GFER(x) : GFER3)
-#define GEDR(x) ((((x) & 0x7f) < 96) ? _GEDR(x) : GEDR3)
-#define GAFR(x) ((((x) & 0x7f) < 96) ? _GAFR(x) : \
-((((x) & 0x7f) < 112) ? GAFR3_L : GAFR3_U))
+#define GPLR(x) (*((((x) & 0x7f) < 96) ? &_GPLR(x) : &GPLR3))
+#define GPDR(x) (*((((x) & 0x7f) < 96) ? &_GPDR(x) : &GPDR3))
+#define GPSR(x) (*((((x) & 0x7f) < 96) ? &_GPSR(x) : &GPSR3))
+#define GPCR(x) (*((((x) & 0x7f) < 96) ? &_GPCR(x) : &GPCR3))
+#define GRER(x) (*((((x) & 0x7f) < 96) ? &_GRER(x) : &GRER3))
+#define GFER(x) (*((((x) & 0x7f) < 96) ? &_GFER(x) : &GFER3))
+#define GEDR(x) (*((((x) & 0x7f) < 96) ? &_GEDR(x) : &GEDR3))
+#define GAFR(x) (*((((x) & 0x7f) < 96) ? &_GAFR(x) : \
+((((x) & 0x7f) < 112) ? &GAFR3_L : &GAFR3_U)))
#else
#define GPLR(x) __REG2(0x40E00000, ((x) & 0x60) >> 3)
......
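The GPLR()/GPDR()/... change exists because a plain conditional expression is not assignable in C; by selecting between the two registers' addresses and then dereferencing, the macro stays usable as an lvalue (e.g. GPSR(n) = bit) for GPIOs at or above 96 as well. A tiny generic illustration of the idiom with hypothetical names:

#include <stdint.h>

volatile uint32_t REG_LOW, REG_HIGH;	/* stand-ins for two register banks */

/* Not assignable: a ?: expression yields a value, not an object.
 * #define BANK_BAD(n)  ((n) < 96 ? REG_LOW : REG_HIGH)
 */

/* Assignable: pick an address, then dereference it. */
#define BANK(n)  (*((n) < 96 ? &REG_LOW : &REG_HIGH))

void set_bank_bit(unsigned int n)
{
	BANK(n) |= 1u << (n & 31);	/* works as an lvalue for both banks */
}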
@@ -22,6 +22,6 @@
* add a software pre-scaler to the evil timer systems.
*/
-#define __KERNEL_HZ 200
+#define HZ 200
#endif /* __ASM_ARCH_PARAM_H */
@@ -44,21 +44,6 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
@@ -77,21 +62,6 @@ static inline int atomic_add_return(int i, atomic_t *v)
return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
@@ -135,15 +105,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#define atomic_set(v,i) (((v)->counter) = (i))
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
v->counter += i;
local_irq_restore(flags);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
@@ -157,15 +118,6 @@ static inline int atomic_add_return(int i, atomic_t *v)
return val;
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
v->counter -= i;
local_irq_restore(flags);
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
@@ -190,8 +142,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#endif /* __LINUX_ARM_ARCH__ */
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
+#define atomic_add(i, v) (void) atomic_add_return(i, v)
+#define atomic_inc(v) (void) atomic_add_return(1, v)
+#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+#define atomic_dec(v) (void) atomic_sub_return(1, v)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
......
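The atomic.h change drops the separate atomic_add()/atomic_sub() bodies and derives them, like atomic_inc()/atomic_dec(), from the *_return variants, so only one ldrex/strex (or IRQ-disable) loop per direction has to be maintained. A user-space sketch of the same layering, using C11 atomics instead of the kernel primitives and hypothetical names:

#include <stdatomic.h>

typedef struct { atomic_int counter; } my_atomic_t;

/* The only real implementation: returns the new value. */
static inline int my_atomic_add_return(int i, my_atomic_t *v)
{
	return atomic_fetch_add(&v->counter, i) + i;
}

/* Everything else is layered on top, exactly as the new macros do. */
#define my_atomic_add(i, v)       ((void)my_atomic_add_return((i), (v)))
#define my_atomic_inc(v)          ((void)my_atomic_add_return(1, (v)))
#define my_atomic_sub(i, v)       ((void)my_atomic_add_return(-(i), (v)))
#define my_atomic_dec(v)          ((void)my_atomic_add_return(-1, (v)))
#define my_atomic_inc_and_test(v) (my_atomic_add_return(1, (v)) == 0)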
@@ -157,6 +157,7 @@ struct cpu_cache_fns {
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_page)(void *);
void (*dma_inv_range)(unsigned long, unsigned long);
@@ -175,6 +176,7 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/*
@@ -193,12 +195,14 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
/*
@@ -267,6 +271,14 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
}
}
/*
* flush_cache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
#define flush_cache_user_range(vma,start,end) \
__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
......
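flush_cache_user_range() widens the request to whole pages before handing it to __cpuc_coherent_user_range(): the start is rounded down with PAGE_MASK and the end rounded up with PAGE_ALIGN. A quick worked example with 4 KB pages and purely illustrative addresses:

/* PAGE_SIZE = 0x1000, PAGE_MASK = ~0xfff
 *
 *   start = 0x00012f34  ->  start & PAGE_MASK = 0x00012000
 *   end   = 0x0001410c  ->  PAGE_ALIGN(end)   = 0x00015000
 *
 * so the coherency operation covers three full pages, 0x12000-0x14fff.
 */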
@@ -10,14 +10,13 @@
#ifndef __ASM_PARAM_H
#define __ASM_PARAM_H
-#include <asm/arch/param.h> /* for HZ */
-#ifndef __KERNEL_HZ
-#define __KERNEL_HZ 100
-#endif
-#ifdef __KERNEL__
-# define HZ __KERNEL_HZ /* Internal kernel timer frequency */
+#ifdef __KERNEL__
+# include <asm/arch/param.h> /* for kernel version of HZ */
+# ifndef HZ
+# define HZ 100 /* Internal kernel timer frequency */
+# endif
# define USER_HZ 100 /* User interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#else
......
@@ -55,6 +55,38 @@
__val; \
})
#define __cacheid_present(val) (val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val) ((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val) ((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
#define cache_is_vivt() \
({ \
unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
})
#define cache_is_vipt() \
({ \
unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
__cacheid_present(__val) && __cacheid_vipt(__val); \
})
#define cache_is_vipt_nonaliasing() \
({ \
unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
__cacheid_present(__val) && \
__cacheid_vipt_nonaliasing(__val); \
})
#define cache_is_vipt_aliasing() \
({ \
unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
__cacheid_present(__val) && \
__cacheid_vipt_aliasing(__val); \
})
/*
* This is used to ensure the compiler did actually allocate the register we
* asked it for some inline assembly sequences. Apparently we can't trust
......
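The new __cacheid_* helpers decode the CP15 cache type register: bits [28:25] equal to 0xE mean a VIPT data cache, and bit 23 then distinguishes aliasing from non-aliasing; anything else (or a CPU without the register) is treated as VIVT. A small worked check of the bit tests on sample values; the values below are illustrative, not real CPU IDs:

#include <assert.h>

#define CACHEID_VIVT(val)           (((val) & (15 << 25)) != (14 << 25))
#define CACHEID_VIPT_NONALIASING(v) (((v) & (15 << 25 | 1 << 23)) == (14 << 25))
#define CACHEID_VIPT_ALIASING(v)    (((v) & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))

int main(void)
{
	assert(CACHEID_VIVT(0x0c000000));             /* bits[28:25] = 0x6 -> VIVT */
	assert(CACHEID_VIPT_NONALIASING(0x1c000000)); /* 0xE, bit 23 clear         */
	assert(CACHEID_VIPT_ALIASING(0x1c800000));    /* 0xE, bit 23 set           */
	return 0;
}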
@@ -475,51 +475,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
#include <linux/types.h>
#include <linux/syscalls.h>
static inline pid_t setsid(void)
{
return sys_setsid();
}
static inline long write(int fd, const char *buf, off_t count)
{
return sys_write(fd, buf, count);
}
static inline long read(int fd, char *buf, off_t count)
{
return sys_read(fd, buf, count);
}
static inline off_t lseek(int fd, off_t offset, int count)
{
return sys_lseek(fd, offset, count);
}
static inline long dup(int fd)
{
return sys_dup(fd);
}
static inline long open(const char *file, int flag, int mode)
{
return sys_open(file, flag, mode);
}
static inline long close(int fd)
{
return sys_close(fd);
}
static inline long _exit(int exitcode)
{
return sys_exit(exitcode);
}
static inline pid_t waitpid(pid_t pid, int *wait_stat, int options)
{
return sys_wait4((int)pid, wait_stat, options, NULL);
}
extern long execve(const char *file, char **argv, char **envp);
struct pt_regs;
......