Commit 6dab7ede authored by Linus Torvalds

Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
 "The largest thing in this set of changes is bringing back some of the
  ARMv3 code to fix a compile problem noticed on RiscPC, which we still
  support, even though we only support ARMv4 there.

  (The reason is that the system bus doesn't support ARMv4 half-word
  accesses, so we need the ARMv3 library code for this platform.)

  The rest are all quite minor fixes."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7490/1: Drop duplicate select for GENERIC_IRQ_PROBE
  ARM: Bring back ARMv3 IO and user access code
  ARM: 7489/1: errata: fix workaround for erratum #720789 on UP systems
  ARM: 7488/1: mm: use 5 bits for swapfile type encoding
  ARM: 7487/1: mm: avoid setting nG bit for user mappings that aren't present
  ARM: 7486/1: sched_clock: update epoch_cyc on resume
  ARM: 7484/1: Don't enable GENERIC_LOCKBREAK with ticket spinlocks
  ARM: 7483/1: vfp: only advertise VFPv4 in hwcaps if CONFIG_VFPv3 is enabled
  ARM: 7482/1: topology: fix section mismatch warning for init_cpu_topology
parents d9ec0fdc 89868730
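
Background sketch (not part of the commit): on a bus with no halfword cycles, a 16-bit port read has to be synthesized from a full 32-bit access. Roughly, in C, with illustrative names:

#include <stdint.h>

/*
 * Read 'count' halfwords from a port that only supports 32-bit bus
 * cycles: do a word read, then store the low halfword one byte at a
 * time, as the byte-wise path of io-readsw-armv3.S below does.
 */
static void readsw_no_halfword(volatile uint32_t *port,
                               uint8_t *buf, unsigned int count)
{
        while (count--) {
                uint32_t w = *port;               /* full 32-bit bus read */
                *buf++ = w & 0xff;                /* low byte of halfword */
                *buf++ = (w >> 8) & 0xff;         /* high byte of halfword */
        }
}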
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -38,7 +38,6 @@ config ARM
 	select HARDIRQS_SW_RESEND
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
-	select GENERIC_IRQ_PROBE
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HARDIRQS_SW_RESEND
 	select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -126,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT
 	bool
 	default y
 
-config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPT
-
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
+#define pte_none(pte)		(!pte_val(pte))
+#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte)	(0)
+
+#define pte_present_user(pte) \
+	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+	 (L_PTE_PRESENT | L_PTE_USER))
+
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
 {
@@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
-	if (addr >= TASK_SIZE)
-		set_pte_ext(ptep, pteval, 0);
-	else {
+	unsigned long ext = 0;
+
+	if (addr < TASK_SIZE && pte_present_user(pteval)) {
 		__sync_icache_dcache(pteval);
-		set_pte_ext(ptep, pteval, PTE_EXT_NG);
+		ext |= PTE_EXT_NG;
 	}
-}
 
-#define pte_none(pte)		(!pte_val(pte))
-#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte)	(0)
-
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+	set_pte_ext(ptep, pteval, ext);
+}
 
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  *
  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <--------------- offset --------------------> <- type --> 0 0 0
+ *   <--------------- offset ----------------------> < type -> 0 0 0
  *
- * This gives us up to 63 swap files and 32GB per swap file.  Note that
+ * This gives us up to 31 swap files and 64GB per swap file.  Note that
  * the offset field is always non-zero.
  */
 #define __SWP_TYPE_SHIFT	3
-#define __SWP_TYPE_BITS		6
+#define __SWP_TYPE_BITS		5
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
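
Spelling out the arithmetic in the swap-encoding hunk above: a 5-bit type field starting at bit 3 leaves bits 8..31 for the offset, i.e. 24 bits, and 2^24 pages at 4 KiB each is 64 GiB per swap file. A minimal decode sketch using the macros from the hunk (the helper names are illustrative, not the kernel's):

#define __SWP_TYPE_SHIFT        3
#define __SWP_TYPE_BITS         5
#define __SWP_TYPE_MASK         ((1 << __SWP_TYPE_BITS) - 1)            /* 0x1f */
#define __SWP_OFFSET_SHIFT      (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)    /* 8 */

/* illustrative decode helpers */
static inline unsigned int swp_type_of(unsigned long entry)
{
        return (entry >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;   /* 0..31 */
}

static inline unsigned long swp_offset_of(unsigned long entry)
{
        return entry >> __SWP_OFFSET_SHIFT;     /* 24-bit page offset */
}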
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
 
 extern void sched_clock_postinit(void);
 extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate);
 
 #endif
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -21,6 +21,8 @@ struct clock_data {
 	u32 epoch_cyc_copy;
 	u32 mult;
 	u32 shift;
+	bool suspended;
+	bool needs_suspend;
 };
 
 static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
 	u64 epoch_ns;
 	u32 epoch_cyc;
 
+	if (cd.suspended)
+		return cd.epoch_ns;
+
 	/*
 	 * Load the epoch_cyc and epoch_ns atomically. We do this by
 	 * ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)
 	update_sched_clock();
 }
 
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate)
+{
+	setup_sched_clock(read, bits, rate);
+	cd.needs_suspend = true;
+}
+
 void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
 	unsigned long r, w;
@@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)
 static int sched_clock_suspend(void)
 {
 	sched_clock_poll(sched_clock_timer.data);
+	if (cd.needs_suspend)
+		cd.suspended = true;
 	return 0;
 }
 
+static void sched_clock_resume(void)
+{
+	if (cd.needs_suspend) {
+		cd.epoch_cyc = read_sched_clock();
+		cd.epoch_cyc_copy = cd.epoch_cyc;
+		cd.suspended = false;
+	}
+}
+
 static struct syscore_ops sched_clock_ops = {
 	.suspend = sched_clock_suspend,
+	.resume = sched_clock_resume,
 };
 
 static int __init sched_clock_syscore_init(void)
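
The comment truncated in the cyc_to_sched_clock() hunk above describes a seqlock-style scheme: the writer stores epoch_cyc, then epoch_ns, then epoch_cyc_copy, with barriers in between, so a reader that sees epoch_cyc == epoch_cyc_copy knows the pair is consistent. The read side in this era's sched_clock.c is approximately (sketch, not part of this diff):

        u64 epoch_ns;
        u32 epoch_cyc;

        /* retry until no concurrent update tore the (epoch_cyc, epoch_ns) pair */
        do {
                epoch_cyc = cd.epoch_cyc;
                smp_rmb();
                epoch_ns = cd.epoch_ns;
                smp_rmb();
        } while (epoch_cyc != cd.epoch_cyc_copy);

This is also why sched_clock_resume() refreshes both cd.epoch_cyc and cd.epoch_cyc_copy before clearing cd.suspended: readers must never see a half-updated epoch after resume.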
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -321,7 +321,7 @@ void store_cpu_topology(unsigned int cpuid)
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
  */
-void init_cpu_topology(void)
+void __init init_cpu_topology(void)
 {
 	unsigned int cpu;
 
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -16,13 +16,30 @@ lib-y		:= backtrace.o changebit.o csumipv6.o csumpartial.o \
 		   call_with_stack.o
 
 mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
-mmu-y	+= copy_from_user.o copy_to_user.o
+
+# the code in uaccess.S is not preemption safe and
+# probably faster on ARMv3 only
+ifeq ($(CONFIG_PREEMPT),y)
+  mmu-y	+= copy_from_user.o copy_to_user.o
+else
+ifneq ($(CONFIG_CPU_32v3),y)
+  mmu-y	+= copy_from_user.o copy_to_user.o
+else
+  mmu-y	+= uaccess.o
+endif
+endif
 
 # using lib_ here won't override already available weak symbols
 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
 
 lib-$(CONFIG_MMU) += $(mmu-y)
 
-lib-y	+= io-readsw-armv4.o io-writesw-armv4.o
+ifeq ($(CONFIG_CPU_32v3),y)
+  lib-y	+= io-readsw-armv3.o io-writesw-armv3.o
+else
+  lib-y	+= io-readsw-armv4.o io-writesw-armv4.o
+endif
+
 lib-$(CONFIG_ARCH_RPC)	+= ecard.o io-acorn.o floppydma.o
 lib-$(CONFIG_ARCH_SHARK)	+= io-shark.o
/*
* linux/arch/arm/lib/io-readsw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
mov r2, lr
b panic
.Linsw_bad_align_msg:
.asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align
.Linsw_align: tst r1, #1
bne .Linsw_bad_alignment
ldr r3, [r0]
strb r3, [r1], #1
mov r3, r3, lsr #8
strb r3, [r1], #1
subs r2, r2, #1
moveq pc, lr
ENTRY(__raw_readsw)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
tst r1, #3
bne .Linsw_align
.Linsw_aligned: mov ip, #0xff
orr ip, ip, ip, lsl #8
stmfd sp!, {r4, r5, r6, lr}
subs r2, r2, #8
bmi .Lno_insw_8
.Linsw_8_lp: ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16
ldr r5, [r0]
and r5, r5, ip
ldr r6, [r0]
orr r5, r5, r6, lsl #16
ldr r6, [r0]
and r6, r6, ip
ldr lr, [r0]
orr r6, r6, lr, lsl #16
stmia r1!, {r3 - r6}
subs r2, r2, #8
bpl .Linsw_8_lp
tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
ldr r4, [r0]
and r4, r4, ip
ldr r5, [r0]
orr r4, r4, r5, lsl #16
stmia r1!, {r3, r4}
.Lno_insw_4: tst r2, #2
beq .Lno_insw_2
ldr r3, [r0]
and r3, r3, ip
ldr r4, [r0]
orr r3, r3, r4, lsl #16
str r3, [r1], #4
.Lno_insw_2: tst r2, #1
ldrne r3, [r0]
strneb r3, [r1], #1
movne r3, r3, lsr #8
strneb r3, [r1]
ldmfd sp!, {r4, r5, r6, pc}
/*
* linux/arch/arm/lib/io-writesw-armv3.S
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
mov r2, lr
b panic
.Loutsw_bad_align_msg:
.asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
.align
.Loutsw_align: tst r1, #1
bne .Loutsw_bad_alignment
add r1, r1, #2
ldr r3, [r1, #-4]
mov r3, r3, lsr #16
orr r3, r3, r3, lsl #16
str r3, [r0]
subs r2, r2, #1
moveq pc, lr
ENTRY(__raw_writesw)
teq r2, #0 @ do we have to check for the zero len?
moveq pc, lr
tst r1, #3
bne .Loutsw_align
stmfd sp!, {r4, r5, r6, lr}
subs r2, r2, #8
bmi .Lno_outsw_8
.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6}
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r5, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r5, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r6, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r6, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
subs r2, r2, #8
bpl .Loutsw_8_lp
tst r2, #7
ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
ldmia r1!, {r3, r4}
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
mov ip, r4, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r4, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
.Lno_outsw_4: tst r2, #2
beq .Lno_outsw_2
ldr r3, [r1], #4
mov ip, r3, lsl #16
orr ip, ip, ip, lsr #16
str ip, [r0]
mov ip, r3, lsr #16
orr ip, ip, ip, lsl #16
str ip, [r0]
.Lno_outsw_2: tst r2, #1
ldrne r3, [r1]
movne ip, r3, lsl #16
orrne ip, ip, ip, lsr #16
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}
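
The write path above uses the complementary trick: each 16-bit value is replicated into both halves of a register before a single 32-bit store (the mov/orr pairs with lsl #16 and lsr #16), so the device latches the correct data whichever half of the bus it samples. In rough C (illustrative names, not part of the commit):

#include <stdint.h>

static void writesw_no_halfword(volatile uint32_t *port,
                                const uint16_t *buf, unsigned int count)
{
        while (count--) {
                uint32_t w = *buf++;
                *port = w | (w << 16);  /* halfword copied into both halves */
        }
}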
(diff collapsed: new file, likely arch/arm/lib/uaccess.S, the ARMv3 user access code referenced by the Makefile hunk above)
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval)
 	struct page *page;
 	struct address_space *mapping;
 
-	if (!pte_present_user(pteval))
-		return;
 	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
 		/* only flush non-aliasing VIPT caches for exec mappings */
 		return;
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	dsb
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
-#ifdef CONFIG_ARM_ERRATA_720789
-	mov	r3, #0
-#else
 	asid	r3, r3				@ mask ASID
+#ifdef CONFIG_ARM_ERRATA_720789
+	ALT_SMP(W(mov)	r3, #0	)
+	ALT_UP(W(nop)		)
 #endif
 	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
 	mov	r1, r1, lsl #PAGE_SHIFT
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -719,8 +719,10 @@ static int __init vfp_init(void)
 			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
 				elf_hwcap |= HWCAP_NEON;
 #endif
+#ifdef CONFIG_VFPv3
 			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
 				elf_hwcap |= HWCAP_VFPv4;
+#endif
 		}
 	}
 	return 0;