Commit 05de55a4 authored by Linus Torvalds

Merge bk://bk.linux1394.org/ieee1394-2.6

into ppc970.osdl.org:/home/torvalds/v2.5/linux
parents 56880b13 6ed71734
@@ -421,9 +421,9 @@ config PM
Power Management is most important for battery powered laptop
computers; if you have a laptop, check out the Linux Laptop home
-page on the WWW at
-<http://www.cs.utexas.edu/users/kharker/linux-laptop/> and the
-Battery Powered Linux mini-HOWTO, available from
+page on the WWW at <http://www.linux-on-laptops.com/> or
+Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>
+and the Battery Powered Linux mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
Note that, even if you say N here, Linux on the x86 architecture
......
@@ -91,10 +91,28 @@ config CPU_ARM922T
Say Y if you want support for the ARM922T processor.
Otherwise, say N.
# ARM925T
config CPU_ARM925T
bool
depends on ARCH_OMAP1510
default y
select CPU_32v4
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
select CPU_COPY_V4WB
select CPU_TLB_V4WBI
help
The ARM925T is a mix between the ARM920T and ARM926T, but with
different instruction and data caches. It is used in TI's OMAP
device family.
Say Y if you want support for the ARM925T processor.
Otherwise, say N.
# ARM926T
config CPU_ARM926T
bool "Support ARM926T processor"
-depends on ARCH_INTEGRATOR
+depends on ARCH_INTEGRATOR || ARCH_OMAP1610
select CPU_32v5
select CPU_ABRT_EV5TJ
select CPU_COPY_V4WB
@@ -288,7 +306,7 @@ comment "Processor Features"
config ARM_THUMB
bool "Support Thumb user binaries"
-depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE
+depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE
default y
help
Say Y if you want to include kernel support for running user space
@@ -311,21 +329,21 @@ config CPU_BIG_ENDIAN
config CPU_ICACHE_DISABLE
bool "Disable I-Cache"
-depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020
+depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
help
Say Y here to disable the processor instruction cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_DISABLE
bool "Disable D-Cache"
-depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020
+depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
help
Say Y here to disable the processor data cache. Unless
you have a reason not to or are unsure, say N.
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
-depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM926T || CPU_ARM1020) && !CPU_DISABLE_DCACHE
+depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DISABLE_DCACHE
help
Say Y here to use the data cache in writethough mode. Unless you
specifically require this or are unsure, say N.
......
@@ -39,6 +39,7 @@ obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o
obj-$(CONFIG_CPU_ARM920T) += proc-arm920.o
obj-$(CONFIG_CPU_ARM922T) += proc-arm922.o
obj-$(CONFIG_CPU_ARM925T) += proc-arm925.o
obj-$(CONFIG_CPU_ARM926T) += proc-arm926.o
obj-$(CONFIG_CPU_ARM1020) += proc-arm1020.o
obj-$(CONFIG_CPU_ARM1020E) += proc-arm1020e.o
......
/*
* linux/arch/arm/mm/arm925.S: MMU functions for ARM925
*
* Copyright (C) 1999,2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* Copyright (C) 2002 RidgeRun, Inc.
* Copyright (C) 2002-2003 MontaVista Software, Inc.
*
* Update for Linux-2.6 and cache flush improvements
* Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* These are the low level assembler for performing cache and TLB
* functions on the arm925.
*
* CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
*
* Some additional notes based on deciphering the TI TRM on OMAP-5910:
*
* NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
* entry mode" must be 0 to flush the entries in both segments
* at once. This is the default value. See TRM 2-20 and 2-24 for
* more information.
*
* NOTE2: Default is the "D-cache clean and flush entry mode". It looks
* like the "Transparent mode" must be on for partial cache flushes
* to work in this mode. This mode only works with 16-bit external
* memory. See TRM 2-24 for more information.
*
* NOTE3: Write-back cache flushing seems to be flakey with devices using
* direct memory access, such as USB OHCI. The workaround is to use
* write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
* the default for OMAP-1510).
*/
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 16
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 2
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 256
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 8192
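/*
 * (For reference: with the geometry above, the whole D-cache is
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE = 2 * 256 * 16
 * bytes = 8 KB, which is where the 8192 byte limit comes from.)
 */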
.text
/*
* cpu_arm925_proc_init()
*/
ENTRY(cpu_arm925_proc_init)
mov pc, lr
/*
* cpu_arm925_proc_fin()
*/
ENTRY(cpu_arm925_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl arm925_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_arm925_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_arm925_reset)
/* Send software reset to MPU and DSP */
mov ip, #0xff000000
orr ip, ip, #0x00fe0000
orr ip, ip, #0x0000ce00
mov r4, #1
strh r4, [ip, #0x10]
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_arm925_do_idle()
*
* Called with IRQs disabled
*/
.align 10
ENTRY(cpu_arm925_do_idle)
mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bic r2, r1, #1 << 12
mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
mov pc, lr
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
* address space.
*/
ENTRY(arm925_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(arm925_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
/* Flush entries in both segments at once, see NOTE1 above */
mov r3, #(CACHE_DENTRIES - 1) << 4 @ 256 entries in segment
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 4
bcs 2b @ entries 255 to 0
#endif
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_user_cache_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address range.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
ENTRY(arm925_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bgt __flush_whole_cache
1: tst r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
#else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
#endif
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm925_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(arm925_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm925_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
#endif
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm925_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm925_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
#else
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(arm925_cache_fns)
.long arm925_flush_kern_cache_all
.long arm925_flush_user_cache_all
.long arm925_flush_user_cache_range
.long arm925_coherent_kern_range
.long arm925_flush_kern_dcache_page
.long arm925_dma_inv_range
.long arm925_dma_clean_range
.long arm925_dma_flush_range
ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_arm925_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_arm925_switch_mm)
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
/* Flush entries in both segments at once, see NOTE1 above */
mov r3, #(CACHE_DENTRIES - 1) << 4 @ 256 entries in segment
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 4
bcs 2b @ entries 255 to 0
#endif
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_arm925_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm925_set_pte)
str r1, [r0], #-2048 @ linux version
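@ The Linux view of the PTE is written at r0 above; the post-indexed
@ -2048 then leaves r0 pointing at the matching hardware PTE, which is
@ built up in r2 below and written out after the permission checks.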
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
eor r3, r2, #0x0a @ C & small page?
tst r3, #0x0b
biceq r2, r2, #4
#endif
str r2, [r0] @ hardware version
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
__INIT
.type __arm925_setup, #function
__arm925_setup:
mov r0, #0
#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
orr r0,r0,#1 << 7
#endif
/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
orr r0,r0,#1 << 1 @ transparent mode on
mcr p15, 0, r0, c15, c1, 0 @ write TI config register
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
mcr p15, 0, r4, c2, c0 @ load page table pointer
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r0, #4 @ disable write-back on caches explicitly
mcr p15, 7, r0, c15, c0, 0
#endif
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mrc p15, 0, r0, c1, c0 @ get control register v4
/*
* Clear out 'unwanted' bits (then put them in if we need them)
*/
@ VI ZFRS BLDP WCAM
bic r0, r0, #0x0e00
bic r0, r0, #0x0002
bic r0, r0, #0x000c
bic r0, r0, #0x1000 @ ...0 000. .... 000.
/*
* Turn on what we want
*/
orr r0, r0, #0x0031
orr r0, r0, #0x2100 @ ..1. ...1 ..11 ...1
/* Writebuffer on */
orr r0, r0, #0x0008 @ .... .... .... 1...
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .1.. .... .... ....
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
orr r0, r0, #0x0004 @ .... .... .... .1..
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
orr r0, r0, #0x1000 @ ...1 .... .... ....
#endif
mov pc, lr
.size __arm925_setup, . - __arm925_setup
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.type arm925_processor_functions, #object
arm925_processor_functions:
.word v4t_early_abort
.word cpu_arm925_proc_init
.word cpu_arm925_proc_fin
.word cpu_arm925_reset
.word cpu_arm925_do_idle
.word cpu_arm925_dcache_clean_area
.word cpu_arm925_switch_mm
.word cpu_arm925_set_pte
.size arm925_processor_functions, . - arm925_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4t"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm925_name, #object
cpu_arm925_name:
.ascii "ARM925T"
#ifndef CONFIG_CPU_ICACHE_DISABLE
.ascii "i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
.ascii "d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
.ascii "(wt)"
#else
.ascii "(wb)"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
.ascii "RR"
#endif
#endif
.ascii "\0"
.size cpu_arm925_name, . - cpu_arm925_name
.align
.section ".proc.info", #alloc, #execinstr
.type __arm925_proc_info,#object
__arm925_proc_info:
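@ CPU ID value and mask, matched against the CP15 main ID register
@ at boot to select this proc_info entry.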
.long 0x54029250
.long 0xfffffff0
.long 0x00000c12 @ mmuflags
b __arm925_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm925_name
.long arm925_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm925_cache_fns
.size __arm925_proc_info, . - __arm925_proc_info
.type __arm915_proc_info,#object
__arm915_proc_info:
.long 0x54029150
.long 0xfffffff0
.long 0x00000c12 @ mmuflags
b __arm925_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm925_name
.long arm925_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm925_cache_fns
.size __arm915_proc_info, . - __arm915_proc_info
@@ -311,9 +311,6 @@ static struct i2c_adapter ioc_ops = {
.algo_data = &ioc_data,
.client_register = ioc_client_reg,
.client_unregister = ioc_client_unreg,
.dev = {
.name = "IOC/IOMD",
},
};
static int __init i2c_ioc_init(void)
......
@@ -37,8 +37,7 @@ static struct i2c_client_address_data addr_data = {
#define DAT(x) ((unsigned int)(x->dev.driver_data))
static int
-pcf8583_attach(struct i2c_adapter *adap, int addr, unsigned short flags,
-int kind)
+pcf8583_attach(struct i2c_adapter *adap, int addr, int kind)
{
struct i2c_client *c;
unsigned char buf[1], ad[1] = { 0 };
@@ -51,13 +50,11 @@ pcf8583_attach(struct i2c_adapter *adap, int addr, unsigned short flags,
if (!c)
return -ENOMEM;
-strcpy(c->dev.name, "PCF8583");
+memset(c, 0, sizeof(*c));
c->id = pcf8583_driver.id;
c->flags = 0;
c->addr = addr;
c->adapter = adap;
c->driver = &pcf8583_driver;
c->dev.driver_data = NULL;
if (i2c_transfer(c->adapter, msgs, 2) == 2)
DAT(c) = buf[0];
......
@@ -33,7 +33,6 @@ struct amba_kmi_port {
unsigned int irq;
unsigned int divisor;
unsigned int open;
struct resource *res;
};
static irqreturn_t amba_kmi_int(int irq, void *dev_id, struct pt_regs *regs)
@@ -97,10 +96,17 @@ static void amba_kmi_close(struct serio *io)
static int amba_kmi_probe(struct amba_device *dev, void *id)
{
struct amba_kmi_port *kmi;
int ret;
ret = amba_request_regions(dev, NULL);
if (ret)
return ret;
kmi = kmalloc(sizeof(struct amba_kmi_port), GFP_KERNEL);
-if (!kmi)
-return -ENOMEM;
+if (!kmi) {
+ret = -ENOMEM;
+goto out;
+}
memset(kmi, 0, sizeof(struct amba_kmi_port));
@@ -112,26 +118,24 @@ static int amba_kmi_probe(struct amba_device *dev, void *id)
kmi->io.phys = dev->dev.bus_id;
kmi->io.driver = kmi;
kmi->res = request_mem_region(dev->res.start, KMI_SIZE, "kmi-pl050");
if (!kmi->res) {
kfree(kmi);
return -EBUSY;
}
kmi->base = ioremap(dev->res.start, KMI_SIZE);
if (!kmi->base) {
-release_resource(kmi->res);
-kfree(kmi);
-return -ENOMEM;
+ret = -ENOMEM;
+goto out;
}
-kmi->irq = dev->irq;
+kmi->irq = dev->irq[0];
kmi->divisor = 24 / 8 - 1;
amba_set_drvdata(dev, kmi);
serio_register_port(&kmi->io);
return 0;
out:
kfree(kmi);
amba_release_regions(dev);
return ret;
}
static int amba_kmi_remove(struct amba_device *dev)
@@ -142,8 +146,8 @@ static int amba_kmi_remove(struct amba_device *dev)
serio_unregister_port(&kmi->io);
iounmap(kmi->base);
release_resource(kmi->res);
kfree(kmi);
amba_release_regions(dev);
return 0;
}
......
@@ -324,6 +324,29 @@ config SERIAL_UART00_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
config SERIAL_PXA
bool "PXA serial port support"
depends on ARM && ARCH_PXA
select SERIAL_CORE
help
If you have a machine based on an Intel XScale PXA2xx CPU you
can enable its onboard serial ports by enabling this option.
config SERIAL_PXA_CONSOLE
bool "Console on PXA serial port"
depends on SERIAL_PXA
select SERIAL_CORE_CONSOLE
help
If you have enabled the serial port on the Intel XScale PXA
CPU you can make it the console by answering Y to this option.
Even if you say Y here, the currently visible virtual console
(/dev/tty0) will still be used as the system console by default, but
you can alter that using a kernel command line option such as
"console=ttySA0". (Try "man bootparam" or see the documentation of
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
config SERIAL_SA1100
bool "SA1100 serial port support"
depends on ARM && ARCH_SA1100
......
@@ -19,6 +19,7 @@ obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
obj-$(CONFIG_SERIAL_ANAKIN) += anakin.o
obj-$(CONFIG_SERIAL_AMBA) += amba.o
obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
obj-$(CONFIG_SERIAL_PXA) += pxa.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_UART00) += uart00.o
obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o
......
/*
* linux/drivers/serial/pxa.c
*
* Based on drivers/serial/8250.c by Russell King.
*
* Author: Nicolas Pitre
* Created: Feb 20, 2003
* Copyright: (C) 2003 Monta Vista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Note 1: This driver is made separate from the already too overloaded
* 8250.c because it needs some quirks of its own and that'll make it
* easier to add DMA support.
*
* Note 2: I'm too sick of device allocation policies for serial ports.
* If someone else wants to request an "official" allocation of major/minor
* for this driver please be my guest. And don't forget that new hardware
* to come from Intel might have more than 3 or 4 of those UARTs. Let's
* hope for a better port registration and dynamic device allocation scheme
* with the serial core maintainer satisfaction to appear soon.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/serial.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#if defined(CONFIG_SERIAL_PXA_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
struct uart_pxa_port {
struct uart_port port;
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
unsigned int lsr_break_flag;
unsigned int cken;
char *name;
};
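/*
 * The PXA UART registers are spaced four bytes apart, so the generic
 * UART_xxx offsets are shifted left by two before each MMIO access.
 */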
static inline unsigned int serial_in(struct uart_pxa_port *up, int offset)
{
offset <<= 2;
return readl(up->port.membase + offset);
}
static inline void serial_out(struct uart_pxa_port *up, int offset, int value)
{
offset <<= 2;
writel(value, up->port.membase + offset);
}
static void serial_pxa_enable_ms(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
}
static void serial_pxa_stop_tx(struct uart_port *port, unsigned int tty_stop)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static void serial_pxa_stop_rx(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
}
static inline void
receive_chars(struct uart_pxa_port *up, int *status, struct pt_regs *regs)
{
struct tty_struct *tty = up->port.info->tty;
unsigned char ch;
int max_count = 256;
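/* Bound the work done per interrupt: drain at most 256 characters. */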
do {
if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
/*
* FIXME: Deadlock can happen here if we're a
* low-latency port. We're holding the per-port
* spinlock, and we call flush_to_ldisc->
* n_tty_receive_buf->n_tty_receive_char->
* opost->uart_put_char.
*/
tty->flip.work.func((void *)tty);
if (tty->flip.count >= TTY_FLIPBUF_SIZE)
return; // if TTY_DONT_FLIP is set
}
ch = serial_in(up, UART_RX);
*tty->flip.char_buf_ptr = ch;
*tty->flip.flag_buf_ptr = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
UART_LSR_FE | UART_LSR_OE))) {
/*
* For statistics only
*/
if (*status & UART_LSR_BI) {
*status &= ~(UART_LSR_FE | UART_LSR_PE);
up->port.icount.brk++;
/*
* We do the SysRQ and SAK checking
* here because otherwise the break
* may get masked by ignore_status_mask
* or read_status_mask.
*/
if (uart_handle_break(&up->port))
goto ignore_char;
} else if (*status & UART_LSR_PE)
up->port.icount.parity++;
else if (*status & UART_LSR_FE)
up->port.icount.frame++;
if (*status & UART_LSR_OE)
up->port.icount.overrun++;
/*
* Mask off conditions which should be ignored.
*/
*status &= up->port.read_status_mask;
#ifdef CONFIG_SERIAL_PXA_CONSOLE
if (up->port.line == up->port.cons->index) {
/* Recover the break flag from console xmit */
*status |= up->lsr_break_flag;
up->lsr_break_flag = 0;
}
#endif
if (*status & UART_LSR_BI) {
*tty->flip.flag_buf_ptr = TTY_BREAK;
} else if (*status & UART_LSR_PE)
*tty->flip.flag_buf_ptr = TTY_PARITY;
else if (*status & UART_LSR_FE)
*tty->flip.flag_buf_ptr = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch, regs))
goto ignore_char;
if ((*status & up->port.ignore_status_mask) == 0) {
tty->flip.flag_buf_ptr++;
tty->flip.char_buf_ptr++;
tty->flip.count++;
}
if ((*status & UART_LSR_OE) &&
tty->flip.count < TTY_FLIPBUF_SIZE) {
/*
* Overrun is special, since it's reported
* immediately, and doesn't affect the current
* character.
*/
*tty->flip.flag_buf_ptr = TTY_OVERRUN;
tty->flip.flag_buf_ptr++;
tty->flip.char_buf_ptr++;
tty->flip.count++;
}
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
tty_flip_buffer_push(tty);
}
static void transmit_chars(struct uart_pxa_port *up)
{
struct circ_buf *xmit = &up->port.info->xmit;
int count;
if (up->port.x_char) {
serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
serial_pxa_stop_tx(&up->port, 0);
return;
}
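/* Refill at most half of the FIFO per transmit interrupt. */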
count = up->port.fifosize / 2;
do {
serial_out(up, UART_TX, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
if (uart_circ_empty(xmit))
break;
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
if (uart_circ_empty(xmit))
serial_pxa_stop_tx(&up->port, 0);
}
static void serial_pxa_start_tx(struct uart_port *port, unsigned int tty_start)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
if (!(up->ier & UART_IER_THRI)) {
up->ier |= UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
}
static inline void check_modem_status(struct uart_pxa_port *up)
{
int status;
status = serial_in(up, UART_MSR);
if ((status & UART_MSR_ANY_DELTA) == 0)
return;
if (status & UART_MSR_TERI)
up->port.icount.rng++;
if (status & UART_MSR_DDSR)
up->port.icount.dsr++;
if (status & UART_MSR_DDCD)
uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
if (status & UART_MSR_DCTS)
uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
wake_up_interruptible(&up->port.info->delta_msr_wait);
}
/*
* This handles the interrupt from one port.
*/
static inline irqreturn_t
serial_pxa_irq(int irq, void *dev_id, struct pt_regs *regs)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)dev_id;
unsigned int iir, lsr;
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT)
return IRQ_NONE;
lsr = serial_in(up, UART_LSR);
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr, regs);
check_modem_status(up);
if (lsr & UART_LSR_THRE)
transmit_chars(up);
return IRQ_HANDLED;
}
static unsigned int serial_pxa_tx_empty(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
unsigned int ret;
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
return ret;
}
static unsigned int serial_pxa_get_mctrl(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
unsigned char status;
unsigned int ret;
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
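/*
 * NOTE: the unconditional return above means the MSR-based code below
 * is currently unreachable; CTS, DSR and DCD are simply reported as
 * asserted.
 */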
spin_lock_irqsave(&up->port.lock, flags);
status = serial_in(up, UART_MSR);
spin_unlock_irqrestore(&up->port.lock, flags);
ret = 0;
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
if (status & UART_MSR_RI)
ret |= TIOCM_RNG;
if (status & UART_MSR_DSR)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
return ret;
}
static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char mcr = 0;
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
mcr |= up->mcr;
serial_out(up, UART_MCR, mcr);
}
static void serial_pxa_break_ctl(struct uart_port *port, int break_state)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
else
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
#if 0
static void serial_pxa_dma_init(struct pxa_uart *up)
{
up->rxdma =
pxa_request_dma(up->name, DMA_PRIO_LOW, pxa_receive_dma, up);
if (up->rxdma < 0)
goto out;
up->txdma =
pxa_request_dma(up->name, DMA_PRIO_LOW, pxa_transmit_dma, up);
if (up->txdma < 0)
goto err_txdma;
up->dmadesc = kmalloc(4 * sizeof(pxa_dma_desc), GFP_KERNEL);
if (!up->dmadesc)
goto err_alloc;
/* ... */
err_alloc:
pxa_free_dma(up->txdma);
err_txdma:
pxa_free_dma(up->rxdma);
out:
return;
}
#endif
static int serial_pxa_startup(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
int retval;
up->mcr = 0;
/*
* Allocate the IRQ
*/
retval = request_irq(up->port.irq, serial_pxa_irq, 0, up->name, up);
if (retval)
return retval;
CKEN |= up->cken;
udelay(1);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
/*
* Clear the interrupt registers.
*/
(void) serial_in(up, UART_LSR);
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
/*
* Now, initialize the UART
*/
serial_out(up, UART_LCR, UART_LCR_WLEN8);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl |= TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
*/
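/*
 * Note that on the PXA the UART unit enable bit (UART_IER_UUE) lives
 * in IER, so it is kept set here and again in set_termios().
 */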
up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE;
serial_out(up, UART_IER, up->ier);
/*
* And clear the interrupt registers again for luck.
*/
(void) serial_in(up, UART_LSR);
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
return 0;
}
static void serial_pxa_shutdown(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned long flags;
free_irq(up->port.irq, up);
/*
* Disable interrupts from this port
*/
up->ier = 0;
serial_out(up, UART_IER, 0);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl &= ~TIOCM_OUT2;
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
spin_unlock_irqrestore(&up->port.lock, flags);
/*
* Disable break condition and FIFOs
*/
serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
CKEN &= ~up->cken;
}
static void
serial_pxa_set_termios(struct uart_port *port, struct termios *termios,
struct termios *old)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
switch (termios->c_cflag & CSIZE) {
case CS5:
cval = 0x00;
break;
case CS6:
cval = 0x01;
break;
case CS7:
cval = 0x02;
break;
default:
case CS8:
cval = 0x03;
break;
}
if (termios->c_cflag & CSTOPB)
cval |= 0x04;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
/*
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
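/*
 * Select the receive FIFO trigger level from the requested baud rate:
 * below 2400 baud use a 1-byte trigger (low latency for slow links),
 * otherwise an 8-byte trigger to cut down the interrupt rate.
 */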
if ((up->port.uartclk / quot) < (2400 * 16))
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1;
else
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
/*
* Ensure the port will be enabled.
* This is required especially for serial console.
*/
up->ier |= IER_UUE;
/*
* Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, quot);
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_OE;
}
/*
* ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= UART_LSR_DR;
/*
* CTS flow control flag and modem status interrupts
*/
up->ier &= ~UART_IER_MSI;
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval; /* Save LCR */
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
serial_out(up, UART_FCR, fcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static void
serial_pxa_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
if (state) {
/* sleep */
} else {
/* wake */
}
}
static void serial_pxa_release_port(struct uart_port *port)
{
}
static int serial_pxa_request_port(struct uart_port *port)
{
return 0;
}
static void serial_pxa_config_port(struct uart_port *port, int flags)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
up->port.type = PORT_PXA;
}
static int
serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* we don't want the core code to modify any port params */
return -EINVAL;
}
static const char *
serial_pxa_type(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
return up->name;
}
#ifdef CONFIG_SERIAL_PXA_CONSOLE
extern struct uart_pxa_port serial_pxa_ports[];
extern struct uart_driver serial_pxa_reg;
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
/*
* Wait for transmitter & holding register to empty
*/
static inline void wait_for_xmitr(struct uart_pxa_port *up)
{
unsigned int status, tmout = 10000;
/* Wait up to 10ms for the character(s) to be sent. */
do {
status = serial_in(up, UART_LSR);
if (status & UART_LSR_BI)
up->lsr_break_flag = UART_LSR_BI;
if (--tmout == 0)
break;
udelay(1);
} while ((status & BOTH_EMPTY) != BOTH_EMPTY);
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
tmout = 1000000;
while (--tmout &&
((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
udelay(1);
}
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*/
static void
serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_pxa_port *up = &serial_pxa_ports[co->index];
unsigned int ier;
int i;
/*
* First save the IER, then disable the interrupts
*/
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, UART_IER_UUE);
/*
* Now, do each character
*/
for (i = 0; i < count; i++, s++) {
wait_for_xmitr(up);
/*
* Send the character out.
* If a LF, also do CR...
*/
serial_out(up, UART_TX, *s);
if (*s == 10) {
wait_for_xmitr(up);
serial_out(up, UART_TX, 13);
}
}
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
}
static int __init
serial_pxa_console_setup(struct console *co, char *options)
{
struct uart_pxa_port *up;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
if (co->index == -1 || co->index >= serial_pxa_reg.nr)
co->index = 0;
up = &serial_pxa_ports[co->index];
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_pxa_console = {
.name = "ttyS",
.write = serial_pxa_console_write,
.device = uart_console_device,
.setup = serial_pxa_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_pxa_reg,
};
static int __init
serial_pxa_console_init(void)
{
register_console(&serial_pxa_console);
return 0;
}
console_initcall(serial_pxa_console_init);
#define PXA_CONSOLE &serial_pxa_console
#else
#define PXA_CONSOLE NULL
#endif
struct uart_ops serial_pxa_pops = {
.tx_empty = serial_pxa_tx_empty,
.set_mctrl = serial_pxa_set_mctrl,
.get_mctrl = serial_pxa_get_mctrl,
.stop_tx = serial_pxa_stop_tx,
.start_tx = serial_pxa_start_tx,
.stop_rx = serial_pxa_stop_rx,
.enable_ms = serial_pxa_enable_ms,
.break_ctl = serial_pxa_break_ctl,
.startup = serial_pxa_startup,
.shutdown = serial_pxa_shutdown,
.set_termios = serial_pxa_set_termios,
.pm = serial_pxa_pm,
.type = serial_pxa_type,
.release_port = serial_pxa_release_port,
.request_port = serial_pxa_request_port,
.config_port = serial_pxa_config_port,
.verify_port = serial_pxa_verify_port,
};
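/*
 * The three on-chip UARTs below (FFUART, BTUART, STUART) all run from
 * a 14.7456 MHz clock, i.e. uartclk = 921600 * 16, for a maximum baud
 * rate of 921600.
 */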
static struct uart_pxa_port serial_pxa_ports[] = {
{ /* FFUART */
.name = "FFUART",
.cken = CKEN6_FFUART,
.port = {
.type = PORT_PXA,
.iotype = SERIAL_IO_MEM,
.membase = (void *)&FFUART,
.mapbase = __PREG(FFUART),
.irq = IRQ_FFUART,
.uartclk = 921600 * 16,
.fifosize = 64,
.flags = ASYNC_SKIP_TEST,
.ops = &serial_pxa_pops,
.line = 0,
},
}, { /* BTUART */
.name = "BTUART",
.cken = CKEN7_BTUART,
.port = {
.type = PORT_PXA,
.iotype = SERIAL_IO_MEM,
.membase = (void *)&BTUART,
.mapbase = __PREG(BTUART),
.irq = IRQ_BTUART,
.uartclk = 921600 * 16,
.fifosize = 64,
.flags = ASYNC_SKIP_TEST,
.ops = &serial_pxa_pops,
.line = 1,
},
}, { /* STUART */
.name = "STUART",
.cken = CKEN5_STUART,
.port = {
.type = PORT_PXA,
.iotype = SERIAL_IO_MEM,
.membase = (void *)&STUART,
.mapbase = __PREG(STUART),
.irq = IRQ_STUART,
.uartclk = 921600 * 16,
.fifosize = 64,
.flags = ASYNC_SKIP_TEST,
.ops = &serial_pxa_pops,
.line = 2,
},
}
};
static struct uart_driver serial_pxa_reg = {
.owner = THIS_MODULE,
.driver_name = "PXA serial",
.devfs_name = "tts/",
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
.nr = ARRAY_SIZE(serial_pxa_ports),
.cons = PXA_CONSOLE,
};
static int __init serial_pxa_init(void)
{
int i, ret;
ret = uart_register_driver(&serial_pxa_reg);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(serial_pxa_ports); i++)
uart_add_one_port(&serial_pxa_reg, &serial_pxa_ports[i].port);
return 0;
}
static void __exit serial_pxa_exit(void)
{
uart_unregister_driver(&serial_pxa_reg);
}
module_init(serial_pxa_init);
module_exit(serial_pxa_exit);
MODULE_LICENSE("GPL");
@@ -3,23 +3,10 @@
#ifdef __KERNEL__
#include <linux/config.h>
-#include <linux/dma-mapping.h>
+#include <asm-generic/pci-dma-compat.h>
#include <asm/hardware.h> /* for PCIBIOS_MIN_* */
#ifdef CONFIG_SA1111
/*
* Keep the SA1111 DMA-mapping tricks until the USB layer gets
* properly converted to the new DMA-mapping API, at which time
* most of this file can die.
*/
#define SA1111_FAKE_PCIDEV ((struct pci_dev *) 1111)
#define pcidev_is_sa1111(dev) (dev == SA1111_FAKE_PCIDEV)
#else
#define pcidev_is_sa1111(dev) (0)
#endif
#define pcibios_scan_all_fns(a, b) 0
static inline void pcibios_set_master(struct pci_dev *dev)
@@ -39,144 +26,21 @@ static inline void pcibios_penalize_isa_irq(int irq)
*/
#define PCI_DMA_BUS_IS_PHYS (0)
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
{
int gfp = GFP_ATOMIC;
if (hwdev == NULL || pcidev_is_sa1111(hwdev) ||
hwdev->dma_mask != 0xffffffff)
gfp |= GFP_DMA;
return consistent_alloc(gfp, size, handle, 0);
}
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
dma_addr_t handle)
{
dma_free_coherent(hwdev ? &hwdev->dev : NULL, size, vaddr, handle);
}
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int dir)
{
if (pcidev_is_sa1111(hwdev))
return sa1111_map_single(ptr, size, dir);
return dma_map_single(hwdev ? &hwdev->dev : NULL, ptr, size, dir);
}
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t handle, size_t size, int dir)
{
if (pcidev_is_sa1111(hwdev)) {
sa1111_unmap_single(handle, size, dir);
return;
}
dma_unmap_single(hwdev ? &hwdev->dev : NULL, handle, size, dir);
}
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dir)
{
if (pcidev_is_sa1111(hwdev))
return sa1111_map_sg(sg, nents, dir);
return dma_map_sg(hwdev ? &hwdev->dev : NULL, sg, nents, dir);
}
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dir)
{
if (pcidev_is_sa1111(hwdev)) {
sa1111_unmap_sg(sg, nents, dir);
return;
}
return dma_unmap_sg(hwdev ? &hwdev->dev : NULL, sg, nents, dir);
}
static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page, unsigned long offset,
size_t size, int dir)
{
return pci_map_single(hwdev, page_address(page) + offset, size, dir);
}
static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t handle, size_t size, int dir)
{
return pci_unmap_single(hwdev, handle, size, dir);
}
static inline void
pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t handle, size_t size, int dir)
{
if (pcidev_is_sa1111(hwdev)) {
sa1111_dma_sync_single(handle, size, dir);
return;
}
return dma_sync_single(hwdev ? &hwdev->dev : NULL, handle, size, dir);
}
static inline void
pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int dir)
{
if (pcidev_is_sa1111(hwdev)) {
sa1111_dma_sync_sg(sg, nelems, dir);
return;
}
return dma_sync_sg(hwdev ? &hwdev->dev : NULL, sg, nelems, dir);
}
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return 1;
}
/*
* We don't support DAC DMA cycles.
*/
#define pci_dac_dma_supported(pci_dev, mask) (0)
#if defined(CONFIG_SA1111) && !defined(CONFIG_PCI)
/*
* SA-1111 needs these prototypes even when !defined(CONFIG_PCI)
*
* kmem_cache style wrapper around pci_alloc_consistent()
*/
struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
size_t size, size_t align, size_t allocation);
void pci_pool_destroy (struct pci_pool *pool);
void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr);
#endif
/*
* Whether pci_unmap_{single,page} is a nop depends upon the
* configuration.
*/
#if defined(CONFIG_PCI) || defined(CONFIG_SA1111)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
......
@@ -46,6 +46,7 @@
* separate so any additions to the old serial.c that occur before
* we are merged can be easily merged here.
*/
#define PORT_PXA 31
#define PORT_AMBA 32
#define PORT_CLPS711X 33
#define PORT_SA1100 34
......
@@ -140,6 +140,21 @@
#define UART_MSR_DCTS 0x01 /* Delta CTS */
#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */
/*
* The Intel PXA2xx chip defines those bits
*/
#define UART_IER_DMAE 0x80 /* DMA Requests Enable */
#define UART_IER_UUE 0x40 /* UART Unit Enable */
#define UART_IER_NRZE 0x20 /* NRZ coding Enable */
#define UART_IER_RTOIE 0x10 /* Receiver Time Out Interrupt Enable */
#define UART_IIR_TOD 0x08 /* Character Timeout Indication Detected */
#define UART_FCR_PXAR1 0x00 /* receive FIFO threshold = 1 */
#define UART_FCR_PXAR8 0x40 /* receive FIFO threshold = 8 */
#define UART_FCR_PXAR16 0x80 /* receive FIFO threshold = 16 */
#define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */
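/* The PXAR values above select FCR bits [7:6], the receiver interrupt trigger level */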
/*
* These are the definitions for the Extended Features Register
* (StarTech 16C660 only, when DLAB=1)
......
@@ -324,9 +324,10 @@ static int vidc_audio_prepare_for_input(int dev, int bsize, int bcount)
return -EINVAL;
}
-static void vidc_audio_dma_interrupt(void)
+static irqreturn_t vidc_audio_dma_interrupt(void)
{
DMAbuf_outputintr(vidc_adev, 1);
return IRQ_HANDLED;
}
/*
......
@@ -50,7 +50,7 @@ extern unsigned long (*vidc_filler) (unsigned long ibuf, unsigned long iend,
* Virtual DMA buffer exhausted
*/
-extern void (*dma_interrupt) (void);
+extern irqreturn_t (*dma_interrupt) (void);
/*
* Virtual DMA buffer addresses
......
@@ -133,10 +133,6 @@ ENTRY(vidc_clear)
* ip = corrupted
*/
%%%%%%%%%%%%%%%%%%%
fixme! This funtion needs to return IRQ_HANDLED
%%%%%%%%%%%%%%%%%%%
ENTRY(vidc_sound_dma_irq)
stmfd sp!, {r4 - r8, lr}
ldr r8, =dma_start
@@ -189,6 +185,7 @@ ENTRY(vidc_sound_dma_irq)
mov r0, #0x10
strneb r0, [ip, #IOMD_SD0CR]
ldmfd sp!, {r4 - r8, lr}
mov r0, #1 @ IRQ_HANDLED
teq r1, #0 @ If we have no more
movne pc, lr
teq r3, #0
......