Commit f8c72e9e authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk

into ppc970.osdl.org:/home/torvalds/v2.5/linux
parents c07ac043 f01324b3
......@@ -31,6 +31,7 @@ comma = ,
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
# testing for a specific architecture or later rather impossible.
arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 -march=armv5t -Wa,-march=armv6
arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call check_gcc,-march=armv5te,-march=armv4)
arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
......@@ -45,6 +46,7 @@ tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE) :=$(call check_gcc,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_V6) :=-mtune=strongarm
# Need -Uarm for gcc < 3.x
CFLAGS_BOOT :=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
......
......@@ -585,7 +585,7 @@ __armv3_cache_off:
* On entry,
* r6 = processor ID
* On exit,
* r1, r2, r3, r12 corrupted
* r1, r2, r3, r11, r12 corrupted
* This routine must preserve:
* r0, r4, r5, r6, r7
*/
......@@ -595,9 +595,25 @@ cache_clean_flush:
b call_cache_fn
__armv4_cache_flush:
bic r1, pc, #31
add r2, r1, #65536 @ 2x the largest dcache size
1: ldr r3, [r1], #32 @ s/w flush D cache
mov r2, #64*1024 @ default: 32K dcache size (*2)
mov r11, #32 @ default: 32 byte line size
mrc p15, 0, r3, c0, c0, 1 @ read cache type
teq r3, r6 @ cache ID register present?
beq no_cache_id
mov r1, r3, lsr #18
and r1, r1, #7
mov r2, #1024
mov r2, r2, lsl r1 @ base dcache size *2
tst r3, #1 << 14 @ test M bit
addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1
mov r3, r3, lsr #12
and r3, r3, #3
mov r11, #8
mov r11, r11, lsl r3 @ cache line size in bytes
no_cache_id:
bic r1, pc, #63 @ align to longest cache line
add r2, r1, r2
1: ldr r3, [r1], r11 @ s/w flush D cache
teq r1, r2
bne 1b
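@ In short (derived from the sizing code above): r2 = 1024 << dsize, i.e.
@ twice the D cache size, plus half again when the M bit is set, and
@ r11 = 8 << len is the line length in bytes, so the loop reads one word
@ per cache line across twice the cache size.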
......
......@@ -5,6 +5,6 @@
obj-y += platform.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-$(CONFIG_ICST525) += icst525.o
obj-$(CONFIG_SA1111) += sa1111.o sa1111-pcibuf.o sa1111-pcipool.o
obj-$(CONFIG_SA1111) += sa1111.o sa1111-pcibuf.o
obj-$(CONFIG_PCI_HOST_PLX90X0) += plx90x0.o
obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
......@@ -315,16 +315,30 @@ amba_find_device(const char *busid, struct device *parent, unsigned int id,
return data.dev;
}
/**
* amba_request_regions - request all mem regions associated with device
* @dev: amba_device structure for device
* @name: name, or NULL to use driver name
*/
int amba_request_regions(struct amba_device *dev, const char *name)
{
int ret = 0;
if (!name)
name = dev->dev.driver->name;
if (!request_mem_region(dev->res.start, SZ_4K, name))
ret = -EBUSY;
return ret;
}
/**
* amba_release_regions - release mem regions associated with device
* @dev: amba_device structure for device
*
* Release regions claimed by a successful call to amba_request_regions.
*/
void amba_release_regions(struct amba_device *dev)
{
release_mem_region(dev->res.start, SZ_4K);
......
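/*
 * For orientation only: a minimal sketch of how a driver's probe routine
 * might use the region helpers above. example_probe, adev and the plain
 * ioremap of SZ_4K are illustrative assumptions, not part of this commit.
 */
static int example_probe(struct amba_device *adev, void *id)
{
	void *base;
	int ret;

	ret = amba_request_regions(adev, NULL);	/* NULL: fall back to the driver name */
	if (ret)
		return ret;

	base = ioremap(adev->res.start, SZ_4K);
	if (!base) {
		amba_release_regions(adev);
		return -ENOMEM;
	}

	/* ... set up the hardware through 'base' ... */
	return 0;
}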
/*
* linux/arch/arm/mach-sa1100/pci-sa1111.c
* linux/arch/arm/mach-sa1100/sa1111-pcibuf.c
*
* Special pci_{map/unmap/dma_sync}_* routines for SA-1111.
* Special dma_{map/unmap/dma_sync}_* routines for SA-1111.
*
* These functions utilize bounce buffers to compensate for a bug in
* the SA-1111 hardware which doesn't allow DMA to/from addresses
......@@ -17,20 +17,17 @@
* version 2 as published by the Free Software Foundation.
* */
//#define DEBUG
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <asm/hardware/sa1111.h>
//#define DEBUG
#ifdef DEBUG
#define DPRINTK(...) do { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#else
#define DPRINTK(...) do { } while (0)
#endif
//#define STATS
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
......@@ -46,12 +43,13 @@ struct safe_buffer {
/* original request */
void *ptr;
size_t size;
int direction;
enum dma_data_direction direction;
/* safe buffer info */
struct pci_pool *pool;
struct dma_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
struct device *dev;
};
static LIST_HEAD(safe_buffers);
......@@ -60,7 +58,7 @@ static LIST_HEAD(safe_buffers);
#define SIZE_SMALL 1024
#define SIZE_LARGE (4*1024)
static struct pci_pool *small_buffer_pool, *large_buffer_pool;
static struct dma_pool *small_buffer_pool, *large_buffer_pool;
#ifdef STATS
static unsigned long sbp_allocs __initdata = 0;
......@@ -70,95 +68,90 @@ static unsigned long total_allocs __initdata= 0;
static void print_alloc_stats(void)
{
printk(KERN_INFO
"sa1111_pcibuf: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
"sa1111_dmabuf: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
sbp_allocs, lbp_allocs,
total_allocs - sbp_allocs - lbp_allocs, total_allocs);
}
#endif
static int __init
create_safe_buffer_pools(void)
static int __init create_safe_buffer_pools(void)
{
small_buffer_pool = pci_pool_create("sa1111_small_dma_buffer",
SA1111_FAKE_PCIDEV,
SIZE_SMALL,
small_buffer_pool = dma_pool_create("sa1111_small_dma_buffer",
NULL, SIZE_SMALL,
0 /* byte alignment */,
0 /* no page-crossing issues */);
if (0 == small_buffer_pool) {
if (small_buffer_pool == NULL) {
printk(KERN_ERR
"sa1111_pcibuf: could not allocate small pci pool\n");
return -1;
"sa1111_dmabuf: could not allocate small pci pool\n");
return -ENOMEM;
}
large_buffer_pool = pci_pool_create("sa1111_large_dma_buffer",
SA1111_FAKE_PCIDEV,
SIZE_LARGE,
large_buffer_pool = dma_pool_create("sa1111_large_dma_buffer",
NULL, SIZE_LARGE,
0 /* byte alignment */,
0 /* no page-crossing issues */);
if (0 == large_buffer_pool) {
if (large_buffer_pool == NULL) {
printk(KERN_ERR
"sa1111_pcibuf: could not allocate large pci pool\n");
pci_pool_destroy(small_buffer_pool);
small_buffer_pool = 0;
return -1;
"sa1111_dmabuf: could not allocate large pci pool\n");
dma_pool_destroy(small_buffer_pool);
small_buffer_pool = NULL;
return -ENOMEM;
}
printk(KERN_INFO
"sa1111_pcibuf: buffer sizes: small=%u, large=%u\n",
printk(KERN_INFO "SA1111: DMA buffer sizes: small=%u, large=%u\n",
SIZE_SMALL, SIZE_LARGE);
return 0;
}
static void __exit
destroy_safe_buffer_pools(void)
static void __exit destroy_safe_buffer_pools(void)
{
if (small_buffer_pool)
pci_pool_destroy(small_buffer_pool);
dma_pool_destroy(small_buffer_pool);
if (large_buffer_pool)
pci_pool_destroy(large_buffer_pool);
dma_pool_destroy(large_buffer_pool);
small_buffer_pool = large_buffer_pool = 0;
small_buffer_pool = large_buffer_pool = NULL;
}
/* allocate a 'safe' buffer and keep track of it */
static struct safe_buffer *
alloc_safe_buffer(void *ptr, size_t size, int direction)
static struct safe_buffer *alloc_safe_buffer(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir)
{
struct safe_buffer *buf;
struct pci_pool *pool;
struct dma_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
DPRINTK("%s(ptr=%p, size=%d, direction=%d)\n",
__func__, ptr, size, direction);
dev_dbg(dev, "%s(ptr=%p, size=%d, direction=%d)\n",
__func__, ptr, size, dir);
DO_STATS ( total_allocs++ );
buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
if (buf == 0) {
if (buf == NULL) {
printk(KERN_WARNING "%s: kmalloc failed\n", __func__);
return 0;
}
if (size <= SIZE_SMALL) {
pool = small_buffer_pool;
safe = pci_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
DO_STATS ( sbp_allocs++ );
} else if (size <= SIZE_LARGE) {
pool = large_buffer_pool;
safe = pci_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
DO_STATS ( lbp_allocs++ );
} else {
pool = 0;
safe = pci_alloc_consistent(SA1111_FAKE_PCIDEV, size,
&safe_dma_addr);
pool = NULL;
safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
}
if (safe == 0) {
if (safe == NULL) {
printk(KERN_WARNING
"%s: could not alloc dma memory (size=%d)\n",
__func__, size);
......@@ -175,20 +168,20 @@ alloc_safe_buffer(void *ptr, size_t size, int direction)
buf->ptr = ptr;
buf->size = size;
buf->direction = direction;
buf->direction = dir;
buf->pool = pool;
buf->safe = safe;
buf->safe_dma_addr = safe_dma_addr;
buf->dev = dev;
MOD_INC_USE_COUNT;
list_add(&buf->node, &safe_buffers);
return buf;
}
/* determine if a buffer is from our "safe" pool */
static struct safe_buffer *
find_safe_buffer(dma_addr_t safe_dma_addr)
static struct safe_buffer *find_safe_buffer(struct device *dev,
dma_addr_t safe_dma_addr)
{
struct list_head *entry;
......@@ -196,7 +189,8 @@ find_safe_buffer(dma_addr_t safe_dma_addr)
struct safe_buffer *b =
list_entry(entry, struct safe_buffer, node);
if (b->safe_dma_addr == safe_dma_addr) {
if (b->safe_dma_addr == safe_dma_addr &&
b->dev == dev) {
return b;
}
}
......@@ -204,25 +198,22 @@ find_safe_buffer(dma_addr_t safe_dma_addr)
return 0;
}
static void
free_safe_buffer(struct safe_buffer *buf)
static void free_safe_buffer(struct safe_buffer *buf)
{
DPRINTK("%s(buf=%p)\n", __func__, buf);
pr_debug("%s(buf=%p)\n", __func__, buf);
list_del(&buf->node);
if (buf->pool)
pci_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
else
pci_free_consistent(SA1111_FAKE_PCIDEV, buf->size, buf->safe,
dma_free_coherent(buf->dev, buf->size, buf->safe,
buf->safe_dma_addr);
kfree(buf);
MOD_DEC_USE_COUNT;
}
static inline int
dma_range_is_safe(dma_addr_t addr, size_t size)
static inline int dma_range_is_safe(struct device *dev, dma_addr_t addr,
size_t size)
{
unsigned int physaddr = SA1111_DMA_ADDR((unsigned int) addr);
......@@ -248,13 +239,13 @@ static unsigned long bounce_count __initdata = 0;
static void print_map_stats(void)
{
printk(KERN_INFO
"sa1111_pcibuf: map_op_count=%lu, bounce_count=%lu\n",
"sa1111_dmabuf: map_op_count=%lu, bounce_count=%lu\n",
map_op_count, bounce_count);
}
#endif
static dma_addr_t
map_single(void *ptr, size_t size, int direction)
static dma_addr_t map_single(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir)
{
dma_addr_t dma_addr;
......@@ -262,37 +253,36 @@ map_single(void *ptr, size_t size, int direction)
dma_addr = virt_to_bus(ptr);
if (!dma_range_is_safe(dma_addr, size)) {
if (!dma_range_is_safe(dev, dma_addr, size)) {
struct safe_buffer *buf;
DO_STATS ( bounce_count++ ) ;
buf = alloc_safe_buffer(ptr, size, direction);
if (buf == 0) {
buf = alloc_safe_buffer(dev, ptr, size, dir);
if (buf == NULL) {
printk(KERN_ERR
"%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
return 0;
}
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08x)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
buf->ptr, virt_to_bus(buf->ptr),
buf->safe, buf->safe_dma_addr);
if ((direction == PCI_DMA_TODEVICE) ||
(direction == PCI_DMA_BIDIRECTIONAL)) {
DPRINTK("%s: copy out from unsafe %p, to safe %p, size %d\n",
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
}
consistent_sync(buf->safe, size, direction);
dma_addr = buf->safe_dma_addr;
} else {
consistent_sync(ptr, size, direction);
ptr = buf->safe;
}
consistent_sync(ptr, size, dir);
#ifdef STATS
if (map_op_count % 1000 == 0)
print_map_stats();
......@@ -301,28 +291,26 @@ map_single(void *ptr, size_t size, int direction)
return dma_addr;
}
static void
unmap_single(dma_addr_t dma_addr, size_t size, int direction)
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf;
buf = find_safe_buffer(dma_addr);
buf = find_safe_buffer(dev, dma_addr);
if (buf) {
BUG_ON(buf->size != size);
BUG_ON(buf->direction != direction);
BUG_ON(buf->direction != dir);
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
buf->ptr, virt_to_bus(buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS ( bounce_count++ );
if ((direction == PCI_DMA_FROMDEVICE) ||
(direction == PCI_DMA_BIDIRECTIONAL)) {
DPRINTK("%s: copy back from safe %p, to unsafe %p size %d\n",
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
__func__, buf->safe, buf->ptr, size);
memcpy(buf->ptr, buf->safe, size);
}
......@@ -330,44 +318,46 @@ unmap_single(dma_addr_t dma_addr, size_t size, int direction)
}
}
static void
sync_single(dma_addr_t dma_addr, size_t size, int direction)
static void sync_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf;
void *ptr;
buf = find_safe_buffer(dma_addr);
buf = find_safe_buffer(dev, dma_addr);
if (buf) {
BUG_ON(buf->size != size);
BUG_ON(buf->direction != direction);
BUG_ON(buf->direction != dir);
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
buf->ptr, virt_to_bus(buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS ( bounce_count++ );
switch (direction) {
case PCI_DMA_FROMDEVICE:
DPRINTK("%s: copy back from safe %p, to unsafe %p size %d\n",
switch (dir) {
case DMA_FROM_DEVICE:
dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
__func__, buf->safe, buf->ptr, size);
memcpy(buf->ptr, buf->safe, size);
break;
case PCI_DMA_TODEVICE:
DPRINTK("%s: copy out from unsafe %p, to safe %p, size %d\n",
case DMA_TO_DEVICE:
dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
__func__,buf->ptr, buf->safe, size);
memcpy(buf->safe, buf->ptr, size);
break;
case PCI_DMA_BIDIRECTIONAL:
case DMA_BIDIRECTIONAL:
BUG(); /* is this allowed? what does it mean? */
default:
BUG();
}
consistent_sync(buf->safe, size, direction);
ptr = buf->safe;
} else {
consistent_sync(bus_to_virt(dma_addr), size, direction);
ptr = bus_to_virt(dma_addr);
}
consistent_sync(ptr, size, dir);
}
/* ************************************************** */
......@@ -378,20 +368,20 @@ sync_single(dma_addr_t dma_addr, size_t size, int direction)
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
dma_addr_t
sa1111_map_single(void *ptr, size_t size, int direction)
dma_addr_t sa1111_map_single(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir)
{
unsigned long flags;
dma_addr_t dma_addr;
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, ptr, size, direction);
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, ptr, size, dir);
BUG_ON(direction == PCI_DMA_NONE);
BUG_ON(dir == DMA_NONE);
local_irq_save(flags);
dma_addr = map_single(ptr, size, direction);
dma_addr = map_single(dev, ptr, size, dir);
local_irq_restore(flags);
......@@ -404,34 +394,31 @@ sa1111_map_single(void *ptr, size_t size, int direction)
* the safe buffer. (basically return things back to the way they
* should be)
*/
void
sa1111_unmap_single(dma_addr_t dma_addr, size_t size, int direction)
void sa1111_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long flags;
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, direction);
BUG_ON(direction == PCI_DMA_NONE);
dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
__func__, dma_addr, size, dir);
local_irq_save(flags);
unmap_single(dma_addr, size, direction);
unmap_single(dev, dma_addr, size, dir);
local_irq_restore(flags);
}
int
sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
unsigned long flags;
int i;
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, dir);
BUG_ON(direction == PCI_DMA_NONE);
BUG_ON(dir == DMA_NONE);
local_irq_save(flags);
......@@ -441,8 +428,7 @@ sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
unsigned int length = sg->length;
void *ptr = page_address(page) + offset;
sg->dma_address =
map_single(ptr, length, direction);
sg->dma_address = map_single(dev, ptr, length, dir);
}
local_irq_restore(flags);
......@@ -450,16 +436,14 @@ sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
return nents;
}
void
sa1111_unmap_sg(struct scatterlist *sg, int nents, int direction)
void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
unsigned long flags;
int i;
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
BUG_ON(direction == PCI_DMA_NONE);
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, dir);
local_irq_save(flags);
......@@ -467,37 +451,35 @@ sa1111_unmap_sg(struct scatterlist *sg, int nents, int direction)
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
unmap_single(dma_addr, length, direction);
unmap_single(dev, dma_addr, length, dir);
}
local_irq_restore(flags);
}
void
sa1111_dma_sync_single(dma_addr_t dma_addr, size_t size, int direction)
void sa1111_dma_sync_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long flags;
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, direction);
dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
__func__, dma_addr, size, dir);
local_irq_save(flags);
sync_single(dma_addr, size, direction);
sync_single(dev, dma_addr, size, dir);
local_irq_restore(flags);
}
void
sa1111_dma_sync_sg(struct scatterlist *sg, int nents, int direction)
void sa1111_dma_sync_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
unsigned long flags;
int i;
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
BUG_ON(direction == PCI_DMA_NONE);
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, dir);
local_irq_save(flags);
......@@ -505,7 +487,7 @@ sa1111_dma_sync_sg(struct scatterlist *sg, int nents, int direction)
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
sync_single(dma_addr, length, direction);
sync_single(dev, dma_addr, length, dir);
}
local_irq_restore(flags);
......@@ -520,20 +502,15 @@ EXPORT_SYMBOL(sa1111_dma_sync_sg);
/* **************************************** */
static int __init sa1111_pcibuf_init(void)
static int __init sa1111_dmabuf_init(void)
{
int ret;
printk(KERN_DEBUG
"sa1111_pcibuf: initializing SA-1111 DMA workaround\n");
ret = create_safe_buffer_pools();
printk(KERN_DEBUG "sa1111_dmabuf: initializing SA-1111 DMA buffers\n");
return ret;
return create_safe_buffer_pools();
}
module_init(sa1111_pcibuf_init);
module_init(sa1111_dmabuf_init);
static void __exit sa1111_pcibuf_exit(void)
static void __exit sa1111_dmabuf_exit(void)
{
BUG_ON(!list_empty(&safe_buffers));
......@@ -544,8 +521,8 @@ static void __exit sa1111_pcibuf_exit(void)
destroy_safe_buffer_pools();
}
module_exit(sa1111_pcibuf_exit);
module_exit(sa1111_dmabuf_exit);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>");
MODULE_DESCRIPTION("Special pci_{map/unmap/dma_sync}_* routines for SA-1111.");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for SA-1111.");
MODULE_LICENSE("GPL");
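/*
 * For orientation only: the intended call pattern for the wrappers above.
 * example_transfer, 'buf' and 'len' are illustrative placeholders, not part
 * of this file.
 */
static void example_transfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = sa1111_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (!handle)
		return;
	/* ... point the SA-1111 DMA engine at 'handle' and run the transfer ... */
	sa1111_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}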
/*
NOTE:
this code was lifted straight out of drivers/pci/pci.c;
when compiling for the Intel StrongARM SA-1110/SA-1111 the
usb-ohci.c driver needs these routines even when the architecture
has no pci bus...
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <asm/page.h>
/*
* Pool allocator ... wraps the pci_alloc_consistent page allocator, so
* small blocks are easily used by drivers for bus mastering controllers.
* This should probably be sharing the guts of the slab allocator.
*/
struct pci_pool { /* the pool */
struct list_head page_list;
spinlock_t lock;
size_t blocks_per_page;
size_t size;
struct pci_dev *dev;
size_t allocation;
char name [32];
wait_queue_head_t waitq;
};
struct pci_page { /* cacheable header for 'allocation' bytes */
struct list_head page_list;
void *vaddr;
dma_addr_t dma;
unsigned long bitmap [0];
};
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
#define POOL_POISON_BYTE 0xa7
// #define CONFIG_PCIPOOL_DEBUG
static inline const char *slot_name(const struct pci_pool *pool)
{
struct pci_dev *pdev = (struct pci_dev *)pool->dev;
if (pdev == 0)
return "[0]";
else if (pcidev_is_sa1111(pdev))
return "[SA-1111]";
else
return pci_name(pdev);
}
/**
* pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
* @name: name of pool, for diagnostics
* @pdev: pci device that will be doing the DMA
* @size: size of the blocks in this pool.
* @align: alignment requirement for blocks; must be a power of two
* @allocation: returned blocks won't cross this boundary (or zero)
* Context: !in_interrupt()
*
* Returns a pci allocation pool with the requested characteristics, or
* null if one can't be created. Given one of these pools, pci_pool_alloc()
* may be used to allocate memory. Such memory will all have "consistent"
* DMA mappings, accessible by the device and its driver without using
* cache flushing primitives. The actual size of blocks allocated may be
* larger than requested because of alignment.
*
* If allocation is nonzero, objects returned from pci_pool_alloc() won't
* cross that size boundary. This is useful for devices which have
* addressing restrictions on individual DMA transfers, such as not crossing
* boundaries of 4KBytes.
*/
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
size_t size, size_t align, size_t allocation)
{
struct pci_pool *retval;
if (align == 0)
align = 1;
if (size == 0)
return 0;
else if (size < align)
size = align;
else if ((size % align) != 0) {
size += align + 1;
size &= ~(align - 1);
}
if (allocation == 0) {
if (PAGE_SIZE < size)
allocation = size;
else
allocation = PAGE_SIZE;
// FIXME: round up for less fragmentation
} else if (allocation < size)
return 0;
if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
return retval;
strlcpy (retval->name, name, sizeof retval->name);
retval->dev = pdev;
INIT_LIST_HEAD (&retval->page_list);
spin_lock_init (&retval->lock);
retval->size = size;
retval->allocation = allocation;
retval->blocks_per_page = allocation / size;
init_waitqueue_head (&retval->waitq);
#ifdef CONFIG_PCIPOOL_DEBUG
printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
slot_name(retval), retval->name, size,
retval->blocks_per_page, allocation);
#endif
return retval;
}
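/*
 * Quick reference only: a minimal usage sketch of this pool API.
 * example_pool_demo, pdev and the block/alignment sizes are illustrative
 * assumptions, not part of this file.
 */
static int example_pool_demo(struct pci_dev *pdev)
{
	struct pci_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	pool = pci_pool_create("example", pdev, 64, 8, 0);	/* 64-byte blocks, 8-byte aligned */
	if (!pool)
		return -ENOMEM;

	vaddr = pci_pool_alloc(pool, SLAB_KERNEL, &dma);	/* vaddr for the CPU, dma for the device */
	if (vaddr)
		pci_pool_free(pool, vaddr, dma);

	pci_pool_destroy(pool);
	return 0;
}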
static struct pci_page *
pool_alloc_page (struct pci_pool *pool, int mem_flags)
{
struct pci_page *page;
int mapsize;
mapsize = pool->blocks_per_page;
mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
mapsize *= sizeof (long);
page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
if (!page)
return 0;
page->vaddr = pci_alloc_consistent (pool->dev,
pool->allocation,
&page->dma);
if (page->vaddr) {
memset (page->bitmap, 0xff, mapsize); // bit set == free
#ifdef CONFIG_DEBUG_SLAB
memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
#endif
list_add (&page->page_list, &pool->page_list);
} else {
kfree (page);
page = 0;
}
return page;
}
static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
while (blocks > 0) {
if (*bitmap++ != ~0UL)
return 1;
blocks -= BITS_PER_LONG;
}
return 0;
}
static void
pool_free_page (struct pci_pool *pool, struct pci_page *page)
{
dma_addr_t dma = page->dma;
#ifdef CONFIG_DEBUG_SLAB
memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
#endif
pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
list_del (&page->page_list);
kfree (page);
}
/**
* pci_pool_destroy - destroys a pool of pci memory blocks.
* @pool: pci pool that will be destroyed
*
* Caller guarantees that no more memory from the pool is in use,
* and that nothing will try to use the pool after this call.
*/
void
pci_pool_destroy (struct pci_pool *pool)
{
unsigned long flags;
#ifdef CONFIG_PCIPOOL_DEBUG
printk (KERN_DEBUG "pcipool destroy %s/%s\n",
slot_name(pool), pool->name);
#endif
spin_lock_irqsave (&pool->lock, flags);
while (!list_empty (&pool->page_list)) {
struct pci_page *page;
page = list_entry (pool->page_list.next,
struct pci_page, page_list);
if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
slot_name(pool), pool->name, page->vaddr);
/* leak the still-in-use consistent memory */
list_del (&page->page_list);
kfree (page);
} else
pool_free_page (pool, page);
}
spin_unlock_irqrestore (&pool->lock, flags);
kfree (pool);
}
/**
* pci_pool_alloc - get a block of consistent memory
* @pool: pci pool that will produce the block
* @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
* @handle: pointer to dma address of block
*
* This returns the kernel virtual address of a currently unused block,
* and reports its dma address through the handle.
* If such a memory block can't be allocated, null is returned.
*/
void *
pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
unsigned long flags;
struct list_head *entry;
struct pci_page *page;
int map, block;
size_t offset;
void *retval;
restart:
spin_lock_irqsave (&pool->lock, flags);
list_for_each (entry, &pool->page_list) {
int i;
page = list_entry (entry, struct pci_page, page_list);
/* only cachable accesses here ... */
for (map = 0, i = 0;
i < pool->blocks_per_page;
i += BITS_PER_LONG, map++) {
if (page->bitmap [map] == 0)
continue;
block = ffz (~ page->bitmap [map]);
if ((i + block) < pool->blocks_per_page) {
clear_bit (block, &page->bitmap [map]);
offset = (BITS_PER_LONG * map) + block;
offset *= pool->size;
goto ready;
}
}
}
if (!(page = pool_alloc_page (pool, mem_flags))) {
if (mem_flags == SLAB_KERNEL) {
DECLARE_WAITQUEUE (wait, current);
current->state = TASK_INTERRUPTIBLE;
add_wait_queue (&pool->waitq, &wait);
spin_unlock_irqrestore (&pool->lock, flags);
schedule_timeout (POOL_TIMEOUT_JIFFIES);
remove_wait_queue (&pool->waitq, &wait);
goto restart;
}
retval = 0;
goto done;
}
clear_bit (0, &page->bitmap [0]);
offset = 0;
ready:
retval = offset + page->vaddr;
*handle = offset + page->dma;
done:
spin_unlock_irqrestore (&pool->lock, flags);
return retval;
}
static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
unsigned long flags;
struct list_head *entry;
struct pci_page *page;
spin_lock_irqsave (&pool->lock, flags);
list_for_each (entry, &pool->page_list) {
page = list_entry (entry, struct pci_page, page_list);
if (dma < page->dma)
continue;
if (dma < (page->dma + pool->allocation))
goto done;
}
page = 0;
done:
spin_unlock_irqrestore (&pool->lock, flags);
return page;
}
/**
* pci_pool_free - put block back into pci pool
* @pool: the pci pool holding the block
* @vaddr: virtual address of block
* @dma: dma address of block
*
* Caller promises neither device nor driver will again touch this block
* unless it is first re-allocated.
*/
void
pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
struct pci_page *page;
unsigned long flags;
int map, block;
if ((page = pool_find_page (pool, dma)) == 0) {
printk (KERN_ERR "pci_pool_free %s/%s, %p/%lx (bad dma)\n",
pool->dev ? pci_name(pool->dev) : NULL,
pool->name, vaddr, (unsigned long) dma);
return;
}
block = dma - page->dma;
block /= pool->size;
map = block / BITS_PER_LONG;
block %= BITS_PER_LONG;
#ifdef CONFIG_DEBUG_SLAB
if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%lx\n",
pool->dev ? pci_name(pool->dev) : NULL,
pool->name, vaddr, (unsigned long) dma);
return;
}
if (page->bitmap [map] & (1UL << block)) {
printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
pool->dev ? pci_name(pool->dev) : NULL,
pool->name, dma);
return;
}
memset (vaddr, POOL_POISON_BYTE, pool->size);
#endif
spin_lock_irqsave (&pool->lock, flags);
set_bit (block, &page->bitmap [map]);
if (waitqueue_active (&pool->waitq))
wake_up (&pool->waitq);
/*
* Resist a temptation to do
* if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
* it is not interrupt safe. Better have empty pages hang around.
*/
spin_unlock_irqrestore (&pool->lock, flags);
}
EXPORT_SYMBOL (pci_pool_create);
EXPORT_SYMBOL (pci_pool_destroy);
EXPORT_SYMBOL (pci_pool_alloc);
EXPORT_SYMBOL (pci_pool_free);
/* **************************************** */
static int __init pcipool_init(void)
{
MOD_INC_USE_COUNT; /* never unload */
return 0;
}
module_init(pcipool_init);
MODULE_LICENSE("GPL");
......@@ -11,7 +11,6 @@ obj-y := arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
time.o traps.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_ARCH_ACORN) += ecard.o time-acorn.o
obj-$(CONFIG_ARCH_CLPS7500) += time-acorn.o
obj-$(CONFIG_FOOTBRIDGE) += isa.o
......
......@@ -47,6 +47,10 @@ int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
#if __LINUX_ARM_ARCH__ >= 6
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
#endif
BLANK();
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
BLANK();
......
......@@ -66,6 +66,15 @@
msr cpsr_c, \mode
.endm
#if __LINUX_ARM_ARCH__ >= 6
.macro disable_irq, temp
cpsid i
.endm
.macro enable_irq, temp
cpsie i
.endm
#else
.macro disable_irq, temp
set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
.endm
......@@ -73,6 +82,7 @@
.macro enable_irq, temp
set_cpsr_c \temp, #MODE_SVC
.endm
#endif
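@ Note: cpsid i / cpsie i are the ARMv6 interrupt-mask instructions; the
@ \temp argument is unused by the ARMv6 variants but is kept so existing
@ callers of disable_irq/enable_irq need no changes.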
.macro save_user_regs
sub sp, sp, #S_FRAME_SIZE
......
......@@ -31,6 +31,7 @@
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <asm/irq.h>
#include <asm/system.h>
......@@ -225,6 +226,34 @@ static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
}
static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
static int count = 100;
struct irqaction *action;
if (!count)
return;
count--;
if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
printk("irq%u: bogus retval mask %x\n", irq, ret);
} else {
printk("irq%u: nobody cared\n", irq);
}
show_regs(regs);
dump_stack();
printk(KERN_ERR "handlers:");
action = desc->action;
do {
printk("\n" KERN_ERR "[<%p>]", action->handler);
print_symbol(" (%s)", (unsigned long)action->handler);
action = action->next;
} while (action);
printk("\n");
}
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
unsigned int status;
......@@ -247,18 +276,7 @@ __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
spin_lock_irq(&irq_controller_lock);
if (retval != 1) {
static int count = 100;
if (count) {
count--;
if (retval) {
printk("irq event %d: bogus retval mask %x\n",
irq, retval);
} else {
printk("irq %d: nobody cared\n", irq);
}
}
}
return retval;
}
/*
......@@ -276,8 +294,11 @@ do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
kstat_cpu(cpu).irqs[irq]++;
action = desc->action;
if (action)
__do_irq(irq, desc->action, regs);
if (action) {
int ret = __do_irq(irq, action, regs);
if (ret != IRQ_HANDLED)
report_bad_irq(irq, regs, desc, ret);
}
}
/*
......@@ -313,6 +334,7 @@ do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
do {
struct irqaction *action;
int ret;
action = desc->action;
if (!action)
......@@ -323,7 +345,9 @@ do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
desc->chip->unmask(irq);
}
__do_irq(irq, action, regs);
ret = __do_irq(irq, action, regs);
if (ret != IRQ_HANDLED)
report_bad_irq(irq, regs, desc, ret);
} while (desc->pending && !desc->disable_depth);
desc->running = 0;
......@@ -368,7 +392,10 @@ do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
*/
action = desc->action;
if (action) {
__do_irq(irq, desc->action, regs);
int ret = __do_irq(irq, desc->action, regs);
if (ret != IRQ_HANDLED)
report_bad_irq(irq, regs, desc, ret);
if (likely(!desc->disable_depth &&
!check_irq_lock(desc, irq, regs)))
......
/*
* linux/arch/arm/kernel/suspend.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*
* This is the common support code for suspending an ARM machine.
* pm_do_suspend() is responsible for actually putting the CPU to
* sleep.
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/pm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#ifdef CONFIG_SYSCTL
/*
* We really want this to die. It's a disgusting hack using unallocated
* sysctl numbers. We should be using a real interface.
*/
static int
pm_sysctl_proc_handler(ctl_table *ctl, int write, struct file *filp,
void *buffer, size_t *lenp)
{
int ret = -EIO;
printk("PM: task %s (pid %d) uses deprecated sysctl PM interface\n",
current->comm, current->pid);
if (write)
ret = pm_suspend(PM_SUSPEND_MEM);
return ret;
}
/*
* This came from arch/arm/mach-sa1100/pm.c:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
* with modifications by Nicolas Pitre and Russell King.
*
* ARGH! ACPI people defined CTL_ACPI in linux/acpi.h rather than
* linux/sysctl.h.
*
* This means our interface here won't survive long - it needs a new
* interface. Quick hack to get this working - use sysctl id 9999.
*/
#warning ACPI broke the kernel, this interface needs to be fixed up.
#define CTL_ACPI 9999
#define ACPI_S1_SLP_TYP 19
static struct ctl_table pm_table[] =
{
{
.ctl_name = ACPI_S1_SLP_TYP,
.procname = "suspend",
.mode = 0200,
.proc_handler = pm_sysctl_proc_handler,
},
{0}
};
static struct ctl_table pm_dir_table[] =
{
{
.ctl_name = CTL_ACPI,
.procname = "pm",
.mode = 0555,
.child = pm_table,
},
{0}
};
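/*
 * Net effect of the tables above: a write-only /proc/sys/pm/suspend entry
 * (under the borrowed CTL_ACPI id); any write to it, e.g.
 * "echo 1 > /proc/sys/pm/suspend", lands in pm_sysctl_proc_handler() and
 * calls pm_suspend(PM_SUSPEND_MEM).
 */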
/*
* Initialize power interface
*/
static int __init pm_init(void)
{
register_sysctl_table(pm_dir_table, 1);
return 0;
}
fs_initcall(pm_init);
#endif
......@@ -118,21 +118,21 @@ static struct resource io_res[] = {
#define lp2 io_res[2]
static const char *cache_types[16] = {
"write-through",
"write-back",
"write-back",
"VIVT write-through",
"VIVT write-back",
"VIVT write-back",
"undefined 3",
"undefined 4",
"undefined 5",
"write-back",
"write-back",
"VIVT write-back",
"VIVT write-back",
"undefined 8",
"undefined 9",
"undefined 10",
"undefined 11",
"undefined 12",
"undefined 13",
"undefined 14",
"VIPT write-back",
"undefined 15",
};
......@@ -151,7 +151,7 @@ static const char *cache_clean[16] = {
"undefined 11",
"undefined 12",
"undefined 13",
"undefined 14",
"cp15 c7 ops",
"undefined 15",
};
......@@ -170,7 +170,7 @@ static const char *cache_lockdown[16] = {
"undefined 11",
"undefined 12",
"undefined 13",
"undefined 14",
"format C",
"undefined 15",
};
......@@ -183,7 +183,7 @@ static const char *proc_arch[] = {
"5T",
"5TE",
"5TEJ",
"?(9)",
"6TEJ",
"?(10)",
"?(11)",
"?(12)",
......
......@@ -19,10 +19,24 @@ ENTRY(__raw_readsl)
ands ip, r1, #3
bne 2f
1: ldr r3, [r0]
str r3, [r1], #4
subs r2, r2, #1
bne 1b
subs r2, r2, #4
bmi 1001f
stmfd sp!, {r4, lr}
1000: ldr r3, [r0, #0]
ldr r4, [r0, #0]
ldr ip, [r0, #0]
ldr lr, [r0, #0]
subs r2, r2, #4
stmia r1!, {r3, r4, ip, lr}
bpl 1000b
ldmfd sp!, {r4, lr}
1001: tst r2, #2
ldrne r3, [r0, #0]
ldrne ip, [r0, #0]
stmneia r1!, {r3, ip}
tst r2, #1
ldrne r3, [r0, #0]
strne r3, [r1, #0]
mov pc, lr
2: cmp ip, #2
......
......@@ -18,6 +18,7 @@
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <asm/div64.h>
#include <asm/hardware.h>
#include <asm/system.h>
#include <asm/pgtable.h>
......@@ -111,6 +112,21 @@ unsigned int cpufreq_get(unsigned int cpu)
EXPORT_SYMBOL(cpufreq_get);
#endif
/*
* This is the SA11x0 sched_clock implementation. This has
* a resolution of 271ns, and a maximum value of 1165s.
* ( * 1E9 / 3686400 => * 78125 / 288)
*/
unsigned long long sched_clock(void)
{
unsigned long long v;
v = (unsigned long long)OSCR * 78125;
do_div(v, 288);
return v;
}
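/*
 * Sanity check of the figures above: one OSCR tick at 3.6864MHz is
 * 1e9/3686400 ns ~= 271.27ns (exactly 78125/288), and the 32-bit counter
 * wraps after 2^32 ticks, i.e. about 1165 seconds.
 */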
/*
* Default power-off for SA1100
*/
......@@ -151,6 +167,36 @@ static struct platform_device sa11x0udc_device = {
.resource = sa11x0udc_resources,
};
static struct resource sa11x0uart1_resources[] = {
[0] = {
.start = 0x80010000,
.end = 0x8001ffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device sa11x0uart1_device = {
.name = "sa11x0-uart",
.id = 1,
.num_resources = ARRAY_SIZE(sa11x0uart1_resources),
.resource = sa11x0uart1_resources,
};
static struct resource sa11x0uart3_resources[] = {
[0] = {
.start = 0x80050000,
.end = 0x8005ffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device sa11x0uart3_device = {
.name = "sa11x0-uart",
.id = 3,
.num_resources = ARRAY_SIZE(sa11x0uart3_resources),
.resource = sa11x0uart3_resources,
};
static struct resource sa11x0mcp_resources[] = {
[0] = {
.start = 0x80060000,
......@@ -218,6 +264,8 @@ static struct platform_device sa11x0pcmcia_device = {
static struct platform_device *sa11x0_devices[] __initdata = {
&sa11x0udc_device,
&sa11x0uart1_device,
&sa11x0uart3_device,
&sa11x0mcp_device,
&sa11x0ssp_device,
&sa11x0pcmcia_device,
......
......@@ -223,6 +223,17 @@ config CPU_XSCALE
select CPU_TLB_V4WBI
select CPU_MINICACHE
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
depends on ARCH_INTEGRATOR
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
select CPU_COPY_V6
select CPU_TLB_V6
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
bool
......@@ -233,6 +244,9 @@ config CPU_32v4
config CPU_32v5
bool
config CPU_32v6
bool
# The abort model
config CPU_ABRT_EV4
bool
......@@ -249,6 +263,9 @@ config CPU_ABRT_EV5T
config CPU_ABRT_EV5TJ
bool
config CPU_ABRT_EV6
bool
# The cache model
config CPU_CACHE_V3
bool
......@@ -262,6 +279,9 @@ config CPU_CACHE_V4WT
config CPU_CACHE_V4WB
bool
config CPU_CACHE_V6
bool
# The copy-page model
config CPU_COPY_V3
bool
......@@ -272,6 +292,9 @@ config CPU_COPY_V4WT
config CPU_COPY_V4WB
bool
config CPU_COPY_V6
bool
# This selects the TLB model
config CPU_TLB_V3
bool
......@@ -306,7 +329,7 @@ comment "Processor Features"
config ARM_THUMB
bool "Support Thumb user binaries"
depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE
depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_V6
default y
help
Say Y if you want to include kernel support for running user space
......
......@@ -15,15 +15,18 @@ obj-$(CONFIG_CPU_ABRT_EV4T) += abort-ev4t.o
obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o
obj-$(CONFIG_CPU_ABRT_EV5T) += abort-ev5t.o
obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
......@@ -33,6 +36,7 @@ obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o
obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
......@@ -48,3 +52,4 @@ obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o
obj-$(CONFIG_CPU_SA110) += proc-sa110.o
obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o
obj-$(CONFIG_CPU_V6) += proc-v6.o blockops.o
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Function: v6_early_abort
*
* Params : r2 = address of aborted instruction
* : r3 = saved SPSR
*
* Returns : r0 = address of abort
* : r1 = FSR, bit 11 = write
* : r2-r8 = corrupted
* : r9 = preserved
* : sp = pointer to registers
*
* Purpose : obtain information about current aborted instruction.
*/
.align 5
ENTRY(v6_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
mov pc, lr
......@@ -188,38 +188,33 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
mov r7, #0x11
and r6, r8, r7
and r2, r8, r7, lsl #1
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r2, r8, #0xaa
add r6, r6, r2, lsr #1
and r2, r8, r7, lsl #2
and r2, r6, #0xcc
and r6, r6, #0x33
add r6, r6, r2, lsr #2
and r2, r8, r7, lsl #3
add r6, r6, r2, lsr #3
add r6, r6, r6, lsr #4
and r2, r8, #0x0100 @ catch 'R' bit for push/pop
add r6, r6, r2, lsr #8
movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit)
adc r6, r6, r6, lsr #4 @ high + low nibble + R bit
and r6, r6, #15 @ number of regs to transfer
ldr r7, [sp, #13 << 2]
tst r8, #1 << 11
addne r7, r7, r6, lsl #2 @ increment SP if PUSH
subeq r7, r7, r6, lsl #2 @ decrement SP if POP
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [sp, #13 << 2]
mov pc, lr
.data_thumb_ldmstm:
mov r7, #0x11
and r6, r8, r7
and r2, r8, r7, lsl #1
and r6, r8, #0x55 @ hweight8(r8)
and r2, r8, #0xaa
add r6, r6, r2, lsr #1
and r2, r8, r7, lsl #2
and r2, r6, #0xcc
and r6, r6, #0x33
add r6, r6, r2, lsr #2
and r2, r8, r7, lsl #3
add r6, r6, r2, lsr #3
add r6, r6, r6, lsr #4
and r6, r6, #15 @ number of regs to transfer
and r5, r8, #7 << 8
ldr r7, [sp, r5, lsr #6]
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [sp, r5, lsr #6]
mov pc, lr
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
extern struct cpu_cache_fns blk_cache_fns;
#define HARVARD_CACHE
/*
* blk_flush_kern_dcache_page(kaddr)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
*/
static void __attribute__((naked))
blk_flush_kern_dcache_page(void *kaddr)
{
asm(
"add r1, r0, %0 \n\
1: .word 0xec401f0e @ mcrr p15, 0, r0, r1, c14, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c5, 0 \n\
mcr p15, 0, r0, c7, c10, 4 \n\
mov pc, lr"
:
: "I" (PAGE_SIZE));
}
/*
* blk_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_inv_range_unified(unsigned long start, unsigned long end)
{
asm(
"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line\n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
:
: "I" (L1_CACHE_BYTES - 1));
}
static void __attribute__((naked))
blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
{
asm(
"tst r0, %0 \n\
mcrne p15, 0, r0, c7, c10, 1 @ clean D line \n\
tst r1, %0 \n\
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line \n\
.word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr"
:
: "I" (L1_CACHE_BYTES - 1));
}
/*
* blk_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
asm(
".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0 @ blocking \n\
mov r0, #0 \n\
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
mov pc, lr");
}
/*
* blk_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
asm(
".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0 @ blocking \n\
mov pc, lr");
}
static int blockops_trap(struct pt_regs *regs, unsigned int instr)
{
regs->ARM_r4 |= regs->ARM_r2;
regs->ARM_pc += 4;
return 0;
}
static char *func[] = {
"Prefetch data range",
"Clean+Invalidate data range",
"Clean data range",
"Invalidate data range",
"Invalidate instr range"
};
static struct undef_hook blockops_hook __initdata = {
.instr_mask = 0x0fffffd0,
.instr_val = 0x0c401f00,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = blockops_trap,
};
static int __init blockops_check(void)
{
register unsigned int err asm("r4") = 0;
unsigned int cache_type;
int i;
asm("mcr p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
printk("Checking V6 block cache operations:\n");
register_undef_hook(&blockops_hook);
__asm__ ("mov r0, %0\n\t"
"mov r1, %1\n\t"
"mov r2, #1\n\t"
".word 0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
"mov r2, #2\n\t"
".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
"mov r2, #4\n\t"
".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
"mov r2, #8\n\t"
".word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
"mov r2, #16\n\t"
".word 0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
:
: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
: "r0", "r1", "r2");
unregister_undef_hook(&blockops_hook);
for (i = 0; i < ARRAY_SIZE(func); i++, err >>= 1)
printk("%30s: %ssupported\n", func[i], err & 1 ? "not " : "");
if ((err & 8) == 0) {
printk(" --> Using %s block cache invalidate\n",
cache_type & (1 << 24) ? "harvard" : "unified");
if (cache_type & (1 << 24))
cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
else
cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
}
if ((err & 4) == 0) {
printk(" --> Using block cache clean\n");
cpu_cache.dma_clean_range = blk_dma_clean_range;
}
if ((err & 2) == 0) {
printk(" --> Using block cache clean+invalidate\n");
cpu_cache.dma_flush_range = blk_dma_flush_range;
cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
}
return 0;
}
__initcall(blockops_check);
/*
* linux/arch/arm/mm/cache-v6.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include "proc-macros.S"
#define HARVARD_CACHE
#define CACHE_LINE_SIZE 32
#define D_CACHE_LINE_SIZE 32
/*
* v6_flush_cache_all()
*
* Flush the entire cache.
*
* It is assumed that:
*/
ENTRY(v6_flush_kern_cache_all)
mov r0, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
#else
mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
#endif
mov pc, lr
/*
* v6_flush_user_cache_all()
*
* Flush all cache entries in a particular address space
*
* - mm - mm_struct describing address space
*/
ENTRY(v6_flush_user_cache_all)
/*FALLTHROUGH*/
/*
* v6_flush_cache_range(start, end, flags)
*
* Flush a range of cache entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - flags - vm_area_struct flags describing address space
*
* It is assumed that:
* - we have a VIPT cache.
*/
ENTRY(v6_flush_user_cache_range)
mov pc, lr
/*
* v6_coherent_kern_range(start,end)
*
* Ensure that the I and D caches are coherent within specified
* region. This is typically used when code has been written to
* a memory region, and will be executed.
*
* - start - virtual start address of region
* - end - virtual end address of region
*
* It is assumed that:
* - the Icache does not read data from the write buffer
*/
ENTRY(v6_coherent_kern_range)
bic r0, r0, #CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
#endif
mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry
add r0, r0, #CACHE_LINE_SIZE
cmp r0, r1
blo 1b
#ifdef HARVARD_CACHE
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#endif
mov pc, lr
/*
* v6_flush_kern_dcache_page(kaddr)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
* - kaddr - kernel address (guaranteed to be page aligned)
*/
ENTRY(v6_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
#ifdef HARVARD_CACHE
mov r0, #0
mcr p15, 0, r0, c7, c10, 4
#endif
mov pc, lr
/*
* v6_dma_inv_range(start,end)
*
* Invalidate the data cache within the specified region; we will
* be performing a DMA operation in this region and we want to
* purge old data in the cache.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_inv_range)
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
#else
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
mcr p15, 0, r0, c7, c7, 1 @ invalidate unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* v6_dma_clean_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_clean_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
mcr p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* v6_dma_flush_range(start,end)
* - start - virtual start address of region
* - end - virtual end address of region
*/
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate line
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
__INITDATA
.type v6_cache_fns, #object
ENTRY(v6_cache_fns)
.long v6_flush_kern_cache_all
.long v6_flush_user_cache_all
.long v6_flush_user_cache_range
.long v6_coherent_kern_range
.long v6_flush_kern_dcache_page
.long v6_dma_inv_range
.long v6_dma_clean_range
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
/*
* linux/arch/arm/mm/copypage-v6.c
*
* Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#if SHMLBA > 16384
#error FIX ME
#endif
#define from_address (0xffff8000)
#define from_pgprot PAGE_KERNEL
#define to_address (0xffffc000)
#define to_pgprot PAGE_KERNEL
static pte_t *from_pte;
static pte_t *to_pte;
static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;
#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
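/*
 * Assuming the usual 16K SHMLBA of an aliasing ARMv6 D-cache (the check
 * above only enforces the upper bound), DCACHE_COLOUR() yields 0-3 with 4K
 * pages, picking one of four page slots in each 16K window below so the
 * kernel alias shares the user mapping's cache colour.
 */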
/*
* Copy the page, taking account of the cache colour.
*/
void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long from, to;
spin_lock(&v6_lock);
set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
flush_tlb_kernel_page(from);
flush_tlb_kernel_page(to);
copy_page((void *)to, (void *)from);
spin_unlock(&v6_lock);
}
void v6_clear_user_page(void *kaddr, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
spin_lock(&v6_lock);
set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
flush_tlb_kernel_page(to);
clear_page((void *)to);
spin_unlock(&v6_lock);
}
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_page = v6_clear_user_page,
.cpu_copy_user_page = v6_copy_user_page,
};
static int __init v6_userpage_init(void)
{
pgd_t *pgd;
pmd_t *pmd;
pgd = pgd_offset_k(from_address);
pmd = pmd_alloc(&init_mm, pgd, from_address);
if (!pmd)
BUG();
from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
if (!from_pte)
BUG();
to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
if (!to_pte)
BUG();
return 0;
}
__initcall(v6_userpage_init);
......@@ -585,20 +585,31 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
create_mapping(io_desc + i);
}
static inline void free_memmap(int node, unsigned long start, unsigned long end)
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
unsigned long pg, pgend;
start = __phys_to_virt(start);
end = __phys_to_virt(end);
pg = PAGE_ALIGN((unsigned long)(virt_to_page(start)));
pgend = ((unsigned long)(virt_to_page(end))) & PAGE_MASK;
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn);
end_pg = pfn_to_page(end_pfn);
start = __virt_to_phys(pg);
end = __virt_to_phys(pgend);
/*
* Convert to physical addresses, and
* round start upwards and end downwards.
*/
pg = PAGE_ALIGN(__pa(start_pg));
pgend = __pa(end_pg) & PAGE_MASK;
free_bootmem_node(NODE_DATA(node), start, end - start);
/*
* If there are free pages between these,
* free the section of the memmap array.
*/
if (pg < pgend)
free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
static inline void free_unused_memmap_node(int node, struct meminfo *mi)
......@@ -615,7 +626,12 @@ static inline void free_unused_memmap_node(int node, struct meminfo *mi)
if (mi->bank[i].size == 0 || mi->bank[i].node != node)
continue;
bank_start = mi->bank[i].start & PAGE_MASK;
bank_start = mi->bank[i].start >> PAGE_SHIFT;
if (bank_start < prev_bank_end) {
printk(KERN_ERR "MEM: unordered memory banks. "
"Not freeing memmap.\n");
break;
}
/*
* If we had a previous bank, and there is a space
......@@ -625,7 +641,7 @@ static inline void free_unused_memmap_node(int node, struct meminfo *mi)
free_memmap(node, prev_bank_end, bank_start);
prev_bank_end = PAGE_ALIGN(mi->bank[i].start +
mi->bank[i].size);
mi->bank[i].size) >> PAGE_SHIFT;
}
}
......
......@@ -35,3 +35,17 @@
ldr \rd, [\rd, #TI_TASK]
ldr \rd, [\rd, #TSK_ACTIVE_MM]
.endm
/*
* mmid - get context id from mm pointer (mm->context.id)
*/
.macro mmid, rd, rn
ldr \rd, [\rn, #MM_CONTEXT_ID]
.endm
/*
* asid - extract the ASID from the context ID
*/
.macro asid, rd, rn
and \rd, \rn, #255
.endm
/*
* linux/arch/arm/mm/proc-v6.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is the "shell" of the ARMv6 processor support.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/pgtable.h>
#include "proc-macros.S"
#define D_CACHE_LINE_SIZE 32
.macro cpsie, flags
.ifc \flags, f
.long 0xf1080040
.exitm
.endif
.ifc \flags, i
.long 0xf1080080
.exitm
.endif
.ifc \flags, if
.long 0xf10800c0
.exitm
.endif
.err
.endm
.macro cpsid, flags
.ifc \flags, f
.long 0xf10c0040
.exitm
.endif
.ifc \flags, i
.long 0xf10c0080
.exitm
.endif
.ifc \flags, if
.long 0xf10c00c0
.exitm
.endif
.err
.endm
ENTRY(cpu_v6_proc_init)
mov pc, lr
ENTRY(cpu_v6_proc_fin)
mov pc, lr
/*
* cpu_v6_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* - loc - location to jump to for soft reset
*
* It is assumed that:
*/
.align 5
ENTRY(cpu_v6_reset)
mov pc, r0
/*
* cpu_v6_do_idle()
*
* Idle the processor (eg, wait for interrupt).
*
* IRQs are already disabled.
*/
ENTRY(cpu_v6_do_idle)
mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
mov pc, lr
ENTRY(cpu_v6_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #D_CACHE_LINE_SIZE
subs r1, r1, #D_CACHE_LINE_SIZE
bhi 1b
#endif
mov pc, lr
/*
* cpu_v6_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys
*
* - pgd_phys - physical address of new TTB
*
* It is assumed that:
* - we are not using split page tables
*/
ENTRY(cpu_v6_switch_mm)
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
mcr p15, 0, r1, c13, c0, 1 @ set context ID
mov pc, lr
#define nG (1 << 11)
#define APX (1 << 9)
#define AP1 (1 << 5)
#define AP0 (1 << 4)
#define XN (1 << 0)
/*
* cpu_v6_set_pte(ptep, pte)
*
* Set a level 2 translation table entry.
*
* - ptep - pointer to level 2 translation table entry
* (hardware version is stored at -1024 bytes)
* - pte - PTE value to store
*
* Permissions:
* YUWD APX AP1 AP0 SVC User
* 0xxx 0 0 0 no acc no acc
* 100x 1 0 1 r/o no acc
* 10x0 1 0 1 r/o no acc
* 1011 0 0 1 r/w no acc
* 110x 1 1 0 r/o r/o
* 11x0 1 1 0 r/o r/o
* 1111 0 1 1 r/w r/w
*/
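/*
 * Worked example (derived from the table above): a present, young, dirty,
 * writable user page (YUWD = 1111) keeps AP0, gains AP1 and nG, and leaves
 * APX clear, i.e. read/write for both SVC and user.  An old page (Y = 0)
 * has APX/AP1/AP0 cleared, so any access faults and the kernel can mark
 * the page young before restoring permissions.
 */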
ENTRY(cpu_v6_set_pte)
str r1, [r0], #-2048 @ linux version
bic r2, r1, #0x00000ff0
bic r2, r2, #0x00000003
orr r2, r2, #AP0 | 2
tst r1, #L_PTE_WRITE
tstne r1, #L_PTE_DIRTY
orreq r2, r2, #APX
tst r1, #L_PTE_USER
orrne r2, r2, #AP1 | nG
tstne r2, #APX
eorne r2, r2, #AP0
tst r1, #L_PTE_YOUNG
biceq r2, r2, #APX | AP1 | AP0
@ tst r1, #L_PTE_EXEC
@ orreq r2, r2, #XN
tst r1, #L_PTE_PRESENT
moveq r2, #0
str r2, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
mov pc, lr
cpu_v6_name:
.asciz "Some Random V6 Processor"
.align
.section ".text.init", #alloc, #execinstr
/*
* __v6_setup
*
* Initialise TLB, Caches, and MMU state ready to switch the MMU
* on. Return in r0 the new CP15 C1 control register setting.
*
* We automatically detect if we have a Harvard cache, and use the
* Harvard cache control instructions instead of the unified cache
* control instructions.
*
* This should be able to cover all ARMv6 cores.
*
* It is assumed that:
* - cache type register is implemented
*/
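/*
 * Worked example: bit 24 of the cache type register is the S bit; when it
 * is set the caches are separate (Harvard), so the clean+invalidate below
 * is issued for D (c7, c14, 0) and I (c7, c5, 0) individually, otherwise
 * the single unified operation (c7, c15, 0) is used.
 */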
__v6_setup:
mrc p15, 0, r10, c0, c0, 1 @ read cache type register
tst r10, #1 << 24 @ Harvard cache?
mov r10, #0
mcrne p15, 0, r10, c7, c14, 0 @ clean+invalidate D cache
mcrne p15, 0, r10, c7, c5, 0 @ invalidate I cache
mcreq p15, 0, r10, c7, c15, 0 @ clean+invalidate cache
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
mcr p15, 0, r4, c2, c0, 0 @ load TTB0
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
mrc p15, 0, r0, c1, c0, 0 @ read control register
ldr r10, cr1_clear @ get mask for bits to clear
bic r0, r0, r10 @ clear them
ldr r10, cr1_set @ get mask for bits to set
orr r0, r0, r10 @ set them
mov pc, lr @ return to head.S:__ret
/*
* V X F I D LR
* .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
* rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 0 110 0011 1.00 .111 1101 < we want
*/
.type cr1_clear, #object
.type cr1_set, #object
cr1_clear:
.word 0x0120c302
cr1_set:
.word 0x00c0387d
.type v6_processor_functions, #object
ENTRY(v6_processor_functions)
.word v6_early_abort
.word cpu_v6_proc_init
.word cpu_v6_proc_fin
.word cpu_v6_reset
.word cpu_v6_do_idle
.word cpu_v6_dcache_clean_area
.word cpu_v6_switch_mm
.word cpu_v6_set_pte
.size v6_processor_functions, . - v6_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv6"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v6"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
/*
* Match any ARMv6 processor core.
*/
.type __v6_proc_info, #object
__v6_proc_info:
.long 0x00070000
.long 0x00ff0000
.long 0x00000c0e
b __v6_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_FAST_MULT | HWCAP_VFP
.long cpu_v6_name
.long v6_processor_functions
.long v6wbi_tlb_fns
.long v6_user_fns
.long v6_cache_fns
.size __v6_proc_info, . - __v6_proc_info
/*
* linux/arch/arm/mm/tlb-v6.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARM architecture version 6 TLB handling functions.
* These assume a split I/D TLB.
*/
#include <linux/linkage.h>
#include <asm/constants.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
#define HARVARD_TLB
/*
* v6wbi_flush_user_tlb_range(start, end, vma)
*
* Invalidate a range of TLB entries in the specified address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
* both the I and the D TLBs on Harvard-style TLBs
*/
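/*
 * Worked example (illustrative values): flushing user address 0x40001000
 * in a context whose ASID is 0x17 issues the invalidate with the MVA+ASID
 * value 0x40001017, i.e. (address & PAGE_MASK) | (context.id & 255),
 * stepping one page at a time up to (but not including) the end address.
 */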
ENTRY(v6wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mov ip, #0
mmid r3, r3 @ get vm_mm->context.id
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
asid r3, r3 @ mask ASID
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
vma_vm_flags r2, r2 @ get vma->vm_flags
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA (was 1)
tst r2, #VM_EXEC @ Executable area ?
mcrne p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA (was 1)
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA (was 1)
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
/*
* v6wbi_flush_kern_tlb_range(start,end)
*
* Invalidate a range of kernel TLB entries
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
ENTRY(v6wbi_flush_kern_tlb_range)
mov r2, #0
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
mcr p15, 0, r0, c8, c6, 1 @ TLB invalidate D MVA
mcr p15, 0, r0, c8, c5, 1 @ TLB invalidate I MVA
#else
mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate MVA
#endif
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov pc, lr
.section ".text.init", #alloc, #execinstr
.type v6wbi_tlb_fns, #object
ENTRY(v6wbi_tlb_fns)
.long v6wbi_flush_user_tlb_range
.long v6wbi_flush_kern_tlb_range
.long v6wbi_tlb_flags
.size v6wbi_tlb_fns, . - v6wbi_tlb_fns
......@@ -50,11 +50,6 @@
#ifdef MODULE
void fp_send_sig(unsigned long sig, struct task_struct *p, int priv);
#if LINUX_VERSION_CODE > 0x20115
MODULE_AUTHOR("Scott Bambrough <scottb@rebel.com>");
MODULE_DESCRIPTION("NWFPE floating point emulator (" NWFPE_BITS " precision)");
#endif
#else
#define fp_send_sig send_sig
#define kern_fp_enter fp_enter
......@@ -172,3 +167,7 @@ void float_raise(signed char flags)
module_init(fpe_init);
module_exit(fpe_exit);
MODULE_AUTHOR("Scott Bambrough <scottb@rebel.com>");
MODULE_DESCRIPTION("NWFPE floating point emulator (" NWFPE_BITS " precision)");
MODULE_LICENSE("GPL");
......@@ -6,7 +6,7 @@
# To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk
#
# Last update: Thu Sep 18 17:15:55 2003
# Last update: Tue Feb 24 17:17:50 2004
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
......@@ -202,7 +202,7 @@ karo ARCH_KARO KARO 190
fester SA1100_FESTER FESTER 191
gpi ARCH_GPI GPI 192
smdk2410 ARCH_SMDK2410 SMDK2410 193
premium ARCH_PREMIUM PREMIUM 194
i519 ARCH_I519 I519 194
nexio SA1100_NEXIO NEXIO 195
bitbox SA1100_BITBOX BITBOX 196
g200 SA1100_G200 G200 197
......@@ -259,7 +259,7 @@ stork_nest ARCH_STORK_NEST STORK_NEST 247
stork_egg ARCH_STORK_EGG STORK_EGG 248
wismo SA1100_WISMO WISMO 249
ezlinx ARCH_EZLINX EZLINX 250
at91rm9200 ARCH_AT91 AT91 251
at91rm9200 ARCH_AT91RM9200 AT91RM9200 251
orion ARCH_ORION ORION 252
neptune ARCH_NEPTUNE NEPTUNE 253
hackkit SA1100_HACKKIT HACKKIT 254
......@@ -295,7 +295,7 @@ viper ARCH_VIPER VIPER 283
adsbitsyplus SA1100_ADSBITSYPLUS ADSBITSYPLUS 284
adsagc SA1100_ADSAGC ADSAGC 285
stp7312 ARCH_STP7312 STP7312 286
nx_phnx ARCH_PXA255 PXA255 287
nx_phnx MACH_NX_PHNX NX_PHNX 287
wep_ep250 ARCH_WEP_EP250 WEP_EP250 288
inhandelf3 ARCH_INHANDELF3 INHANDELF3 289
adi_coyote ARCH_ADI_COYOTE ADI_COYOTE 290
......@@ -364,7 +364,7 @@ ixrd425 ARCH_IXRD425 IXRD425 352
iq80315 ARCH_IQ80315 IQ80315 353
nmp7312 ARCH_NMP7312 NMP7312 354
cx861xx ARCH_CX861XX CX861XX 355
ixp2000 ARCH_IXP2000 IXP2000 356
enp2611 ARCH_ENP2611 ENP2611 356
xda SA1100_XDA XDA 357
csir_ims ARCH_CSIR_IMS CSIR_IMS 358
ixp421_dnaeeth ARCH_IXP421_DNAEETH IXP421_DNAEETH 359
......@@ -385,3 +385,92 @@ gumstik ARCH_GUMSTIK GUMSTIK 373
rcube ARCH_RCUBE RCUBE 374
rea_olv ARCH_REA_OLV REA_OLV 375
pxa_iphone ARCH_PXA_IPHONE PXA_IPHONE 376
s3c3410 ARCH_S3C3410 S3C3410 377
espd_4510b ARCH_ESPD_4510B ESPD_4510B 378
mp1x ARCH_MP1X MP1X 379
at91rm9200tb ARCH_AT91RM9200TB AT91RM9200TB 380
adsvgx ARCH_ADSVGX ADSVGX 381
omap1610 ARCH_OMAP1610 OMAP1610 382
pelee ARCH_PELEE PELEE 383
e7xx ARCH_E7XX E7XX 384
iq80331 ARCH_IQ80331 IQ80331 385
versatile_pb ARCH_VERSATILE_PB VERSATILE_PB 387
kev7a400 MACH_KEV7A400 KEV7A400 388
lpd7a400 MACH_LPD7A400 LPD7A400 389
lpd7a404 MACH_LPD7A404 LPD7A404 390
fujitsu_camelot ARCH_FUJITSU_CAMELOT FUJITSU_CAMELOT 391
janus2m ARCH_JANUS2M JANUS2M 392
embtf MACH_EMBTF EMBTF 393
hpm MACH_HPM HPM 394
smdk2410tk MACH_SMDK2410TK SMDK2410TK 395
smdk2410aj MACH_SMDK2410AJ SMDK2410AJ 396
streetracer MACH_STREETRACER STREETRACER 397
eframe MACH_EFRAME EFRAME 398
csb337 MACH_CSB337 CSB337 399
pxa_lark MACH_PXA_LARK PXA_LARK 400
pxa_pnp2110 MACH_PNP2110 PNP2110 401
tcc72x MACH_TCC72X TCC72X 402
altair MACH_ALTAIR ALTAIR 403
kc3 MACH_KC3 KC3 404
sinteftd MACH_SINTEFTD SINTEFTD 405
mainstone MACH_MAINSTONE MAINSTONE 406
aday4x MACH_ADAY4X ADAY4X 407
lite300 MACH_LITE300 LITE300 408
s5c7376 MACH_S5C7376 S5C7376 409
mt02 MACH_MT02 MT02 410
mport3s MACH_MPORT3S MPORT3S 411
ra_alpha MACH_RA_ALPHA RA_ALPHA 412
xcep MACH_XCEP XCEP 413
arcom_mercury MACH_ARCOM_MERCURY ARCOM_MERCURY 414
stargate MACH_STARGATE STARGATE 415
armadilloj MACH_ARMADILLOJ ARMADILLOJ 416
elroy_jack MACH_ELROY_JACK ELROY_JACK 417
backend MACH_BACKEND BACKEND 418
s5linbox MACH_S5LINBOX S5LINBOX 419
nomadik MACH_NOMADIK NOMADIK 420
ia_cpu_9200 MACH_IA_CPU_9200 IA_CPU_9200 421
at91_bja1 MACH_AT91_BJA1 AT91_BJA1 422
corgi MACH_CORGI CORGI 423
poodle MACH_POODLE POODLE 424
ten MACH_TEN TEN 425
roverp5p MACH_ROVERP5P ROVERP5P 426
sc2700 MACH_SC2700 SC2700 427
ex_eagle MACH_EX_EAGLE EX_EAGLE 428
nx_pxa12 MACH_NX_PXA12 NX_PXA12 429
nx_pxa5 MACH_NX_PXA5 NX_PXA5 430
blackboard2 MACH_BLACKBOARD2 BLACKBOARD2 431
i819 MACH_I819 I819 432
ixmb995e MACH_IXMB995E IXMB995E 433
skyrider MACH_SKYRIDER SKYRIDER 434
skyhawk MACH_SKYHAWK SKYHAWK 435
enterprise MACH_ENTERPRISE ENTERPRISE 436
dep2410 MACH_DEP2410 DEP2410 437
armcore MACH_ARMCORE ARMCORE 438
hobbit MACH_HOBBIT HOBBIT 439
h7210 MACH_H7210 H7210 440
pxa_netdcu5 MACH_PXA_NETDCU5 PXA_NETDCU5 441
acc MACH_ACC ACC 442
esl_sarva MACH_ESL_SARVA ESL_SARVA 443
xm250 MACH_XM250 XM250 444
t6tc1xb MACH_T6TC1XB T6TC1XB 445
ess710 MACH_ESS710 ESS710 446
mx3ads MACH_MX3ADS MX3ADS 447
himalaya MACH_HIMALAYA HIMALAYA 448
bolfenk MACH_BOLFENK BOLFENK 449
at91rm9200kr MACH_AT91RM9200KR AT91RM9200KR 450
edb9312 MACH_EDB9312 EDB9312 451
omap_generic MACH_OMAP_GENERIC OMAP_GENERIC 452
aximx3 MACH_AXIMX3 AXIMX3 453
eb67xdip MACH_EB67XDIP EB67XDIP 454
webtxs MACH_WEBTXS WEBTXS 455
hawk MACH_HAWK HAWK 456
ccat91sbc001 MACH_CCAT91SBC001 CCAT91SBC001 457
expresso MACH_EXPRESSO EXPRESSO 458
h4000 MACH_H4000 H4000 459
dino MACH_DINO DINO 460
ml675k MACH_ML675K ML675K 461
edb9301 MACH_EDB9301 EDB9301 462
edb9315 MACH_EDB9315 EDB9315 463
reciva_tt MACH_RECIVA_TT RECIVA_TT 464
cstcb01 MACH_CSTCB01 CSTCB01 465
cstcb1 MACH_CSTCB1 CSTCB1 466
/*
* linux/include/asm-arm/atomic.h
*
* Copyright (c) 1996 Russell King.
* Copyright (C) 1996 Russell King.
* Copyright (C) 2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 27-06-1996 RMK Created
* 13-04-1997 RMK Made functions atomic!
* 07-12-1997 RMK Upgraded for v2.1.
* 26-08-1998 PJB Added #ifdef __KERNEL__
*/
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H
#include <linux/config.h>
#ifdef CONFIG_SMP
#error SMP not supported
#endif
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
#include <asm/system.h>
#define atomic_read(v) ((v)->counter)
#if __LINUX_ARM_ARCH__ >= 6
/*
* ARMv6 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens. Writing to 'v->counter'
* without using the following operations WILL break the atomic
* nature of these ops.
*/
static inline void atomic_set(atomic_t *v, int i)
{
unsigned long tmp;
__asm__ __volatile__("@ atomic_set\n"
"1: ldrex %0, [%1]\n"
" strex %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&v->counter), "r" (i)
: "cc");
}
static inline void atomic_add(int i, volatile atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
static inline void atomic_sub(int i, volatile atomic_t *v)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
static inline int atomic_dec_and_test(volatile atomic_t *v)
{
unsigned long tmp;
int result;
__asm__ __volatile__("@ atomic_dec_and_test\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=r" (tmp)
: "r" (&v->counter)
: "cc");
return result == 0;
}
static inline int atomic_add_negative(int i, volatile atomic_t *v)
{
unsigned long tmp;
int result;
__asm__ __volatile__("@ atomic_add_negative\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
return result < 0;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned long tmp, tmp2;
__asm__ __volatile__("@ atomic_clear_mask\n"
"1: ldrex %0, %2\n"
" bic %0, %0, %3\n"
" strex %1, %0, %2\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (addr), "Ir" (mask)
: "cc");
}
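/*
 * A minimal usage sketch (hypothetical refcount, not part of this header):
 * atomic_dec_and_test() returns non-zero only for the caller that takes the
 * counter to zero, so exactly one path performs the final release even when
 * several CPUs race through the ldrex/strex loops above.
 */
#if 0
static atomic_t example_ref = ATOMIC_INIT(1);

static void example_get(void)
{
	atomic_inc(&example_ref);
}

static int example_put(void)
{
	return atomic_dec_and_test(&example_ref);	/* 1 => last reference gone */
}
#endif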
#else /* ARM_ARCH_6 */
#include <asm/system.h>
#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
#define atomic_set(v,i) (((v)->counter) = (i))
static inline void atomic_add(int i, volatile atomic_t *v)
......@@ -103,6 +209,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
local_irq_restore(flags);
}
#endif /* __LINUX_ARM_ARCH__ */
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
......
......@@ -69,6 +69,14 @@
# endif
#endif
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
......
......@@ -22,12 +22,12 @@ extern void consistent_sync(void *kaddr, size_t size, int rw);
* For SA-1111 these functions are "magic" and utilize bounce
* buffers as needed to work around SA-1111 DMA bugs.
*/
dma_addr_t sa1111_map_single(void *, size_t, int);
void sa1111_unmap_single(dma_addr_t, size_t, int);
int sa1111_map_sg(struct scatterlist *, int, int);
void sa1111_unmap_sg(struct scatterlist *, int, int);
void sa1111_dma_sync_single(dma_addr_t, size_t, int);
void sa1111_dma_sync_sg(struct scatterlist *, int, int);
dma_addr_t sa1111_map_single(struct device *dev, void *, size_t, enum dma_data_direction);
void sa1111_unmap_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
int sa1111_map_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_unmap_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_dma_sync_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
void sa1111_dma_sync_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
#ifdef CONFIG_SA1111
......@@ -122,7 +122,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
return sa1111_map_single(cpu_addr, size, dir);
return sa1111_map_single(dev, cpu_addr, size, dir);
consistent_sync(cpu_addr, size, dir);
return __virt_to_bus((unsigned long)cpu_addr);
......@@ -169,7 +169,7 @@ dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
sa1111_unmap_single(handle, size, dir);
sa1111_unmap_single(dev, handle, size, dir);
/* nothing to do */
}
......@@ -224,7 +224,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
int i;
if (dmadev_is_sa1111(dev))
return sa1111_map_sg(sg, nents, dir);
return sa1111_map_sg(dev, sg, nents, dir);
for (i = 0; i < nents; i++, sg++) {
char *virt;
......@@ -253,7 +253,7 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_unmap_sg(sg, nents, dir);
sa1111_unmap_sg(dev, sg, nents, dir);
return;
}
......@@ -281,7 +281,7 @@ dma_sync_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single(handle, size, dir);
sa1111_dma_sync_single(dev, handle, size, dir);
return;
}
......@@ -308,7 +308,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nents,
int i;
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg(sg, nents, dir);
sa1111_dma_sync_sg(dev, sg, nents, dir);
return;
}
......
......@@ -38,6 +38,7 @@
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
*/
#undef CPU_ABORT_HANDLER
#undef MULTI_ABORT
......@@ -98,6 +99,14 @@
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_ABORT_HANDLER
# define MULTI_ABORT 1
# else
# define CPU_ABORT_HANDLER v6_early_abort
# endif
#endif
#ifndef CPU_ABORT_HANDLER
#error Unknown data abort handler type
#endif
......
......@@ -12,6 +12,127 @@
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H
#if __LINUX_ARM_ARCH__ >= 6
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op\n" \
"1: ldrex lr, [%0]\n" \
" sub lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movmi ip, %0\n" \
" blmi " #fail \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
#define __down_op_ret(ptr,fail) \
({ \
unsigned int ret; \
__asm__ __volatile__( \
"@ down_op_ret\n" \
"1: ldrex lr, [%1]\n" \
" sub lr, lr, %2\n" \
" strex ip, lr, [%1]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " #fail "\n" \
" mov %0, ip" \
: "=&r" (ret) \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
ret; \
})
#define __up_op(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movle ip, %0\n" \
" blle " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
/*
* The value 0x01000000 supports up to 128 processors and
* lots of processes. BIAS must be chosen such that sub'ing
* BIAS once per CPU will result in the long remaining
* negative.
*/
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
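/*
 * Worked example: a writer subtracts the whole BIAS, so the count drops
 * from 0x01000000 to exactly 0 only when no readers or writers are active
 * (any other result takes the blne failure path).  Each reader subtracts 1;
 * while a writer holds the lock the count is 0, so a reader's decrement
 * goes negative and takes the blmi failure path in __down_op_read.
 */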
#define __down_op_write(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op_write\n" \
"1: ldrex lr, [%0]\n" \
" sub lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" movne ip, %0\n" \
" blne " #fail \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc", "memory"); \
})
#define __up_op_write(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" movcs ip, %0\n" \
" blcs " #wake \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc", "memory"); \
})
#define __down_op_read(ptr,fail) \
__down_op(ptr, fail)
#define __up_op_read(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
"1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \
" teq ip, #0\n" \
" bne 1b\n" \
" teq lr, #0\n" \
" moveq ip, %0\n" \
" bleq " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc", "memory"); \
})
#else
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__( \
......@@ -137,3 +258,5 @@
})
#endif
#endif
......@@ -84,6 +84,14 @@
# endif
#endif
#ifdef CONFIG_CPU_COPY_V6
# ifdef _USER
# define MULTI_USER 1
# else
# define _USER v6
# endif
#endif
#ifndef _USER
#error Unknown user operations model
#endif
......
......@@ -130,6 +130,14 @@
# define CPU_NAME cpu_xscale
# endif
# endif
# ifdef CONFIG_CPU_V6
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
# endif
#endif
#ifndef MULTI_CPU
......
......@@ -6,6 +6,10 @@
* or page size, whichever is greater since the cache aliases
* every size/ways bytes.
*/
#if __LINUX_ARM_ARCH__ > 5
#define SHMLBA (4 * PAGE_SIZE)
#else
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#endif
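/*
 * Worked example (illustrative geometry): a 64K, 4-way set associative
 * VIPT cache aliases every 64K / 4 = 16K, i.e. four 4K pages, hence the
 * 4 * PAGE_SIZE alignment for shared mappings on ARMv6 and later.
 */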
#endif /* _ASMARM_SHMPARAM_H */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#error ARM architecture does not support SMP spin locks
#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
* ARMv6 Spin-locking.
*
* We (exclusively) read the old value, and decrement it. If it
* hits zero, we may have won the lock, so we try (exclusively)
* storing it.
*
* Unlocked value: 0
* Locked value: 1
*/
typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x) ((x)->lock != 0)
#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
static inline void _raw_spin_lock(spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
: "cc", "memory");
}
static inline int _raw_spin_trylock(spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
: "cc", "memory");
return tmp == 0;
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
__asm__ __volatile__(
" str %1, [%0]"
:
: "r" (&lock->lock), "r" (0)
: "cc", "memory");
}
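/*
 * A minimal usage sketch (hypothetical function, not part of this header);
 * normal code goes through the generic spin_lock()/spin_unlock() wrappers
 * rather than calling the _raw_* primitives directly.
 */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static unsigned int example_counter;

static void example_increment(void)
{
	_raw_spin_lock(&example_lock);
	example_counter++;		/* protected by example_lock */
	_raw_spin_unlock(&example_lock);
}
#endif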
/*
* RWLOCKS
*/
typedef struct {
volatile unsigned int lock;
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
/*
* Write locks are easy - we just set bit 31. When unlocking, we can
* just write zero since the lock is exclusively held.
*/
static inline void _raw_write_lock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
: "=r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
}
static inline void _raw_write_unlock(rwlock_t *rw)
{
__asm__ __volatile__(
"str %1, [%0]"
:
: "r" (&rw->lock), "r" (0)
: "cc", "memory");
}
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
* - Increment it.
* - Store new lock value if positive, and we still own this location.
* If the value is negative, we've already failed.
* - If we failed to store the value, we want a negative result.
* - If we failed, try again.
* Unlocking is similarly hairy. We may have multiple read locks
* currently active. However, we know we won't have any write
* locks.
*/
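/*
 * Worked example: with the lock write-held (bit 31 set, value 0x80000000),
 * a reader's "adds #1" produces a negative result, strexpl is skipped and
 * the rsbpls/bmi pair loops until the writer stores 0.  With the lock free
 * or read-held, the incremented value is stored and the reader proceeds;
 * a failed strex also loops via the negative rsbpls result.
 */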
static inline void _raw_read_lock(rwlock_t *rw)
{
unsigned long tmp, tmp2;
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
" rsbpls %0, %1, #0\n"
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc", "memory");
}
static inline void _raw_read_unlock(rwlock_t *rw)
{
unsigned long tmp, tmp2;
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc", "memory");
}
static inline int _raw_write_trylock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
return tmp == 0;
}
#endif /* __ASM_SPINLOCK_H */
......@@ -12,7 +12,8 @@
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv6 7
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
/*
* CR1 bits (CP#15 CR1)
......@@ -123,6 +124,26 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
mb(); \
} while (0)
/*
* CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6
#define local_irq_save(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_save\n" \
"cpsid i" \
: "=r" (x) : : "memory", "cc"); \
})
#define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
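/*
 * A minimal usage sketch (hypothetical function): the CPS-based macros
 * above pair with the usual local_irq_restore() defined with the common
 * helpers later in this header.
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* CPSID i; previous CPSR kept in flags */
	/* ... code that must not be interrupted ... */
	local_irq_restore(flags);
}
#endif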
#else
/*
* Save the current interrupt enable state & disable IRQs
*/
......@@ -199,6 +220,8 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
: "memory", "cc"); \
})
#endif
/*
* Save the current interrupt enable state.
*/
......