Commit d9142025 authored by Linus Torvalds

Merge tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

arm-soc fixes for 3.3-rc

* A series of OMAP regression fixes for merge window fallout
* Two patches for DaVinci: one removes some misdefined clocks, the other
  is a regression fix for merge window fallout
* Two patches that make Broadcom bcmring build again (and remove a
  bunch of unused code in the process)

* tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
  ARM: bcmring: fix build failure in mach-bcmring/arch.c
  ARM: bcmring: remove unused DMA map code
  ARM: davinci: update mdio bus name
  ARM: OMAP2+: arch/arm/mach-omap2/smartreflex.c: add missing iounmap
  ARM: OMAP2+: arch/arm/mach-omap2/devices.c: introduce missing kfree
  ARM: OMAP: fix MMC2 loopback clock handling
  ARM: OMAP: fix erroneous mmc2 clock change on mmc3 setup
  ARM: OMAP2+: GPMC: fix device size setup
  ARM: OMAP2+: timer: Fix crash due to wrong arg to __omap_dm_timer_read_counter
  ARM: OMAP3: hwmod data: register dss hwmods after dss_core
  ARM: OMAP2/3: PRM: fix missing plat/irqs.h build breakage
  ARM: OMAP2+: io: fix compilation breakage on 2420-only configs
  ARM: OMAP4: hwmod data: Add names for DMIC memory address space
  ARM: OMAP3: hwmod data: add SYSC_HAS_ENAWAKEUP for dispc
  ARM: OMAP2+: hwmod data: split omap2/3 dispc hwmod class
  ARM: davinci: DA850: remove non-existing pll1_sysclk4-7 clocks
  ARM: OMAP2: fix regulator warnings
  ARM: OMAP2: fix omap3 touchbook kconfig warning
  i2c: OMAP: Fix OMAP1 build error
parents 31c150a1 ca43784d
@@ -194,6 +194,6 @@ MACHINE_START(BCMRING, "BCMRING")
.init_early = bcmring_init_early,
.init_irq = bcmring_init_irq,
.timer = &bcmring_timer,
- .init_machine = bcmring_init_machine
+ .init_machine = bcmring_init_machine,
+ .restart = bcmring_restart,
MACHINE_END
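The arch.c failure was purely syntactic: the restart-hook series appended a .restart member after an .init_machine initializer that had lost its trailing comma. A minimal sketch of why the comma matters in a designated initializer list (hypothetical struct and names, not the kernel's):

struct example_machine_desc {
	void (*init_machine)(void);
	void (*restart)(char mode, const char *cmd);
};

static void example_init(void)
{
}

static void example_restart(char mode, const char *cmd)
{
}

static struct example_machine_desc example_machine = {
	.init_machine	= example_init,		/* this comma is what was missing */
	.restart	= example_restart,
};

Without the comma, ".init_machine = example_init .restart = example_restart" parses as one malformed expression and the whole initializer list is rejected.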
@@ -33,17 +33,11 @@
#include <mach/timer.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <mach/dma.h>
/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
/* especially since dc4 doesn't use kmalloc'd memory. */
#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
@@ -53,58 +47,18 @@
#define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)
#define DMA_MAP_DEBUG 0
#if DMA_MAP_DEBUG
# define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args)
#else
# define DMA_MAP_PRINT(fmt, args...)
#endif
/* ---- Private Variables ------------------------------------------------ */
static DMA_Global_t gDMA;
static struct proc_dir_entry *gDmaDir;
static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
#include "dma_device.c"
/* ---- Private Function Prototypes -------------------------------------- */
/* ---- Functions ------------------------------------------------------- */
/****************************************************************************/
/**
* Displays information for /proc/dma/mem-type
*/
/****************************************************************************/
static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
int count, int *eof, void *data)
{
int len = 0;
len += sprintf(buf + len, "dma_map_mem statistics\n");
len +=
sprintf(buf + len, "coherent: %d\n",
atomic_read(&gDmaStatMemTypeCoherent));
len +=
sprintf(buf + len, "kmalloc: %d\n",
atomic_read(&gDmaStatMemTypeKmalloc));
len +=
sprintf(buf + len, "vmalloc: %d\n",
atomic_read(&gDmaStatMemTypeVmalloc));
len +=
sprintf(buf + len, "user: %d\n",
atomic_read(&gDmaStatMemTypeUser));
return len;
}
/****************************************************************************/
/**
* Displays information for /proc/dma/channels
@@ -846,8 +800,6 @@ int dma_init(void)
dma_proc_read_channels, NULL);
create_proc_read_entry("devices", 0, gDmaDir,
dma_proc_read_devices, NULL);
create_proc_read_entry("mem-type", 0, gDmaDir,
dma_proc_read_mem_type, NULL);
}
out:
@@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for.
}
EXPORT_SYMBOL(dma_set_device_handler);
/****************************************************************************/
/**
* Initializes a memory mapping structure
*/
/****************************************************************************/
int dma_init_mem_map(DMA_MemMap_t *memMap)
{
memset(memMap, 0, sizeof(*memMap));
sema_init(&memMap->lock, 1);
return 0;
}
EXPORT_SYMBOL(dma_init_mem_map);
/****************************************************************************/
/**
* Releases any memory currently being held by a memory mapping structure.
*/
/****************************************************************************/
int dma_term_mem_map(DMA_MemMap_t *memMap)
{
down(&memMap->lock); /* Just being paranoid */
/* Free up any allocated memory */
up(&memMap->lock);
memset(memMap, 0, sizeof(*memMap));
return 0;
}
EXPORT_SYMBOL(dma_term_mem_map);
/****************************************************************************/
/**
* Looks at a memory address and categorizes it.
*
* @return One of the values from the DMA_MemType_t enumeration.
*/
/****************************************************************************/
DMA_MemType_t dma_mem_type(void *addr)
{
unsigned long addrVal = (unsigned long)addr;
if (addrVal >= CONSISTENT_BASE) {
/* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
/* dma_alloc_xxx pages are physically and virtually contiguous */
return DMA_MEM_TYPE_DMA;
}
/* Technically, we could add one more classification. Addresses between VMALLOC_END */
/* and the beginning of the DMA virtual address could be considered to be I/O space. */
/* Right now, nobody cares about this particular classification, so we ignore it. */
if (is_vmalloc_addr(addr)) {
/* Address comes from the vmalloc'd region. Pages are virtually */
/* contiguous but NOT physically contiguous */
return DMA_MEM_TYPE_VMALLOC;
}
if (addrVal >= PAGE_OFFSET) {
/* PAGE_OFFSET is typically 0xC0000000 */
/* kmalloc'd pages are physically contiguous */
return DMA_MEM_TYPE_KMALLOC;
}
return DMA_MEM_TYPE_USER;
}
EXPORT_SYMBOL(dma_mem_type);
/****************************************************************************/
/**
* Looks at a memory address and determines if we support DMA'ing to/from
* that type of memory.
*
* @return boolean -
* return value != 0 means dma supported
* return value == 0 means dma not supported
*/
/****************************************************************************/
int dma_mem_supports_dma(void *addr)
{
DMA_MemType_t memType = dma_mem_type(addr);
return (memType == DMA_MEM_TYPE_DMA)
#if ALLOW_MAP_OF_KMALLOC_MEMORY
|| (memType == DMA_MEM_TYPE_KMALLOC)
#endif
|| (memType == DMA_MEM_TYPE_USER);
}
EXPORT_SYMBOL(dma_mem_supports_dma);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return
*/
/****************************************************************************/
int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
enum dma_data_direction dir /* Direction that the mapping will be going */
) {
int rc;
down(&memMap->lock);
DMA_MAP_PRINT("memMap: %p\n", memMap);
if (memMap->inUse) {
printk(KERN_ERR "%s: memory map %p is already being used\n",
__func__, memMap);
rc = -EBUSY;
goto out;
}
memMap->inUse = 1;
memMap->dir = dir;
memMap->numRegionsUsed = 0;
rc = 0;
out:
DMA_MAP_PRINT("returning %d", rc);
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_map_start);
/****************************************************************************/
/**
* Adds a segment of memory to a memory map. Each segment is both
* physically and virtually contiguous.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */
DMA_Region_t *region, /* Region that the segment belongs to */
void *virtAddr, /* Virtual address of the segment being added */
dma_addr_t physAddr, /* Physical address of the segment being added */
size_t numBytes /* Number of bytes of the segment being added */
) {
DMA_Segment_t *segment;
DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
physAddr, numBytes);
/* Sanity check */
if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
|| (((unsigned long)virtAddr + numBytes)) >
((unsigned long)region->virtAddr + region->numBytes)) {
printk(KERN_ERR
"%s: virtAddr %p is outside region @ %p len: %d\n",
__func__, virtAddr, region->virtAddr, region->numBytes);
return -EINVAL;
}
if (region->numSegmentsUsed > 0) {
/* Check to see if this segment is physically contiguous with the previous one */
segment = &region->segment[region->numSegmentsUsed - 1];
if ((segment->physAddr + segment->numBytes) == physAddr) {
/* It is - just add on to the end */
DMA_MAP_PRINT("appending %d bytes to last segment\n",
numBytes);
segment->numBytes += numBytes;
return 0;
}
}
/* Reallocate to hold more segments, if required. */
if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
DMA_Segment_t *newSegment;
size_t oldSize =
region->numSegmentsAllocated * sizeof(*newSegment);
int newAlloc = region->numSegmentsAllocated + 4;
size_t newSize = newAlloc * sizeof(*newSegment);
newSegment = kmalloc(newSize, GFP_KERNEL);
if (newSegment == NULL) {
return -ENOMEM;
}
memcpy(newSegment, region->segment, oldSize);
memset(&((uint8_t *) newSegment)[oldSize], 0,
newSize - oldSize);
kfree(region->segment);
region->numSegmentsAllocated = newAlloc;
region->segment = newSegment;
}
segment = &region->segment[region->numSegmentsUsed];
region->numSegmentsUsed++;
segment->virtAddr = virtAddr;
segment->physAddr = physAddr;
segment->numBytes = numBytes;
DMA_MAP_PRINT("returning success\n");
return 0;
}
/****************************************************************************/
/**
* Adds a region of memory to a memory map. Each region is virtually
* contiguous, but not necessarily physically contiguous.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *mem, /* Virtual address that we want to get a map of */
size_t numBytes /* Number of bytes being mapped */
) {
unsigned long addr = (unsigned long)mem;
unsigned int offset;
int rc = 0;
DMA_Region_t *region;
dma_addr_t physAddr;
down(&memMap->lock);
DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
if (!memMap->inUse) {
printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
__func__);
rc = -EINVAL;
goto out;
}
/* Reallocate to hold more regions. */
if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
DMA_Region_t *newRegion;
size_t oldSize =
memMap->numRegionsAllocated * sizeof(*newRegion);
int newAlloc = memMap->numRegionsAllocated + 4;
size_t newSize = newAlloc * sizeof(*newRegion);
newRegion = kmalloc(newSize, GFP_KERNEL);
if (newRegion == NULL) {
rc = -ENOMEM;
goto out;
}
memcpy(newRegion, memMap->region, oldSize);
memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
kfree(memMap->region);
memMap->numRegionsAllocated = newAlloc;
memMap->region = newRegion;
}
region = &memMap->region[memMap->numRegionsUsed];
memMap->numRegionsUsed++;
offset = addr & ~PAGE_MASK;
region->memType = dma_mem_type(mem);
region->virtAddr = mem;
region->numBytes = numBytes;
region->numSegmentsUsed = 0;
region->numLockedPages = 0;
region->lockedPages = NULL;
switch (region->memType) {
case DMA_MEM_TYPE_VMALLOC:
{
atomic_inc(&gDmaStatMemTypeVmalloc);
/* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
/* vmalloc'd pages are not physically contiguous */
rc = -EINVAL;
break;
}
case DMA_MEM_TYPE_KMALLOC:
{
atomic_inc(&gDmaStatMemTypeKmalloc);
/* kmalloc'd pages are physically contiguous, so they'll have exactly */
/* one segment */
#if ALLOW_MAP_OF_KMALLOC_MEMORY
physAddr =
dma_map_single(NULL, mem, numBytes, memMap->dir);
rc = dma_map_add_segment(memMap, region, mem, physAddr,
numBytes);
#else
rc = -EINVAL;
#endif
break;
}
case DMA_MEM_TYPE_DMA:
{
/* dma_alloc_xxx pages are physically contiguous */
atomic_inc(&gDmaStatMemTypeCoherent);
physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
dma_sync_single_for_cpu(NULL, physAddr, numBytes,
memMap->dir);
rc = dma_map_add_segment(memMap, region, mem, physAddr,
numBytes);
break;
}
case DMA_MEM_TYPE_USER:
{
size_t firstPageOffset;
size_t firstPageSize;
struct page **pages;
struct task_struct *userTask;
atomic_inc(&gDmaStatMemTypeUser);
#if 1
/* If the pages are user pages, then the dma_mem_map_set_user_task function */
/* must have been previously called. */
if (memMap->userTask == NULL) {
printk(KERN_ERR
"%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
__func__);
return -EINVAL;
}
/* User pages need to be locked. */
firstPageOffset =
(unsigned long)region->virtAddr & (PAGE_SIZE - 1);
firstPageSize = PAGE_SIZE - firstPageOffset;
region->numLockedPages = (firstPageOffset
+ region->numBytes +
PAGE_SIZE - 1) / PAGE_SIZE;
pages =
kmalloc(region->numLockedPages *
sizeof(struct page *), GFP_KERNEL);
if (pages == NULL) {
region->numLockedPages = 0;
return -ENOMEM;
}
userTask = memMap->userTask;
down_read(&userTask->mm->mmap_sem);
rc = get_user_pages(userTask, /* task */
userTask->mm, /* mm */
(unsigned long)region->virtAddr, /* start */
region->numLockedPages, /* len */
memMap->dir == DMA_FROM_DEVICE, /* write */
0, /* force */
pages, /* pages (array of pointers to page) */
NULL); /* vmas */
up_read(&userTask->mm->mmap_sem);
if (rc != region->numLockedPages) {
kfree(pages);
region->numLockedPages = 0;
if (rc >= 0) {
rc = -EINVAL;
}
} else {
uint8_t *virtAddr = region->virtAddr;
size_t bytesRemaining;
int pageIdx;
rc = 0; /* Since get_user_pages returns +ve number */
region->lockedPages = pages;
/* We've locked the user pages. Now we need to walk them and figure */
/* out the physical addresses. */
/* The first page may be partial */
dma_map_add_segment(memMap,
region,
virtAddr,
PFN_PHYS(page_to_pfn
(pages[0])) +
firstPageOffset,
firstPageSize);
virtAddr += firstPageSize;
bytesRemaining =
region->numBytes - firstPageSize;
for (pageIdx = 1;
pageIdx < region->numLockedPages;
pageIdx++) {
size_t bytesThisPage =
(bytesRemaining >
PAGE_SIZE ? PAGE_SIZE :
bytesRemaining);
DMA_MAP_PRINT
("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
pageIdx, pages[pageIdx],
page_to_pfn(pages[pageIdx]),
PFN_PHYS(page_to_pfn
(pages[pageIdx])));
dma_map_add_segment(memMap,
region,
virtAddr,
PFN_PHYS(page_to_pfn
(pages
[pageIdx])),
bytesThisPage);
virtAddr += bytesThisPage;
bytesRemaining -= bytesThisPage;
}
}
#else
printk(KERN_ERR
"%s: User mode pages are not yet supported\n",
__func__);
/* user pages are not physically contiguous */
rc = -EINVAL;
#endif
break;
}
default:
{
printk(KERN_ERR "%s: Unsupported memory type: %d\n",
__func__, region->memType);
rc = -EINVAL;
break;
}
}
if (rc != 0) {
memMap->numRegionsUsed--;
}
out:
DMA_MAP_PRINT("returning %d\n", rc);
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_map_add_segment);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *mem, /* Virtual address that we want to get a map of */
size_t numBytes, /* Number of bytes being mapped */
enum dma_data_direction dir /* Direction that the mapping will be going */
) {
int rc;
rc = dma_map_start(memMap, dir);
if (rc == 0) {
rc = dma_map_add_region(memMap, mem, numBytes);
if (rc < 0) {
/* Since the add fails, this function will fail, and the caller won't */
/* call unmap, so we need to do it here. */
dma_unmap(memMap, 0);
}
}
return rc;
}
EXPORT_SYMBOL(dma_map_mem);
/****************************************************************************/
/**
* Setup a descriptor ring for a given memory map.
*
* It is assumed that the descriptor ring has already been initialized, and
* this routine will only reallocate a new descriptor ring if the existing
* one is too small.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
DMA_MemMap_t *memMap, /* Memory map that will be used */
dma_addr_t devPhysAddr /* Physical address of device */
) {
int rc;
int numDescriptors;
DMA_DeviceAttribute_t *devAttr;
DMA_Region_t *region;
DMA_Segment_t *segment;
dma_addr_t srcPhysAddr;
dma_addr_t dstPhysAddr;
int regionIdx;
int segmentIdx;
devAttr = &DMA_gDeviceAttribute[dev];
down(&memMap->lock);
/* Figure out how many descriptors we need */
numDescriptors = 0;
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
region = &memMap->region[regionIdx];
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
segmentIdx++) {
segment = &region->segment[segmentIdx];
if (memMap->dir == DMA_TO_DEVICE) {
srcPhysAddr = segment->physAddr;
dstPhysAddr = devPhysAddr;
} else {
srcPhysAddr = devPhysAddr;
dstPhysAddr = segment->physAddr;
}
rc =
dma_calculate_descriptor_count(dev, srcPhysAddr,
dstPhysAddr,
segment->
numBytes);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_calculate_descriptor_count failed: %d\n",
__func__, rc);
goto out;
}
numDescriptors += rc;
}
}
/* Adjust the size of the ring, if it isn't big enough */
if (numDescriptors > devAttr->ring.descriptorsAllocated) {
dma_free_descriptor_ring(&devAttr->ring);
rc =
dma_alloc_descriptor_ring(&devAttr->ring,
numDescriptors);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_alloc_descriptor_ring failed: %d\n",
__func__, rc);
goto out;
}
} else {
rc =
dma_init_descriptor_ring(&devAttr->ring,
numDescriptors);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_init_descriptor_ring failed: %d\n",
__func__, rc);
goto out;
}
}
/* Populate the descriptors */
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
region = &memMap->region[regionIdx];
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
segmentIdx++) {
segment = &region->segment[segmentIdx];
if (memMap->dir == DMA_TO_DEVICE) {
srcPhysAddr = segment->physAddr;
dstPhysAddr = devPhysAddr;
} else {
srcPhysAddr = devPhysAddr;
dstPhysAddr = segment->physAddr;
}
rc =
dma_add_descriptors(&devAttr->ring, dev,
srcPhysAddr, dstPhysAddr,
segment->numBytes);
if (rc < 0) {
printk(KERN_ERR
"%s: dma_add_descriptors failed: %d\n",
__func__, rc);
goto out;
}
}
}
rc = 0;
out:
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_map_create_descriptor_ring);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return
*/
/****************************************************************************/
int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
int dirtied /* non-zero if any of the pages were modified */
) {
int rc = 0;
int regionIdx;
int segmentIdx;
DMA_Region_t *region;
DMA_Segment_t *segment;
down(&memMap->lock);
for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
region = &memMap->region[regionIdx];
for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
segmentIdx++) {
segment = &region->segment[segmentIdx];
switch (region->memType) {
case DMA_MEM_TYPE_VMALLOC:
{
printk(KERN_ERR
"%s: vmalloc'd pages are not yet supported\n",
__func__);
rc = -EINVAL;
goto out;
}
case DMA_MEM_TYPE_KMALLOC:
{
#if ALLOW_MAP_OF_KMALLOC_MEMORY
dma_unmap_single(NULL,
segment->physAddr,
segment->numBytes,
memMap->dir);
#endif
break;
}
case DMA_MEM_TYPE_DMA:
{
dma_sync_single_for_cpu(NULL,
segment->
physAddr,
segment->
numBytes,
memMap->dir);
break;
}
case DMA_MEM_TYPE_USER:
{
/* Nothing to do here. */
break;
}
default:
{
printk(KERN_ERR
"%s: Unsupported memory type: %d\n",
__func__, region->memType);
rc = -EINVAL;
goto out;
}
}
segment->virtAddr = NULL;
segment->physAddr = 0;
segment->numBytes = 0;
}
if (region->numLockedPages > 0) {
int pageIdx;
/* Some user pages were locked. We need to go and unlock them now. */
for (pageIdx = 0; pageIdx < region->numLockedPages;
pageIdx++) {
struct page *page =
region->lockedPages[pageIdx];
if (memMap->dir == DMA_FROM_DEVICE) {
SetPageDirty(page);
}
page_cache_release(page);
}
kfree(region->lockedPages);
region->numLockedPages = 0;
region->lockedPages = NULL;
}
region->memType = DMA_MEM_TYPE_NONE;
region->virtAddr = NULL;
region->numBytes = 0;
region->numSegmentsUsed = 0;
}
memMap->userTask = NULL;
memMap->numRegionsUsed = 0;
memMap->inUse = 0;
out:
up(&memMap->lock);
return rc;
}
EXPORT_SYMBOL(dma_unmap);
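Everything from dma_init_mem_map() down was a bcmring-private mapping layer with no in-tree users, which is why it can simply be deleted. Drivers needing this get it from the generic streaming DMA API instead; a minimal sketch of the equivalent single-buffer case, assuming a driver with a real struct device (names illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Sketch: map one kmalloc'd (physically contiguous) buffer for a
 * CPU-to-device transfer with the generic streaming DMA API. */
static int example_dma_to_device(struct device *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -EIO;
	}

	/* ... hand 'handle' to the DMA controller and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}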
@@ -26,15 +26,9 @@
/* ---- Include Files ---------------------------------------------------- */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/semaphore.h>
#include <csp/dmacHw.h>
#include <mach/timer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
/* ---- Constants and Types ---------------------------------------------- */
@@ -111,78 +105,6 @@ typedef struct {
} DMA_DescriptorRing_t;
/****************************************************************************
*
* The DMA_MemType_t and DMA_MemMap_t are helper structures used to setup
* DMA chains from a variety of memory sources.
*
*****************************************************************************/
#define DMA_MEM_MAP_MIN_SIZE 4096 /* Pages less than this size are better */
/* off not being DMA'd. */
typedef enum {
DMA_MEM_TYPE_NONE, /* Not a valid setting */
DMA_MEM_TYPE_VMALLOC, /* Memory came from vmalloc call */
DMA_MEM_TYPE_KMALLOC, /* Memory came from kmalloc call */
DMA_MEM_TYPE_DMA, /* Memory came from dma_alloc_xxx call */
DMA_MEM_TYPE_USER, /* Memory came from user space. */
} DMA_MemType_t;
/* A segment represents a physically and virtually contiguous chunk of memory. */
/* i.e. each segment can be DMA'd */
/* A user of the DMA code will add memory regions. Each region may need to be */
/* represented by one or more segments. */
typedef struct {
void *virtAddr; /* Virtual address used for this segment */
dma_addr_t physAddr; /* Physical address this segment maps to */
size_t numBytes; /* Size of the segment, in bytes */
} DMA_Segment_t;
/* A region represents a virtually contiguous chunk of memory, which may be */
/* made up of multiple segments. */
typedef struct {
DMA_MemType_t memType;
void *virtAddr;
size_t numBytes;
/* Each region (virtually contiguous) consists of one or more segments. Each */
/* segment is virtually and physically contiguous. */
int numSegmentsUsed;
int numSegmentsAllocated;
DMA_Segment_t *segment;
/* When a region corresponds to user memory, we need to lock all of the pages */
/* down before we can figure out the physical addresses. The lockedPage array contains */
/* the pages that were locked, and which subsequently need to be unlocked once the */
/* memory is unmapped. */
unsigned numLockedPages;
struct page **lockedPages;
} DMA_Region_t;
typedef struct {
int inUse; /* Is this mapping currently being used? */
struct semaphore lock; /* Acquired when using this structure */
enum dma_data_direction dir; /* Direction this transfer is intended for */
/* In the event that we're mapping user memory, we need to know which task */
/* the memory is for, so that we can obtain the correct mm locks. */
struct task_struct *userTask;
int numRegionsUsed;
int numRegionsAllocated;
DMA_Region_t *region;
} DMA_MemMap_t;
/****************************************************************************
*
* The DMA_DeviceAttribute_t contains information which describes a
@@ -568,124 +490,6 @@ int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */
size_t numBytes /* Number of bytes in each destination buffer */
);
/****************************************************************************/
/**
* Initializes a DMA_MemMap_t data structure
*/
/****************************************************************************/
int dma_init_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
);
/****************************************************************************/
/**
* Releases any memory currently being held by a memory mapping structure.
*/
/****************************************************************************/
int dma_term_mem_map(DMA_MemMap_t *memMap /* Stores state information about the map */
);
/****************************************************************************/
/**
* Looks at a memory address and categorizes it.
*
* @return One of the values from the DMA_MemType_t enumeration.
*/
/****************************************************************************/
DMA_MemType_t dma_mem_type(void *addr);
/****************************************************************************/
/**
* Sets the process (aka userTask) associated with a mem map. This is
* required if user-mode segments will be added to the mapping.
*/
/****************************************************************************/
static inline void dma_mem_map_set_user_task(DMA_MemMap_t *memMap,
struct task_struct *task)
{
memMap->userTask = task;
}
/****************************************************************************/
/**
* Looks at a memory address and determines if we support DMA'ing to/from
* that type of memory.
*
* @return boolean -
* return value != 0 means dma supported
* return value == 0 means dma not supported
*/
/****************************************************************************/
int dma_mem_supports_dma(void *addr);
/****************************************************************************/
/**
* Initializes a memory map for use. Since this function acquires a
* semaphore within the memory map, it is VERY important that dma_unmap
* be called when you're finished using the map.
*/
/****************************************************************************/
int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
enum dma_data_direction dir /* Direction that the mapping will be going */
);
/****************************************************************************/
/**
* Adds a segment of memory to a memory map.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *mem, /* Virtual address that we want to get a map of */
size_t numBytes /* Number of bytes being mapped */
);
/****************************************************************************/
/**
* Creates a descriptor ring from a memory mapping.
*
* @return 0 on success, error code otherwise.
*/
/****************************************************************************/
int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
DMA_MemMap_t *memMap, /* Memory map that will be used */
dma_addr_t devPhysAddr /* Physical address of device */
);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return
*/
/****************************************************************************/
int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
void *addr, /* Virtual address that we want to get a map of */
size_t count, /* Number of bytes being mapped */
enum dma_data_direction dir /* Direction that the mapping will be going */
);
/****************************************************************************/
/**
* Maps in a memory region such that it can be used for performing a DMA.
*
* @return
*/
/****************************************************************************/
int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
int dirtied /* non-zero if any of the pages were modified */
);
/****************************************************************************/
/**
* Initiates a transfer when the descriptors have already been setup.
......
@@ -44,7 +44,7 @@
#include <mach/aemif.h>
#include <mach/spi.h>
-#define DA850_EVM_PHY_ID "0:00"
+#define DA850_EVM_PHY_ID "davinci_mdio-0:00"
#define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8)
#define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15)
......
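The same rename repeats for every DaVinci board below. The PHY id strings exist to match the PHY device lookup, and the PHY device name is composed from the MDIO bus id plus the PHY address, so once the bus driver registers as "davinci_mdio", a bare "0:00" can never match. A sketch of the composition using PHY_ID_FMT from <linux/phy.h> (buffer and caller are illustrative):

#include <linux/kernel.h>
#include <linux/phy.h>

/* Sketch: PHY_ID_FMT is "%s:%02x" -- MDIO bus id, colon, PHY address. */
static void example_phy_id(char *buf, size_t len)
{
	snprintf(buf, len, PHY_ID_FMT, "davinci_mdio-0", 0x00);
	/* buf now holds "davinci_mdio-0:00", the form the new defines use */
}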
@@ -54,7 +54,7 @@ static inline int have_tvp7002(void)
return 0;
}
-#define DM365_EVM_PHY_ID "0:01"
+#define DM365_EVM_PHY_ID "davinci_mdio-0:01"
/*
* A MAX-II CPLD is used for various board control functions.
*/
......
@@ -40,7 +40,7 @@
#include <mach/usb.h>
#include <mach/aemif.h>
-#define DM644X_EVM_PHY_ID "0:01"
+#define DM644X_EVM_PHY_ID "davinci_mdio-0:01"
#define LXT971_PHY_ID (0x001378e2)
#define LXT971_PHY_MASK (0xfffffff0)
......
@@ -736,7 +736,7 @@ static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
-#define DM646X_EVM_PHY_ID "0:01"
+#define DM646X_EVM_PHY_ID "davinci_mdio-0:01"
/*
* The following EDMA channels/slots are not being used by drivers (for
* example: Timer, GPIO, UART events etc) on dm646x, hence they are being
......
@@ -39,7 +39,7 @@
#include <mach/mmc.h>
#include <mach/usb.h>
-#define NEUROS_OSD2_PHY_ID "0:01"
+#define NEUROS_OSD2_PHY_ID "davinci_mdio-0:01"
#define LXT971_PHY_ID 0x001378e2
#define LXT971_PHY_MASK 0xfffffff0
......
@@ -21,7 +21,7 @@
#include <mach/da8xx.h>
#include <mach/mux.h>
-#define HAWKBOARD_PHY_ID "0:07"
+#define HAWKBOARD_PHY_ID "davinci_mdio-0:07"
#define DA850_HAWK_MMCSD_CD_PIN GPIO_TO_PIN(3, 12)
#define DA850_HAWK_MMCSD_WP_PIN GPIO_TO_PIN(3, 13)
......
@@ -42,7 +42,7 @@
#include <mach/mux.h>
#include <mach/usb.h>
-#define SFFSDR_PHY_ID "0:01"
+#define SFFSDR_PHY_ID "davinci_mdio-0:01"
static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0
* UBL: Block 1
......
@@ -153,34 +153,6 @@ static struct clk pll1_sysclk3 = {
.div_reg = PLLDIV3,
};
static struct clk pll1_sysclk4 = {
.name = "pll1_sysclk4",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV4,
};
static struct clk pll1_sysclk5 = {
.name = "pll1_sysclk5",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV5,
};
static struct clk pll1_sysclk6 = {
.name = "pll0_sysclk6",
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV6,
};
static struct clk pll1_sysclk7 = {
.name = "pll1_sysclk7",
.parent = &pll1_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV7,
};
static struct clk i2c0_clk = {
.name = "i2c0",
.parent = &pll0_aux_clk,
@@ -397,10 +369,6 @@ static struct clk_lookup da850_clks[] = {
CLK(NULL, "pll1_aux", &pll1_aux_clk),
CLK(NULL, "pll1_sysclk2", &pll1_sysclk2),
CLK(NULL, "pll1_sysclk3", &pll1_sysclk3),
CLK(NULL, "pll1_sysclk4", &pll1_sysclk4),
CLK(NULL, "pll1_sysclk5", &pll1_sysclk5),
CLK(NULL, "pll1_sysclk6", &pll1_sysclk6),
CLK(NULL, "pll1_sysclk7", &pll1_sysclk7),
CLK("i2c_davinci.1", NULL, &i2c0_clk),
CLK(NULL, "timer0", &timerp64_0_clk),
CLK("watchdog", NULL, &timerp64_1_clk),
......
@@ -213,13 +213,12 @@ config MACH_OMAP3_PANDORA
depends on ARCH_OMAP3
default y
select OMAP_PACKAGE_CBB
- select REGULATOR_FIXED_VOLTAGE
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_OMAP3_TOUCHBOOK
bool "OMAP3 Touch Book"
depends on ARCH_OMAP3
default y
- select BACKLIGHT_CLASS_DEVICE
config MACH_OMAP_3430SDP
bool "OMAP 3430 SDP board"
@@ -265,7 +264,7 @@ config MACH_OMAP_ZOOM2
select SERIAL_8250
select SERIAL_CORE_CONSOLE
select SERIAL_8250_CONSOLE
- select REGULATOR_FIXED_VOLTAGE
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_OMAP_ZOOM3
bool "OMAP3630 Zoom3 board"
@@ -275,7 +274,7 @@ config MACH_OMAP_ZOOM3
select SERIAL_8250
select SERIAL_CORE_CONSOLE
select SERIAL_8250_CONSOLE
- select REGULATOR_FIXED_VOLTAGE
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_CM_T35
bool "CompuLab CM-T35/CM-T3730 modules"
@@ -334,7 +333,7 @@ config MACH_OMAP_4430SDP
depends on ARCH_OMAP4
select OMAP_PACKAGE_CBL
select OMAP_PACKAGE_CBS
- select REGULATOR_FIXED_VOLTAGE
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_OMAP4_PANDA
bool "OMAP4 Panda Board"
@@ -342,7 +341,7 @@ config MACH_OMAP4_PANDA
depends on ARCH_OMAP4
select OMAP_PACKAGE_CBL
select OMAP_PACKAGE_CBS
- select REGULATOR_FIXED_VOLTAGE
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config OMAP3_EMU
bool "OMAP3 debugging peripherals"
......
@@ -405,6 +405,7 @@ static int omap_mcspi_init(struct omap_hwmod *oh, void *unused)
break;
default:
pr_err("Invalid McSPI Revision value\n");
+ kfree(pdata);
return -EINVAL;
}
......
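The devices.c fix closes a small leak: omap_mcspi_init() allocates a platform-data block before the revision switch, and the invalid-revision arm returned without releasing it. The idiom, reduced to a hypothetical sketch:

#include <linux/errno.h>
#include <linux/slab.h>

struct example_pdata {
	int num_cs;
};

static int example_init(int revision)
{
	struct example_pdata *pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);

	if (!pdata)
		return -ENOMEM;

	switch (revision) {
	case 1:
	case 2:
		pdata->num_cs = revision;
		/* ... register the device, handing off pdata ... */
		return 0;
	default:
		kfree(pdata);	/* every early exit must free what was allocated */
		return -EINVAL;
	}
}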
@@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
case GPMC_CONFIG_DEV_SIZE:
regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+ /* clear 2 target bits */
+ regval &= ~GPMC_CONFIG1_DEVICESIZE(3);
+ /* set the proper value */
regval |= GPMC_CONFIG1_DEVICESIZE(wval);
gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
break;
......
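The GPMC bug is a classic read-modify-write mistake: DEVICESIZE was only ever OR-ed in, so once a chip-select had been programmed for a 16-bit device the field could never be narrowed back to 8-bit. Clearing the field before setting it fixes that; the pattern, on a hypothetical two-bit field:

#include <linux/types.h>

#define EXAMPLE_DEVICESIZE(x)	(((x) & 3) << 12)	/* hypothetical 2-bit field */

static u32 example_set_devicesize(u32 regval, u32 wval)
{
	regval &= ~EXAMPLE_DEVICESIZE(3);	/* clear both target bits... */
	regval |= EXAMPLE_DEVICESIZE(wval);	/* ...then OR in the new value */
	return regval;
}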
@@ -175,14 +175,15 @@ static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
{
u32 reg;
- if (mmc->slots[0].internal_clock) {
- reg = omap_ctrl_readl(control_devconf1_offset);
+ reg = omap_ctrl_readl(control_devconf1_offset);
+ if (mmc->slots[0].internal_clock)
reg |= OMAP2_MMCSDIO2ADPCLKISEL;
- omap_ctrl_writel(reg, control_devconf1_offset);
- }
+ else
+ reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
+ omap_ctrl_writel(reg, control_devconf1_offset);
}
-static void hsmmc23_before_set_reg(struct device *dev, int slot,
+static void hsmmc2_before_set_reg(struct device *dev, int slot,
int power_on, int vdd)
{
struct omap_mmc_platform_data *mmc = dev->platform_data;
@@ -407,14 +408,13 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
c->caps &= ~MMC_CAP_8_BIT_DATA;
c->caps |= MMC_CAP_4_BIT_DATA;
}
- /* FALLTHROUGH */
- case 3:
if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
/* off-chip level shifting, or none */
- mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
+ mmc->slots[0].before_set_reg = hsmmc2_before_set_reg;
mmc->slots[0].after_set_reg = NULL;
}
break;
+ case 3:
case 4:
case 5:
mmc->slots[0].before_set_reg = NULL;
......
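Two defects are addressed above. First, the DEVCONF1 read lived inside the if, so the loopback-clock bit could be set but never cleared; hoisting the read lets the new else branch clear it, with a single write-back covering both paths. Second, the helper was shared between MMC2 and MMC3, so an MMC3 setup flipped an MMC2-only clock bit; it is now MMC2-specific. The fixed control flow, reduced to a sketch with a hypothetical register:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_LOOPBACK_EN	BIT(6)	/* hypothetical bit position */

static u32 example_devconf;		/* stand-in for the control register */

static void example_select_input_clk(bool internal_clock)
{
	u32 reg = example_devconf;	/* read once, on both paths */

	if (internal_clock)
		reg |= EXAMPLE_LOOPBACK_EN;
	else
		reg &= ~EXAMPLE_LOOPBACK_EN;	/* clearing now actually happens */
	example_devconf = reg;		/* single write-back */
}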
@@ -388,7 +388,7 @@ static void __init omap_hwmod_init_postsetup(void)
omap_pm_if_early_init();
}
-#ifdef CONFIG_ARCH_OMAP2
+#ifdef CONFIG_SOC_OMAP2420
void __init omap2420_init_early(void)
{
omap2_set_globals_242x();
@@ -400,7 +400,9 @@ void __init omap2420_init_early(void)
omap_hwmod_init_postsetup();
omap2420_clk_init();
}
+#endif
+#ifdef CONFIG_SOC_OMAP2430
void __init omap2430_init_early(void)
{
omap2_set_globals_243x();
......
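The io.c breakage came from guarding both early-init functions with the architecture-wide CONFIG_ARCH_OMAP2: a 2420-only config still compiled omap2430_init_early(), which references 2430-only symbols. The fix narrows each function to its own SoC option; schematically (function names illustrative):

#include <linux/init.h>

#ifdef CONFIG_SOC_OMAP2420
void __init example_2420_init_early(void)
{
	/* 2420-only setup; compiled out on 2430-only configs */
}
#endif

#ifdef CONFIG_SOC_OMAP2430
void __init example_2430_init_early(void)
{
	/* 2430-only setup; compiled out on 2420-only configs */
}
#endif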
@@ -55,27 +55,6 @@ struct omap_hwmod_class omap2_dss_hwmod_class = {
.reset = omap_dss_reset,
};
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap2_dispc_sysc,
};
/*
* 'rfbi' class
* remote frame buffer interface
......
@@ -28,6 +28,28 @@ struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
{ .name = "dispc", .dma_req = 5 },
{ .dma_req = -1 }
};
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
struct omap_hwmod_class omap2_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap2_dispc_sysc,
};
/* OMAP2xxx Timer Common */
static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
.rev_offs = 0x0000,
......
@@ -1480,6 +1480,28 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
.masters_cnt = ARRAY_SIZE(omap3xxx_dss_masters),
};
/*
* 'dispc' class
* display controller
*/
static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
SYSC_HAS_ENAWAKEUP),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
static struct omap_hwmod_class omap3_dispc_hwmod_class = {
.name = "dispc",
.sysc = &omap3_dispc_sysc,
};
/* l4_core -> dss_dispc */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
.master = &omap3xxx_l4_core_hwmod,
@@ -1503,7 +1525,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
.name = "dss_dispc",
- .class = &omap2_dispc_hwmod_class,
+ .class = &omap3_dispc_hwmod_class,
.mpu_irqs = omap2_dispc_irqs,
.main_clk = "dss1_alwon_fck",
.prcm = {
@@ -3523,12 +3545,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
&omap3xxx_uart2_hwmod,
&omap3xxx_uart3_hwmod,
/* dss class */
&omap3xxx_dss_dispc_hwmod,
&omap3xxx_dss_dsi1_hwmod,
&omap3xxx_dss_rfbi_hwmod,
&omap3xxx_dss_venc_hwmod,
/* i2c class */
&omap3xxx_i2c1_hwmod,
&omap3xxx_i2c2_hwmod,
@@ -3635,6 +3651,15 @@ static __initdata struct omap_hwmod *am35xx_hwmods[] = {
NULL
};
static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = {
/* dss class */
&omap3xxx_dss_dispc_hwmod,
&omap3xxx_dss_dsi1_hwmod,
&omap3xxx_dss_rfbi_hwmod,
&omap3xxx_dss_venc_hwmod,
NULL
};
int __init omap3xxx_hwmod_init(void)
{
int r;
@@ -3708,6 +3733,21 @@ int __init omap3xxx_hwmod_init(void)
if (h)
r = omap_hwmod_register(h);
if (r < 0)
return r;
/*
* DSS code presumes that dss_core hwmod is handled first,
* _before_ any other DSS related hwmods so register common
* DSS hwmods last to ensure that dss_core is already registered.
* Otherwise strange things may happen: for example, if dispc
* is handled before dss_core and DSS was enabled by the bootloader,
* DISPC will be reset with its outputs enabled, which sometimes leads
* to an unrecoverable L3 error.
* XXX The long-term fix to this is to ensure modules are set up
* in dependency order in the hwmod core code.
*/
r = omap_hwmod_register(omap3xxx_dss_hwmods);
return r;
}
@@ -1031,6 +1031,7 @@ static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
{
.name = "mpu",
.pa_start = 0x4012e000,
.pa_end = 0x4012e07f,
.flags = ADDR_TYPE_RT
@@ -1049,6 +1050,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
{
.name = "dma",
.pa_start = 0x4902e000,
.pa_end = 0x4902e07f,
.flags = ADDR_TYPE_RT
......
@@ -19,6 +19,7 @@
#include "common.h"
#include <plat/cpu.h>
#include <plat/prcm.h>
+#include <plat/irqs.h>
#include "vp.h"
......
@@ -897,7 +897,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
ret = sr_late_init(sr_info);
if (ret) {
pr_warning("%s: Error in SR late init\n", __func__);
- return ret;
+ goto err_iounmap;
}
}
......
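Returning straight out of the probe when sr_late_init() fails skipped the iounmap() (and related cleanup) that the probe's error path normally performs; routing the failure through the existing err_iounmap label is the standard kernel cleanup idiom. A reduced sketch (the label name matches the fix; everything else is hypothetical):

#include <linux/errno.h>
#include <linux/io.h>

static int example_late_init(void)
{
	return 0;	/* placeholder for the real late init */
}

static int example_probe(void)
{
	void __iomem *base = ioremap(0x48000000, 0x1000);	/* hypothetical range */
	int ret;

	if (!base)
		return -ENOMEM;

	ret = example_late_init();
	if (ret)
		goto err_iounmap;	/* never return with the mapping live */

	return 0;

err_iounmap:
	iounmap(base);
	return ret;
}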
@@ -270,7 +270,7 @@ static struct clocksource clocksource_gpt = {
static u32 notrace dmtimer_read_sched_clock(void)
{
if (clksrc.reserved)
- return __omap_dm_timer_read_counter(clksrc.io_base, 1);
+ return __omap_dm_timer_read_counter(&clksrc, 1);
return 0;
}
......
@@ -1018,7 +1018,7 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- match = of_match_device(omap_i2c_of_match, &pdev->dev);
+ match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
if (match) {
u32 freq = 100000; /* default to 100000 Hz */
......
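OMAP1 builds without CONFIG_OF, and omap_i2c_of_match is only defined when CONFIG_OF is set, so naming it directly broke the build. of_match_ptr() is a macro that expands to its argument under CONFIG_OF and to NULL otherwise, so on !OF builds the table identifier never reaches the compiler. The pattern, sketched with a hypothetical driver:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id example_of_match[] = {
	{ .compatible = "example,i2c" },
	{ },
};
#endif

static int example_probe(struct platform_device *pdev)
{
	/* With CONFIG_OF this passes the table through; without it the
	 * macro substitutes NULL and example_of_match is never referenced. */
	const struct of_device_id *match =
		of_match_device(of_match_ptr(example_of_match), &pdev->dev);

	return match ? 0 : -ENODEV;
}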