Commit 9a47a710 authored by Helge Deller

parisc: ccio-dma: Fix sparse warnings

Signed-off-by: Helge Deller <deller@gmx.de>
parent c1ebb940
...@@ -214,7 +214,7 @@ struct ioa_registers { ...@@ -214,7 +214,7 @@ struct ioa_registers {
struct ioc { struct ioc {
struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */ struct ioa_registers __iomem *ioc_regs; /* I/O MMU base address */
u8 *res_map; /* resource map, bit == pdir entry */ u8 *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */ __le64 *pdir_base; /* physical base address */
u32 pdir_size; /* bytes, function of IOV Space size */ u32 pdir_size; /* bytes, function of IOV Space size */
u32 res_hint; /* next available IOVP - u32 res_hint; /* next available IOVP -
circular search */ circular search */
...@@ -339,7 +339,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) ...@@ -339,7 +339,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
BUG_ON(pages_needed == 0); BUG_ON(pages_needed == 0);
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE); BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
DBG_RES("%s() size: %d pages_needed %d\n", DBG_RES("%s() size: %zu pages_needed %d\n",
__func__, size, pages_needed); __func__, size, pages_needed);
/* /*
...@@ -427,7 +427,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped) ...@@ -427,7 +427,7 @@ ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE); BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
BUG_ON(pages_mapped > BITS_PER_LONG); BUG_ON(pages_mapped > BITS_PER_LONG);
DBG_RES("%s(): res_idx: %d pages_mapped %d\n", DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
__func__, res_idx, pages_mapped); __func__, res_idx, pages_mapped);
#ifdef CCIO_COLLECT_STATS #ifdef CCIO_COLLECT_STATS
...@@ -543,7 +543,7 @@ static u32 hint_lookup[] = { ...@@ -543,7 +543,7 @@ static u32 hint_lookup[] = {
* index are bits 12:19 of the value returned by LCI. * index are bits 12:19 of the value returned by LCI.
*/ */
static void static void
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba, ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
unsigned long hints) unsigned long hints)
{ {
register unsigned long pa; register unsigned long pa;
...@@ -719,7 +719,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, ...@@ -719,7 +719,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
unsigned long flags; unsigned long flags;
dma_addr_t iovp; dma_addr_t iovp;
dma_addr_t offset; dma_addr_t offset;
u64 *pdir_start; __le64 *pdir_start;
unsigned long hint = hint_lookup[(int)direction]; unsigned long hint = hint_lookup[(int)direction];
BUG_ON(!dev); BUG_ON(!dev);
...@@ -746,8 +746,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size, ...@@ -746,8 +746,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
pdir_start = &(ioc->pdir_base[idx]); pdir_start = &(ioc->pdir_base[idx]);
DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n", DBG_RUN("%s() %px -> %#lx size: %zu\n",
__func__, addr, (long)iovp | offset, size); __func__, addr, (long)(iovp | offset), size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */ /* If not cacheline aligned, force SAFE_DMA on the whole mess */
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES)) if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
...@@ -805,7 +805,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, ...@@ -805,7 +805,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
return; return;
} }
DBG_RUN("%s() iovp 0x%lx/%x\n", DBG_RUN("%s() iovp %#lx/%zx\n",
__func__, (long)iova, size); __func__, (long)iova, size);
iova ^= offset; /* clear offset bits */ iova ^= offset; /* clear offset bits */
...@@ -1283,7 +1283,7 @@ ccio_ioc_init(struct ioc *ioc) ...@@ -1283,7 +1283,7 @@ ccio_ioc_init(struct ioc *ioc)
iova_space_size>>20, iova_space_size>>20,
iov_order + PAGE_SHIFT); iov_order + PAGE_SHIFT);
ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL, ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size)); get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) { if(NULL == ioc->pdir_base) {
panic("%s() could not allocate I/O Page Table\n", __func__); panic("%s() could not allocate I/O Page Table\n", __func__);
......
...@@ -31,8 +31,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, ...@@ -31,8 +31,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
unsigned long vaddr; unsigned long vaddr;
long size; long size;
DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents, DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents,
(unsigned long)sg_dma_address(startsg), cnt, (unsigned long)sg_dma_address(startsg),
sg_virt(startsg), startsg->length sg_virt(startsg), startsg->length
); );
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment