Commit a445e940 authored by Vladimir Murzin's avatar Vladimir Murzin Committed by Christoph Hellwig

dma-mapping: fix handling of dma-ranges for reserved memory (again)

Daniele reported that the issue previously fixed in c41f9ea9
("drivers: dma-coherent: Account dma_pfn_offset when used with device
tree") reappeared shortly after 43fc509c ("dma-coherent: introduce
interface for default DMA pool"), where the fix was accidentally dropped.

Let's put the fix back in place and respect dma-ranges for reserved memory.

Fixes: 43fc509c ("dma-coherent: introduce interface for default DMA pool")
Reported-by: Daniele Alessandrelli <daniele.alessandrelli@gmail.com>
Tested-by: Daniele Alessandrelli <daniele.alessandrelli@gmail.com>
Tested-by: Alexandre Torgue <alexandre.torgue@st.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 320000e7
...@@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, ...@@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
unsigned long attrs) unsigned long attrs)
{ {
void *ret = dma_alloc_from_global_coherent(size, dma_handle); void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
/* /*
* dma_alloc_from_global_coherent() may fail because: * dma_alloc_from_global_coherent() may fail because:
......
...@@ -162,7 +162,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); ...@@ -162,7 +162,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret); void *cpu_addr, size_t size, int *ret);
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle); void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr); int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret); size_t size, int *ret);
...@@ -172,7 +172,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, ...@@ -172,7 +172,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
#define dma_release_from_dev_coherent(dev, order, vaddr) (0) #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void *dma_alloc_from_global_coherent(ssize_t size, static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle) dma_addr_t *dma_handle)
{ {
return NULL; return NULL;
......
...@@ -123,7 +123,8 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, ...@@ -123,7 +123,8 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
return ret; return ret;
} }
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, static void *__dma_alloc_from_coherent(struct device *dev,
struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle) ssize_t size, dma_addr_t *dma_handle)
{ {
int order = get_order(size); int order = get_order(size);
...@@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, ...@@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
/* /*
* Memory was found in the coherent area. * Memory was found in the coherent area.
*/ */
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT); *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
ret = mem->virt_base + (pageno << PAGE_SHIFT); ret = mem->virt_base + (pageno << PAGE_SHIFT);
spin_unlock_irqrestore(&mem->spinlock, flags); spin_unlock_irqrestore(&mem->spinlock, flags);
memset(ret, 0, size); memset(ret, 0, size);
...@@ -175,16 +176,17 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, ...@@ -175,16 +176,17 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
if (!mem) if (!mem)
return 0; return 0;
*ret = __dma_alloc_from_coherent(mem, size, dma_handle); *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
return 1; return 1;
} }
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle)
{ {
if (!dma_coherent_default_memory) if (!dma_coherent_default_memory)
return NULL; return NULL;
return __dma_alloc_from_coherent(dma_coherent_default_memory, size, return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
dma_handle); dma_handle);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment