Commit 4b85faed authored by Christoph Hellwig

dma-mapping: add a dma_alloc_need_uncached helper

Check if we need to allocate uncached memory for a device given the
allocation flags.  Switch the uncached segment check over to this helper
to deal with architectures that do not support the dma_cache_sync
operation and thus should not return cacheable memory for
DMA_ATTR_NON_CONSISTENT allocations.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 961729bf
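For context, a hedged sketch of the driver-side pattern the DMA_ATTR_NON_CONSISTENT path serves: on an architecture that implements dma_cache_sync(), the allocation may legitimately come back cacheable, and the driver then owns explicit cache maintenance. The function name and error handling below are illustrative, not part of this commit:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Illustrative driver helper (hypothetical name): request memory that
 * may be returned cacheable, then publish the CPU's writes to the
 * device with an explicit dma_cache_sync() call.
 */
static void *example_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle)
{
	void *buf = dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			DMA_ATTR_NON_CONSISTENT);

	if (buf) {
		memset(buf, 0, size);
		/* Make the zeroed buffer visible to the device. */
		dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);
	}
	return buf;
}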
include/linux/dma-noncoherent.h

@@ -20,6 +20,20 @@ static inline bool dev_is_dma_coherent(struct device *dev)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
 
+/*
+ * Check if an allocation needs to be marked uncached to be coherent.
+ */
+static inline bool dma_alloc_need_uncached(struct device *dev,
+		unsigned long attrs)
+{
+	if (dev_is_dma_coherent(dev))
+		return false;
+	if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+	    (attrs & DMA_ATTR_NON_CONSISTENT))
+		return false;
+	return true;
+}
+
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
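As an annotation (mine, not part of the patch), the three outcomes of the helper just added:

/*
 * Annotation, not in the patch: outcomes of dma_alloc_need_uncached().
 *
 *   - dev_is_dma_coherent(dev)                       -> false
 *     (cacheable memory is already coherent for this device)
 *   - non-coherent device, DMA_ATTR_NON_CONSISTENT set, and the
 *     architecture provides dma_cache_sync()
 *     (CONFIG_DMA_NONCOHERENT_CACHE_SYNC)            -> false
 *     (the driver takes over explicit cache maintenance)
 *   - non-coherent device otherwise                  -> true
 *     (the allocation must be handed out via an uncached mapping)
 */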
kernel/dma/direct.c

@@ -160,7 +160,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-	    !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+	    dma_alloc_need_uncached(dev, attrs)) {
 		arch_dma_prep_coherent(page, size);
 		ret = uncached_kernel_address(ret);
 	}
@@ -182,7 +182,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-	    !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT))
+	    dma_alloc_need_uncached(dev, attrs))
 		cpu_addr = cached_kernel_address(cpu_addr);
 	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
 }
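The two hunks above rely on uncached_kernel_address() and cached_kernel_address() being inverses: the alloc path hands out the uncached alias, and the free path maps it back so virt_to_page() sees the cached address it expects. A minimal sketch of what an architecture might provide, assuming a fixed MIPS-like alias layout; the base addresses are illustrative only:

/* Hypothetical arch hooks for CONFIG_ARCH_HAS_UNCACHED_SEGMENT on a
 * MIPS-like layout where a cached window (e.g. KSEG0 at 0x80000000)
 * and an uncached window (e.g. KSEG1 at 0xa0000000) alias the same
 * physical memory.  Illustrative, not taken from this commit.
 */
#define EXAMPLE_CACHED_BASE	0x80000000UL
#define EXAMPLE_UNCACHED_BASE	0xa0000000UL

void *uncached_kernel_address(void *addr)
{
	return (void *)((unsigned long)addr - EXAMPLE_CACHED_BASE +
			EXAMPLE_UNCACHED_BASE);
}

void *cached_kernel_address(void *addr)
{
	return (void *)((unsigned long)addr - EXAMPLE_UNCACHED_BASE +
			EXAMPLE_CACHED_BASE);
}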