Commit e67095fd authored by Linus Torvalds

Merge tag 'dma-mapping-5.3-5' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Two fixes for regressions in this merge window:

   - select the Kconfig symbols for the noncoherent dma arch helpers on
     arm if swiotlb is selected, not just for LPAE, to not break the Xen
     build, which uses swiotlb indirectly through swiotlb-xen

   - fix the page allocator fallback in dma_alloc_contiguous if the CMA
     allocation fails"

* tag 'dma-mapping-5.3-5' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: fix zone selection after an unaddressable CMA allocation
  arm: select the dma-noncoherent symbols for all swiotlb builds
parents 083f0f2c 90ae409f
...@@ -7,6 +7,8 @@ config ARM ...@@ -7,6 +7,8 @@ config ARM
select ARCH_HAS_BINFMT_FLAT select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KEEPINITRD select ARCH_HAS_KEEPINITRD
...@@ -18,6 +20,8 @@ config ARM ...@@ -18,6 +20,8 @@ config ARM
select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU select ARCH_HAS_STRICT_MODULE_RWX if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_CUSTOM_GPIO_H
......
...@@ -664,10 +664,6 @@ config ARM_LPAE ...@@ -664,10 +664,6 @@ config ARM_LPAE
!CPU_32v4 && !CPU_32v3 !CPU_32v4 && !CPU_32v3
select PHYS_ADDR_T_64BIT select PHYS_ADDR_T_64BIT
select SWIOTLB select SWIOTLB
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU
help help
Say Y if you have an ARMv7 processor supporting the LPAE page Say Y if you have an ARMv7 processor supporting the LPAE page
table format and you would like to access memory beyond the table format and you would like to access memory beyond the
......
...@@ -965,10 +965,13 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, ...@@ -965,10 +965,13 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
{ {
bool coherent = dev_is_dma_coherent(dev); bool coherent = dev_is_dma_coherent(dev);
size_t alloc_size = PAGE_ALIGN(size); size_t alloc_size = PAGE_ALIGN(size);
int node = dev_to_node(dev);
struct page *page = NULL; struct page *page = NULL;
void *cpu_addr; void *cpu_addr;
page = dma_alloc_contiguous(dev, alloc_size, gfp); page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (!page)
page = alloc_pages_node(node, gfp, get_order(alloc_size));
if (!page) if (!page)
return NULL; return NULL;
......
...@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, ...@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
gfp_t gfp) gfp_t gfp)
{ {
int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; return NULL;
size_t align = get_order(PAGE_ALIGN(size));
return alloc_pages_node(node, gfp, align);
} }
static inline void dma_free_contiguous(struct device *dev, struct page *page, static inline void dma_free_contiguous(struct device *dev, struct page *page,
......
...@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, ...@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
*/ */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{ {
int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; size_t count = size >> PAGE_SHIFT;
size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
size_t align = get_order(PAGE_ALIGN(size));
struct page *page = NULL; struct page *page = NULL;
struct cma *cma = NULL; struct cma *cma = NULL;
...@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) ...@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
/* CMA can be used only in the context which permits sleeping */ /* CMA can be used only in the context which permits sleeping */
if (cma && gfpflags_allow_blocking(gfp)) { if (cma && gfpflags_allow_blocking(gfp)) {
size_t align = get_order(size);
size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN); page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
} }
/* Fallback allocation of normal pages */
if (!page)
page = alloc_pages_node(node, gfp, align);
return page; return page;
} }
......
...@@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) ...@@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{ {
size_t alloc_size = PAGE_ALIGN(size);
int node = dev_to_node(dev);
struct page *page = NULL; struct page *page = NULL;
u64 phys_mask; u64 phys_mask;
...@@ -95,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, ...@@ -95,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp &= ~__GFP_ZERO; gfp &= ~__GFP_ZERO;
gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_mask); &phys_mask);
page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, alloc_size);
page = NULL;
}
again: again:
page = dma_alloc_contiguous(dev, size, gfp); if (!page)
page = alloc_pages_node(node, gfp, get_order(alloc_size));
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, size); dma_free_contiguous(dev, page, size);
page = NULL; page = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment