Commit 57bf5a89 authored by Christoph Hellwig

dma-mapping: clear harmful GFP_* flags in common code

Lift the code from x86 so that we behave consistently.  In the future we
should probably warn if any of these is set.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
parent 205e1b7f
@@ -22,9 +22,6 @@ static void *v32_dma_alloc(struct device *dev, size_t size,
 {
 	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
...
@@ -16,9 +16,6 @@ static void *dma_alloc(struct device *dev, size_t size,
 {
 	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
 	ret = (void *)__get_free_pages(gfp, get_order(size));
...
@@ -76,8 +76,6 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
...
@@ -161,9 +161,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 	if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL)
 		gfp |= __GFP_DMA;
 	else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
...
@@ -15,9 +15,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 	if ((IS_ENABLED(CONFIG_ISA) && dev == NULL) ||
 	    (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	     dev->coherent_dma_mask < DMA_BIT_MASK(32)))
...
@@ -93,9 +93,6 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
 	gfp_t dma_flag;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 #ifdef CONFIG_ISA
 	if (dev == NULL)
 		dma_flag = __GFP_DMA;
...
@@ -47,9 +47,6 @@ static char *nlm_swiotlb;
 static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 #ifdef CONFIG_ZONE_DMA32
 	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		gfp |= __GFP_DMA32;
...
@@ -37,9 +37,6 @@ static void *mn10300_dma_alloc(struct device *dev, size_t size,
 		goto done;
 	}
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
 		gfp |= GFP_DMA;
...
@@ -63,9 +63,6 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
 {
 	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 	/* optimized page clearing */
 	gfp |= __GFP_ZERO;
...
@@ -105,9 +105,6 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 	};
 #endif /* CONFIG_FSL_SOC */
-	/* ignore region specifiers */
-	flag &= ~(__GFP_HIGHMEM);
 	page = alloc_pages_node(node, flag, get_order(size));
 	if (page == NULL)
 		return NULL;
...
@@ -87,7 +87,6 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	dma_mask = dma_alloc_coherent_mask(dev, flag);
-	flag &= ~__GFP_ZERO;
 again:
 	page = NULL;
 	/* CMA can be used only in the context which permits sleeping */
@@ -139,7 +138,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
 	if (!*dev)
 		*dev = &x86_dma_fallback_dev;
-	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
 	if (!is_device_dma_capable(*dev))
...
@@ -518,6 +518,13 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 		return cpu_addr;
+	/*
+	 * Let the implementation decide on the zone to allocate from, and
+	 * decide on the way of zeroing the memory given that the memory
+	 * returned should always be zeroed.
+	 */
+	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO);
 	if (!arch_dma_alloc_attrs(&dev, &flag))
 		return NULL;
 	if (!ops->alloc)
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment