Commit baa676fc authored by Andrzej Pietrasiewicz, committed by Marek Szyprowski

X86 & IA64: adapt for dma_map_ops changes

Adapt core x86 and IA64 architecture code for dma_map_ops changes: replace
alloc/free_coherent with generic alloc/free methods.
Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
[removed swiotlb related changes and replaced it with wrappers,
 merged with IA64 patch to avoid inter-patch dependences in intel-iommu code]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Tony Luck <tony.luck@intel.com>
parent 613c4578
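
The shape of the conversion is the same in every file touched below: each dma_map_ops instance renames .alloc_coherent/.free_coherent to .alloc/.free, each callback gains a trailing struct dma_attrs *attrs parameter, and the arch headers turn dma_alloc_coherent()/dma_free_coherent() into macros over new dma_alloc_attrs()/dma_free_attrs() helpers that pass a NULL attrs. The following is a minimal, self-contained userspace sketch of that pattern, not the real <linux/dma-mapping.h> definitions; all types and the demo_* names are simplified stand-ins introduced only for illustration.

/*
 * Minimal userspace sketch (NOT kernel code) of the pattern this commit
 * applies: the dma_map_ops alloc/free callbacks take an extra
 * "struct dma_attrs *attrs" argument, and dma_alloc_coherent()/
 * dma_free_coherent() become macros forwarding to the *_attrs variants
 * with attrs == NULL.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_attrs;				/* opaque in this sketch */
struct device { const char *name; };
typedef unsigned long dma_addr_t;
typedef unsigned int gfp_t;

struct dma_map_ops {
	/* new-style callbacks: note the trailing attrs parameter */
	void *(*alloc)(struct device *dev, size_t size, dma_addr_t *handle,
		       gfp_t gfp, struct dma_attrs *attrs);
	void  (*free)(struct device *dev, size_t size, void *vaddr,
		      dma_addr_t handle, struct dma_attrs *attrs);
};

static void *demo_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			gfp_t gfp, struct dma_attrs *attrs)
{
	void *vaddr = calloc(1, size);			/* stand-in allocator */

	*handle = (dma_addr_t)(uintptr_t)vaddr;		/* fake "bus address" */
	(void)dev; (void)gfp; (void)attrs;
	return vaddr;
}

static void demo_free(struct device *dev, size_t size, void *vaddr,
		      dma_addr_t handle, struct dma_attrs *attrs)
{
	(void)dev; (void)size; (void)handle; (void)attrs;
	free(vaddr);
}

static struct dma_map_ops demo_ops = {
	.alloc = demo_alloc,	/* was .alloc_coherent before this change */
	.free  = demo_free,	/* was .free_coherent before this change */
};

/* wrappers mirroring what the patch adds to the per-arch headers */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *handle, gfp_t gfp,
				    struct dma_attrs *attrs)
{
	return demo_ops.alloc(dev, size, handle, gfp, attrs);
}

static inline void dma_free_attrs(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	demo_ops.free(dev, size, vaddr, handle, attrs);
}

#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, c, h)	dma_free_attrs(d, s, c, h, NULL)

int main(void)
{
	struct device dev = { "demo" };
	dma_addr_t handle;
	void *buf = dma_alloc_coherent(&dev, 64, &handle, 0);

	printf("allocated %p, handle %#lx\n", buf, (unsigned long)handle);
	dma_free_coherent(&dev, 64, buf, handle);
	return 0;
}

Passing NULL from the macros keeps every existing dma_alloc_coherent()/dma_free_coherent() caller working unchanged, while new callers can hand attributes through the *_attrs entry points.
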
@@ -1130,7 +1130,8 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void *
-sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
+sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t flags, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	void *addr;
@@ -1192,8 +1193,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
  * See Documentation/DMA-API-HOWTO.txt
  */
-static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
-			       dma_addr_t dma_handle)
+static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
+			      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -2213,8 +2214,8 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 struct dma_map_ops sba_dma_ops = {
-	.alloc_coherent = sba_alloc_coherent,
-	.free_coherent = sba_free_coherent,
+	.alloc = sba_alloc_coherent,
+	.free = sba_free_coherent,
 	.map_page = sba_map_page,
 	.unmap_page = sba_unmap_page,
 	.map_sg = sba_map_sg_attrs,
...
@@ -23,23 +23,29 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 				enum dma_data_direction);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *daddr, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *daddr, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	void *caddr;
 
-	caddr = ops->alloc_coherent(dev, size, daddr, gfp);
+	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
 	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
 	return caddr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *caddr, dma_addr_t daddr)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *caddr, dma_addr_t daddr,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	debug_dma_free_coherent(dev, size, caddr, daddr);
-	ops->free_coherent(dev, size, caddr, daddr);
+	ops->free(dev, size, caddr, daddr, attrs);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
...
@@ -15,16 +15,24 @@ int swiotlb __read_mostly;
 EXPORT_SYMBOL(swiotlb);
 
 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
-					 dma_addr_t *dma_handle, gfp_t gfp)
+					 dma_addr_t *dma_handle, gfp_t gfp,
+					 struct dma_attrs *attrs)
 {
 	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
 		gfp |= GFP_DMA;
 	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 }
 
+static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_addr,
+				       struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc_coherent = ia64_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = ia64_swiotlb_alloc_coherent,
+	.free = ia64_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
 	.map_sg = swiotlb_map_sg_attrs,
...
@@ -76,7 +76,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
  * more information.
  */
 static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t * dma_handle, gfp_t flags)
+				   dma_addr_t * dma_handle, gfp_t flags,
+				   struct dma_attrs *attrs)
 {
 	void *cpuaddr;
 	unsigned long phys_addr;
@@ -137,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
  * any associated IOMMU mappings.
  */
 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-				 dma_addr_t dma_handle)
+				 dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -466,8 +467,8 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 }
 
 static struct dma_map_ops sn_dma_ops = {
-	.alloc_coherent = sn_dma_alloc_coherent,
-	.free_coherent = sn_dma_free_coherent,
+	.alloc = sn_dma_alloc_coherent,
+	.free = sn_dma_free_coherent,
 	.map_page = sn_dma_map_page,
 	.unmap_page = sn_dma_unmap_page,
 	.map_sg = sn_dma_map_sg,
...
@@ -59,7 +59,8 @@ extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_addr, gfp_t flag);
+					dma_addr_t *dma_addr, gfp_t flag,
+					struct dma_attrs *attrs);
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
@@ -111,9 +112,11 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 	return gfp;
 }
 
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
 static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp)
+dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
@@ -129,18 +132,21 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (!is_device_dma_capable(dev))
 		return NULL;
 
-	if (!ops->alloc_coherent)
+	if (!ops->alloc)
 		return NULL;
 
-	memory = ops->alloc_coherent(dev, size, dma_handle,
-				     dma_alloc_coherent_gfp_flags(dev, gfp));
+	memory = ops->alloc(dev, size, dma_handle,
+			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t bus)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t bus,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -150,8 +156,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 		return;
 
 	debug_dma_free_coherent(dev, size, vaddr, bus);
-	if (ops->free_coherent)
-		ops->free_coherent(dev, size, vaddr, bus);
+	if (ops->free)
+		ops->free(dev, size, vaddr, bus, attrs);
 }
 
 #endif
@@ -477,7 +477,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 /* allocate and map a coherent mapping */
 static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
-		    gfp_t flag)
+		    gfp_t flag, struct dma_attrs *attrs)
 {
 	dma_addr_t paddr;
 	unsigned long align_mask;
@@ -500,7 +500,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		}
 		__free_pages(page, get_order(size));
 	} else
-		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
+						  attrs);
 
 	return NULL;
 }
@@ -508,7 +509,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 /* free a coherent mapping */
 static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
-		   dma_addr_t dma_addr)
+		   dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -700,8 +701,8 @@ static struct dma_map_ops gart_dma_ops = {
 	.unmap_sg = gart_unmap_sg,
 	.map_page = gart_map_page,
 	.unmap_page = gart_unmap_page,
-	.alloc_coherent = gart_alloc_coherent,
-	.free_coherent = gart_free_coherent,
+	.alloc = gart_alloc_coherent,
+	.free = gart_free_coherent,
 	.mapping_error = gart_mapping_error,
 };
...
@@ -431,7 +431,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 }
 
 static void* calgary_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag)
+	dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -464,7 +464,8 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void calgary_free_coherent(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle)
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	unsigned int npages;
 	struct iommu_table *tbl = find_iommu_table(dev);
@@ -477,8 +478,8 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 }
 
 static struct dma_map_ops calgary_dma_ops = {
-	.alloc_coherent = calgary_alloc_coherent,
-	.free_coherent = calgary_free_coherent,
+	.alloc = calgary_alloc_coherent,
+	.free = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
...
@@ -96,7 +96,8 @@ void __init pci_iommu_alloc(void)
 	}
 }
 
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
 	struct page *page;
...
@@ -75,7 +75,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr)
+				dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
@@ -96,8 +96,8 @@ static void nommu_sync_sg_for_device(struct device *dev,
 }
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent = dma_generic_alloc_coherent,
-	.free_coherent = nommu_free_coherent,
+	.alloc = dma_generic_alloc_coherent,
+	.free = nommu_free_coherent,
 	.map_sg = nommu_map_sg,
 	.map_page = nommu_map_page,
 	.sync_single_for_device = nommu_sync_single_for_device,
...
@@ -15,21 +15,30 @@
 int swiotlb __read_mostly;
 
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags)
+					dma_addr_t *dma_handle, gfp_t flags,
+					struct dma_attrs *attrs)
 {
 	void *vaddr;
 
-	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
+					   attrs);
 	if (vaddr)
 		return vaddr;
 
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
+static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = x86_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = x86_swiotlb_alloc_coherent,
+	.free = x86_swiotlb_free_coherent,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
...
@@ -12,8 +12,8 @@ int xen_swiotlb __read_mostly;
 static struct dma_map_ops xen_swiotlb_dma_ops = {
 	.mapping_error = xen_swiotlb_dma_mapping_error,
-	.alloc_coherent = xen_swiotlb_alloc_coherent,
-	.free_coherent = xen_swiotlb_free_coherent,
+	.alloc = xen_swiotlb_alloc_coherent,
+	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
...
@@ -2707,7 +2707,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
  * The exported alloc_coherent function for dma_ops.
  */
 static void *alloc_coherent(struct device *dev, size_t size,
-			    dma_addr_t *dma_addr, gfp_t flag)
+			    dma_addr_t *dma_addr, gfp_t flag,
+			    struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	void *virt_addr;
@@ -2765,7 +2766,8 @@ static void *alloc_coherent(struct device *dev, size_t size,
  * The exported free_coherent function for dma_ops.
  */
 static void free_coherent(struct device *dev, size_t size,
-			  void *virt_addr, dma_addr_t dma_addr)
+			  void *virt_addr, dma_addr_t dma_addr,
+			  struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct protection_domain *domain;
@@ -2846,8 +2848,8 @@ static void prealloc_protection_domains(void)
 }
 
 static struct dma_map_ops amd_iommu_dma_ops = {
-	.alloc_coherent = alloc_coherent,
-	.free_coherent = free_coherent,
+	.alloc = alloc_coherent,
+	.free = free_coherent,
 	.map_page = map_page,
 	.unmap_page = unmap_page,
 	.map_sg = map_sg,
...
@@ -2938,7 +2938,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 }
 
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags)
+				  dma_addr_t *dma_handle, gfp_t flags,
+				  struct dma_attrs *attrs)
 {
 	void *vaddr;
 	int order;
@@ -2970,7 +2971,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 }
 
 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-				dma_addr_t dma_handle)
+				dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
 
@@ -3115,8 +3116,8 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
 }
 
 struct dma_map_ops intel_dma_ops = {
-	.alloc_coherent = intel_alloc_coherent,
-	.free_coherent = intel_free_coherent,
+	.alloc = intel_alloc_coherent,
+	.free = intel_free_coherent,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
 	.map_page = intel_map_page,
...
@@ -204,7 +204,8 @@ void __init xen_swiotlb_init(int verbose)
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flags)
+			   dma_addr_t *dma_handle, gfp_t flags,
+			   struct dma_attrs *attrs)
 {
 	void *ret;
 	int order = get_order(size);
@@ -253,7 +254,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
 void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-			  dma_addr_t dev_addr)
+			  dma_addr_t dev_addr, struct dma_attrs *attrs)
 {
 	int order = get_order(size);
 	phys_addr_t phys;
...
@@ -7,11 +7,13 @@ extern void xen_swiotlb_init(int verbose);
 extern void
 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			    dma_addr_t *dma_handle, gfp_t flags);
+			    dma_addr_t *dma_handle, gfp_t flags,
+			    struct dma_attrs *attrs);
 
 extern void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
-			  void *vaddr, dma_addr_t dma_handle);
+			  void *vaddr, dma_addr_t dma_handle,
+			  struct dma_attrs *attrs);
 
 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 					unsigned long offset, size_t size,
...