Commit ac1820fb authored by Linus Torvalds

Merge tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma DMA mapping updates from Doug Ledford:
 "Drop IB DMA mapping code and use core DMA code instead.

  Bart Van Assche noted that the IB DMA mapping code was similar enough
  to the core DMA mapping code that, with a few changes, it was possible
  to remove the IB DMA mapping code entirely and switch the RDMA stack
  to use the core DMA mapping code.

  This resulted in a nice set of cleanups, but touched the entire tree
  and has been kept separate for that reason."

* tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (37 commits)
  IB/rxe, IB/rdmavt: Use dma_virt_ops instead of duplicating it
  IB/core: Remove ib_device.dma_device
  nvme-rdma: Switch from dma_device to dev.parent
  RDS: net: Switch from dma_device to dev.parent
  IB/srpt: Modify a debug statement
  IB/srp: Switch from dma_device to dev.parent
  IB/iser: Switch from dma_device to dev.parent
  IB/IPoIB: Switch from dma_device to dev.parent
  IB/rxe: Switch from dma_device to dev.parent
  IB/vmw_pvrdma: Switch from dma_device to dev.parent
  IB/usnic: Switch from dma_device to dev.parent
  IB/qib: Switch from dma_device to dev.parent
  IB/qedr: Switch from dma_device to dev.parent
  IB/ocrdma: Switch from dma_device to dev.parent
  IB/nes: Remove a superfluous assignment statement
  IB/mthca: Switch from dma_device to dev.parent
  IB/mlx5: Switch from dma_device to dev.parent
  IB/mlx4: Switch from dma_device to dev.parent
  IB/i40iw: Remove a superfluous assignment statement
  IB/hns: Switch from dma_device to dev.parent
  ...
parents edccb594 0bbb3b74
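
The ULP-side change repeated across most of these commits is mechanical:
drivers used to reach the DMA-capable device through the ib_device's
dma_device field, and now use the parent of its embedded struct device.
A minimal sketch of the pattern (illustrative only; map_buf, buf and len
are placeholder names, not taken from the diff below):

	#include <linux/dma-mapping.h>
	#include <rdma/ib_verbs.h>

	static dma_addr_t map_buf(struct ib_device *ib_dev, void *buf, size_t len)
	{
		/* before: dma_map_single(ib_dev->dma_device, buf, len, DMA_TO_DEVICE); */
		return dma_map_single(ib_dev->dev.parent, buf, len, DMA_TO_DEVICE);
	}

With ib_device.dma_device gone, the software transports (rxe, rdmavt) get
their mapping behaviour from the generic dma_virt_ops instead of carrying a
private copy, per the first commit in the list.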
 #ifndef _ALPHA_DMA_MAPPING_H
 #define _ALPHA_DMA_MAPPING_H
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return dma_ops;
 }
...
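
A note on the pattern above, which repeats for every architecture in this
diff: get_dma_ops(struct device *) becomes get_arch_dma_ops(struct bus_type *),
which only supplies the architecture default. The per-device override and
the NULL-device handling move into the generic <linux/dma-mapping.h>, which
after this series reads roughly as follows (a sketch of the 4.11-era core
header, not a hunk from this diff):

	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		if (dev && dev->dma_ops)
			return dev->dma_ops;	/* per-device ops, now a struct device member */
		return get_arch_dma_ops(dev ? dev->bus : NULL);
	}

This is why the arch hooks below can drop their dev->archdata.dma_ops
checks, and why several of them ignore the bus argument entirely.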
@@ -128,7 +128,7 @@ static int alpha_noop_supported(struct device *dev, u64 mask)
 	return mask < 0x00ffffffUL ? 0 : 1;
 }
-struct dma_map_ops alpha_noop_ops = {
+const struct dma_map_ops alpha_noop_ops = {
 	.alloc = alpha_noop_alloc_coherent,
 	.free = dma_noop_free_coherent,
 	.map_page = dma_noop_map_page,
@@ -137,5 +137,5 @@ struct dma_map_ops alpha_noop_ops = {
 	.dma_supported = alpha_noop_supported,
 };
-struct dma_map_ops *dma_ops = &alpha_noop_ops;
+const struct dma_map_ops *dma_ops = &alpha_noop_ops;
 EXPORT_SYMBOL(dma_ops);
@@ -939,7 +939,7 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
-struct dma_map_ops alpha_pci_ops = {
+const struct dma_map_ops alpha_pci_ops = {
 	.alloc = alpha_pci_alloc_coherent,
 	.free = alpha_pci_free_coherent,
 	.map_page = alpha_pci_map_page,
@@ -950,5 +950,5 @@ struct dma_map_ops alpha_pci_ops = {
 	.dma_supported = alpha_pci_supported,
 };
-struct dma_map_ops *dma_ops = &alpha_pci_ops;
+const struct dma_map_ops *dma_ops = &alpha_pci_ops;
 EXPORT_SYMBOL(dma_ops);
@@ -18,9 +18,9 @@
 #include <plat/dma.h>
 #endif
-extern struct dma_map_ops arc_dma_ops;
+extern const struct dma_map_ops arc_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &arc_dma_ops;
 }
...
@@ -218,7 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask)
 	return dma_mask == DMA_BIT_MASK(32);
 }
-struct dma_map_ops arc_dma_ops = {
+const struct dma_map_ops arc_dma_ops = {
 	.alloc = arc_dma_alloc,
 	.free = arc_dma_free,
 	.mmap = arc_dma_mmap,
...
@@ -452,7 +452,7 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
 	return arm_dma_ops.set_dma_mask(dev, dma_mask);
 }
-static struct dma_map_ops dmabounce_ops = {
+static const struct dma_map_ops dmabounce_ops = {
 	.alloc = arm_dma_alloc,
 	.free = arm_dma_free,
 	.mmap = arm_dma_mmap,
...
@@ -7,7 +7,6 @@
 #define ASMARM_DEVICE_H
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
...
@@ -13,28 +13,22 @@
 #include <asm/xen/hypervisor.h>
 #define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-extern struct dma_map_ops arm_dma_ops;
-extern struct dma_map_ops arm_coherent_dma_ops;
+extern const struct dma_map_ops arm_dma_ops;
+extern const struct dma_map_ops arm_coherent_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	return &arm_dma_ops;
 }
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	if (xen_initial_domain())
 		return xen_dma_ops;
 	else
-		return __generic_dma_ops(dev);
+		return __generic_dma_ops(NULL);
 }
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
-	BUG_ON(!dev);
-	dev->archdata.dma_ops = ops;
-}
 #define HAVE_ARCH_DMA_SUPPORTED 1
...
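
The arm set_dma_ops() removed above (and the powerpc one further down) is
not lost: the series consolidates the helper into the core header, operating
on the new struct device member. Roughly (again a sketch of the consolidated
helper, not a hunk from this diff):

	static inline void set_dma_ops(struct device *dev,
				       const struct dma_map_ops *dma_ops)
	{
		dev->dma_ops = dma_ops;
	}

Code that used to poke dev->archdata.dma_ops directly now assigns
dev->dma_ops instead, as the bus-notifier and device-setup hunks below show.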
@@ -180,7 +180,7 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
-struct dma_map_ops arm_dma_ops = {
+const struct dma_map_ops arm_dma_ops = {
 	.alloc = arm_dma_alloc,
 	.free = arm_dma_free,
 	.mmap = arm_dma_mmap,
@@ -204,7 +204,7 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
-struct dma_map_ops arm_coherent_dma_ops = {
+const struct dma_map_ops arm_coherent_dma_ops = {
 	.alloc = arm_coherent_dma_alloc,
 	.free = arm_coherent_dma_free,
 	.mmap = arm_coherent_dma_mmap,
@@ -1069,7 +1069,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i, j;
@@ -1103,7 +1103,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
@@ -1122,7 +1122,7 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
@@ -1141,7 +1141,7 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
@@ -2101,7 +2101,7 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
-struct dma_map_ops iommu_ops = {
+const struct dma_map_ops iommu_ops = {
 	.alloc = arm_iommu_alloc_attrs,
 	.free = arm_iommu_free_attrs,
 	.mmap = arm_iommu_mmap_attrs,
@@ -2121,7 +2121,7 @@ struct dma_map_ops iommu_ops = {
 	.unmap_resource = arm_iommu_unmap_resource,
 };
-struct dma_map_ops iommu_coherent_ops = {
+const struct dma_map_ops iommu_coherent_ops = {
 	.alloc = arm_coherent_iommu_alloc_attrs,
 	.free = arm_coherent_iommu_free_attrs,
 	.mmap = arm_coherent_iommu_mmap_attrs,
@@ -2321,7 +2321,7 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
-static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
 {
 	return coherent ? &iommu_coherent_ops : &iommu_ops;
 }
@@ -2376,7 +2376,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
-static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
 	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
 }
@@ -2384,7 +2384,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 	dev->archdata.dma_coherent = coherent;
 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
...
@@ -182,10 +182,10 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
-struct dma_map_ops *xen_dma_ops;
+const struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);
-static struct dma_map_ops xen_swiotlb_dma_ops = {
+static const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
...
@@ -17,7 +17,6 @@
 #define __ASM_DEVICE_H
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
 #ifdef CONFIG_IOMMU_API
 	void *iommu;			/* private IOMMU data */
 #endif
...
@@ -25,12 +25,12 @@
 #include <asm/xen/hypervisor.h>
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
-extern struct dma_map_ops dummy_dma_ops;
+extern const struct dma_map_ops dummy_dma_ops;
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	/*
 	 * We expect no ISA devices, and all other DMA masters are expected to
@@ -39,12 +39,12 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 	return &dummy_dma_ops;
 }
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	if (xen_initial_domain())
 		return xen_dma_ops;
 	else
-		return __generic_dma_ops(dev);
+		return __generic_dma_ops(NULL);
 }
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
...
@@ -363,7 +363,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 	return 0;
 }
-static struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
@@ -516,7 +516,7 @@ static int __dummy_dma_supported(struct device *hwdev, u64 mask)
 	return 0;
 }
-struct dma_map_ops dummy_dma_ops = {
+const struct dma_map_ops dummy_dma_ops = {
 	.alloc = __dummy_alloc,
 	.free = __dummy_free,
 	.mmap = __dummy_mmap,
@@ -795,7 +795,7 @@ static void __iommu_unmap_sg_attrs(struct device *dev,
 	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
 }
-static struct dma_map_ops iommu_dma_ops = {
+static const struct dma_map_ops iommu_dma_ops = {
 	.alloc = __iommu_alloc_attrs,
 	.free = __iommu_free_attrs,
 	.mmap = __iommu_mmap_attrs,
@@ -848,7 +848,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
 		if (iommu_dma_init_domain(domain, dma_base, size, dev))
 			goto out_err;
-		dev->archdata.dma_ops = &iommu_dma_ops;
+		dev->dma_ops = &iommu_dma_ops;
 	}
 	return true;
@@ -958,7 +958,7 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_teardown_dma_ops(struct device *dev)
 {
-	dev->archdata.dma_ops = NULL;
+	dev->dma_ops = NULL;
 }
 #else
@@ -972,8 +972,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	if (!dev->archdata.dma_ops)
-		dev->archdata.dma_ops = &swiotlb_dma_ops;
+	if (!dev->dma_ops)
+		dev->dma_ops = &swiotlb_dma_ops;
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
...
@@ -4,9 +4,9 @@
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	int direction);
-extern struct dma_map_ops avr32_dma_ops;
+extern const struct dma_map_ops avr32_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &avr32_dma_ops;
 }
...
@@ -191,7 +191,7 @@ static void avr32_dma_sync_sg_for_device(struct device *dev,
 		dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
 }
-struct dma_map_ops avr32_dma_ops = {
+const struct dma_map_ops avr32_dma_ops = {
 	.alloc = avr32_dma_alloc,
 	.free = avr32_dma_free,
 	.map_page = avr32_dma_map_page,
...
@@ -36,9 +36,9 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
 		__dma_sync(addr, size, dir);
 }
-extern struct dma_map_ops bfin_dma_ops;
+extern const struct dma_map_ops bfin_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &bfin_dma_ops;
 }
...
@@ -159,7 +159,7 @@ static inline void bfin_dma_sync_single_for_device(struct device *dev,
 	_dma_sync(handle, size, dir);
 }
-struct dma_map_ops bfin_dma_ops = {
+const struct dma_map_ops bfin_dma_ops = {
 	.alloc = bfin_dma_alloc,
 	.free = bfin_dma_free,
...
@@ -17,9 +17,9 @@
 */
 #define DMA_ERROR_CODE ~0
-extern struct dma_map_ops c6x_dma_ops;
+extern const struct dma_map_ops c6x_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &c6x_dma_ops;
 }
...
@@ -123,7 +123,7 @@ static void c6x_dma_sync_sg_for_device(struct device *dev,
 }
-struct dma_map_ops c6x_dma_ops = {
+const struct dma_map_ops c6x_dma_ops = {
 	.alloc = c6x_dma_alloc,
 	.free = c6x_dma_free,
 	.map_page = c6x_dma_map_page,
...
@@ -69,7 +69,7 @@ static inline int v32_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-struct dma_map_ops v32_dma_ops = {
+const struct dma_map_ops v32_dma_ops = {
 	.alloc = v32_dma_alloc,
 	.free = v32_dma_free,
 	.map_page = v32_dma_map_page,
...
@@ -2,14 +2,14 @@
 #define _ASM_CRIS_DMA_MAPPING_H
 #ifdef CONFIG_PCI
-extern struct dma_map_ops v32_dma_ops;
+extern const struct dma_map_ops v32_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &v32_dma_ops;
 }
 #else
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	BUG();
 	return NULL;
...
@@ -7,9 +7,9 @@
 extern unsigned long __nongprelbss dma_coherent_mem_start;
 extern unsigned long __nongprelbss dma_coherent_mem_end;
-extern struct dma_map_ops frv_dma_ops;
+extern const struct dma_map_ops frv_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &frv_dma_ops;
 }
...
@@ -164,7 +164,7 @@ static int frv_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
 	.alloc = frv_dma_alloc,
 	.free = frv_dma_free,
 	.map_page = frv_dma_map_page,
...
@@ -106,7 +106,7 @@ static int frv_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
 	.alloc = frv_dma_alloc,
 	.free = frv_dma_free,
 	.map_page = frv_dma_map_page,
...
 #ifndef _H8300_DMA_MAPPING_H
 #define _H8300_DMA_MAPPING_H
-extern struct dma_map_ops h8300_dma_map_ops;
+extern const struct dma_map_ops h8300_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &h8300_dma_map_ops;
 }
...
@@ -60,7 +60,7 @@ static int map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
-struct dma_map_ops h8300_dma_map_ops = {
+const struct dma_map_ops h8300_dma_map_ops = {
 	.alloc = dma_alloc,
 	.free = dma_free,
 	.map_page = map_page,
...
@@ -32,13 +32,10 @@ struct device;
 extern int bad_dma_address;
 #define DMA_ERROR_CODE bad_dma_address
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	if (unlikely(dev == NULL))
-		return NULL;
 	return dma_ops;
 }
...
@@ -25,7 +25,7 @@
 #include <linux/module.h>
 #include <asm/page.h>
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 int bad_dma_address;  /* globals are automatically initialized to zero */
@@ -203,7 +203,7 @@ static void hexagon_sync_single_for_device(struct device *dev,
 	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
 }
-struct dma_map_ops hexagon_dma_ops = {
+const struct dma_map_ops hexagon_dma_ops = {
 	.alloc = hexagon_dma_alloc_coherent,
 	.free = hexagon_free_coherent,
 	.map_sg = hexagon_map_sg,
...
@@ -18,7 +18,7 @@
 #include <linux/export.h>
 #include <asm/machvec.h>
-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
@@ -34,7 +34,7 @@ static inline int use_swiotlb(struct device *dev)
 		!sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }
-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
 {
 	if (use_swiotlb(dev))
 		return &swiotlb_dma_ops;
...
@@ -2096,7 +2096,7 @@ static int __init acpi_sba_ioc_init_acpi(void)
 /* This has to run before acpi_scan_init(). */
 arch_initcall(acpi_sba_ioc_init_acpi);
-extern struct dma_map_ops swiotlb_dma_ops;
+extern const struct dma_map_ops swiotlb_dma_ops;
 static int __init
 sba_init(void)
@@ -2216,7 +2216,7 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
-struct dma_map_ops sba_dma_ops = {
+const struct dma_map_ops sba_dma_ops = {
 	.alloc = sba_alloc_coherent,
 	.free = sba_free_coherent,
 	.map_page = sba_map_page,
...
@@ -14,7 +14,7 @@
 #define DMA_ERROR_CODE 0
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
@@ -23,7 +23,10 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 				enum dma_data_direction);
-#define get_dma_ops(dev) platform_dma_get_ops(dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+	return platform_dma_get_ops(NULL);
+}
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
...
@@ -44,7 +44,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 /*
 * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -248,7 +248,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
 # endif /* CONFIG_IA64_GENERIC */
 extern void swiotlb_dma_init(void);
-extern struct dma_map_ops *dma_get_ops(struct device *);
+extern const struct dma_map_ops *dma_get_ops(struct device *);
 /*
 * Define default versions so we can extend machvec for new platforms without having
...
@@ -4,7 +4,7 @@
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly;
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
@@ -17,7 +17,7 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
-struct dma_map_ops *dma_get_ops(struct device *dev)
+const struct dma_map_ops *dma_get_ops(struct device *dev)
 {
 	return dma_ops;
 }
...
@@ -90,11 +90,11 @@ void __init pci_iommu_alloc(void)
 {
 	dma_ops = &intel_dma_ops;
-	dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
-	dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
-	dma_ops->sync_single_for_device = machvec_dma_sync_single;
-	dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
-	dma_ops->dma_supported = iommu_dma_supported;
+	intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
+	intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
+	intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
+	intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
+	intel_dma_ops.dma_supported = iommu_dma_supported;
 	/*
 	 * The order of these functions is important for
...
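
The ia64 pci_iommu hunk just above is a direct consequence of the
constification: dma_ops is now a pointer to const, so the per-member writes
must go through the (still writable) intel_dma_ops object itself. In
isolation (illustration, not from the diff):

	const struct dma_map_ops *dma_ops;	/* members are read-only through this pointer */

	dma_ops->dma_supported = iommu_dma_supported;		/* no longer compiles */
	intel_dma_ops.dma_supported = iommu_dma_supported;	/* fine: the object is not const */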
@@ -30,7 +30,7 @@ static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
-struct dma_map_ops swiotlb_dma_ops = {
+const struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = ia64_swiotlb_alloc_coherent,
 	.free = ia64_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
...
@@ -18,6 +18,7 @@ config M32R
 	select MODULES_USE_ELF_RELA
 	select HAVE_DEBUG_STACKOVERFLOW
 	select CPU_NO_EFFICIENT_FFS
+	select DMA_NOOP_OPS
 config SBUS
 	bool
...
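
m32r here (and s390 near the end) now selects DMA_NOOP_OPS, the lib/ Kconfig
symbol gating the generic dma_noop_ops: a trivial dma_map_ops for
cache-coherent machines where the bus address equals the physical address,
and the ops that m32r's get_arch_dma_ops() below keeps returning. Its
map_page is essentially (paraphrased from lib/dma-noop.c, not part of this
diff):

	static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
	{
		return page_to_phys(page) + offset;	/* identity mapping, no IOMMU */
	}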
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
* This file is released under the GPLv2 * This file is released under the GPLv2
*/ */
struct dev_archdata { struct dev_archdata {
struct dma_map_ops *dma_ops;
}; };
struct pdev_archdata { struct pdev_archdata {
......
...@@ -10,10 +10,8 @@ ...@@ -10,10 +10,8 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0x0) #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &dma_noop_ops; return &dma_noop_ops;
} }
......
#ifndef _M68K_DMA_MAPPING_H #ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H #define _M68K_DMA_MAPPING_H
extern struct dma_map_ops m68k_dma_ops; extern const struct dma_map_ops m68k_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
return &m68k_dma_ops; return &m68k_dma_ops;
} }
......
...@@ -158,7 +158,7 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, ...@@ -158,7 +158,7 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
return nents; return nents;
} }
struct dma_map_ops m68k_dma_ops = { const struct dma_map_ops m68k_dma_ops = {
.alloc = m68k_dma_alloc, .alloc = m68k_dma_alloc,
.free = m68k_dma_free, .free = m68k_dma_free,
.map_page = m68k_dma_map_page, .map_page = m68k_dma_map_page,
......
#ifndef _ASM_METAG_DMA_MAPPING_H #ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H #define _ASM_METAG_DMA_MAPPING_H
extern struct dma_map_ops metag_dma_ops; extern const struct dma_map_ops metag_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
return &metag_dma_ops; return &metag_dma_ops;
} }
......
...@@ -575,7 +575,7 @@ static void metag_dma_sync_sg_for_device(struct device *dev, ...@@ -575,7 +575,7 @@ static void metag_dma_sync_sg_for_device(struct device *dev,
dma_sync_for_device(sg_virt(sg), sg->length, direction); dma_sync_for_device(sg_virt(sg), sg->length, direction);
} }
struct dma_map_ops metag_dma_ops = { const struct dma_map_ops metag_dma_ops = {
.alloc = metag_dma_alloc, .alloc = metag_dma_alloc,
.free = metag_dma_free, .free = metag_dma_free,
.map_page = metag_dma_map_page, .map_page = metag_dma_map_page,
......
...@@ -36,9 +36,9 @@ ...@@ -36,9 +36,9 @@
/* /*
* Available generic sets of operations * Available generic sets of operations
*/ */
extern struct dma_map_ops dma_direct_ops; extern const struct dma_map_ops dma_direct_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
return &dma_direct_ops; return &dma_direct_ops;
} }
......
...@@ -187,7 +187,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, ...@@ -187,7 +187,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
#endif #endif
} }
struct dma_map_ops dma_direct_ops = { const struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent, .alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent, .free = dma_direct_free_coherent,
.mmap = dma_direct_mmap_coherent, .mmap = dma_direct_mmap_coherent,
......
...@@ -200,7 +200,7 @@ static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr ...@@ -200,7 +200,7 @@ static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr
} }
struct octeon_dma_map_ops { struct octeon_dma_map_ops {
struct dma_map_ops dma_map_ops; const struct dma_map_ops dma_map_ops;
dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr); dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr); phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
}; };
...@@ -328,7 +328,7 @@ static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = { ...@@ -328,7 +328,7 @@ static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
}, },
}; };
struct dma_map_ops *octeon_pci_dma_map_ops; const struct dma_map_ops *octeon_pci_dma_map_ops;
void __init octeon_pci_dma_init(void) void __init octeon_pci_dma_init(void)
{ {
......
...@@ -6,12 +6,7 @@ ...@@ -6,12 +6,7 @@
#ifndef _ASM_MIPS_DEVICE_H #ifndef _ASM_MIPS_DEVICE_H
#define _ASM_MIPS_DEVICE_H #define _ASM_MIPS_DEVICE_H
struct dma_map_ops;
struct dev_archdata { struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMA_PERDEV_COHERENT #ifdef CONFIG_DMA_PERDEV_COHERENT
/* Non-zero if DMA is coherent with CPU caches */ /* Non-zero if DMA is coherent with CPU caches */
bool dma_coherent; bool dma_coherent;
......
...@@ -9,14 +9,11 @@ ...@@ -9,14 +9,11 @@
#include <dma-coherence.h> #include <dma-coherence.h>
#endif #endif
extern struct dma_map_ops *mips_dma_map_ops; extern const struct dma_map_ops *mips_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
if (dev && dev->archdata.dma_ops) return mips_dma_map_ops;
return dev->archdata.dma_ops;
else
return mips_dma_map_ops;
} }
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
......
...@@ -65,7 +65,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); ...@@ -65,7 +65,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
struct dma_map_ops; struct dma_map_ops;
extern struct dma_map_ops *octeon_pci_dma_map_ops; extern const struct dma_map_ops *octeon_pci_dma_map_ops;
extern char *octeon_swiotlb; extern char *octeon_swiotlb;
#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */ #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
...@@ -88,7 +88,7 @@ extern struct plat_smp_ops nlm_smp_ops; ...@@ -88,7 +88,7 @@ extern struct plat_smp_ops nlm_smp_ops;
extern char nlm_reset_entry[], nlm_reset_entry_end[]; extern char nlm_reset_entry[], nlm_reset_entry_end[];
/* SWIOTLB */ /* SWIOTLB */
extern struct dma_map_ops nlm_swiotlb_dma_ops; extern const struct dma_map_ops nlm_swiotlb_dma_ops;
extern unsigned int nlm_threads_per_core; extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask; extern cpumask_t nlm_cpumask;
......
...@@ -114,7 +114,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) ...@@ -114,7 +114,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr; return daddr;
} }
static struct dma_map_ops loongson_dma_map_ops = { static const struct dma_map_ops loongson_dma_map_ops = {
.alloc = loongson_dma_alloc_coherent, .alloc = loongson_dma_alloc_coherent,
.free = loongson_dma_free_coherent, .free = loongson_dma_free_coherent,
.map_page = loongson_dma_map_page, .map_page = loongson_dma_map_page,
......
...@@ -417,7 +417,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, ...@@ -417,7 +417,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
EXPORT_SYMBOL(dma_cache_sync); EXPORT_SYMBOL(dma_cache_sync);
static struct dma_map_ops mips_default_dma_map_ops = { static const struct dma_map_ops mips_default_dma_map_ops = {
.alloc = mips_dma_alloc_coherent, .alloc = mips_dma_alloc_coherent,
.free = mips_dma_free_coherent, .free = mips_dma_free_coherent,
.mmap = mips_dma_mmap, .mmap = mips_dma_mmap,
...@@ -433,7 +433,7 @@ static struct dma_map_ops mips_default_dma_map_ops = { ...@@ -433,7 +433,7 @@ static struct dma_map_ops mips_default_dma_map_ops = {
.dma_supported = mips_dma_supported .dma_supported = mips_dma_supported
}; };
struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops); EXPORT_SYMBOL(mips_dma_map_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
......
...@@ -67,7 +67,7 @@ static void nlm_dma_free_coherent(struct device *dev, size_t size, ...@@ -67,7 +67,7 @@ static void nlm_dma_free_coherent(struct device *dev, size_t size,
swiotlb_free_coherent(dev, size, vaddr, dma_handle); swiotlb_free_coherent(dev, size, vaddr, dma_handle);
} }
struct dma_map_ops nlm_swiotlb_dma_ops = { const struct dma_map_ops nlm_swiotlb_dma_ops = {
.alloc = nlm_dma_alloc_coherent, .alloc = nlm_dma_alloc_coherent,
.free = nlm_dma_free_coherent, .free = nlm_dma_free_coherent,
.map_page = swiotlb_map_page, .map_page = swiotlb_map_page,
......
...@@ -167,7 +167,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) ...@@ -167,7 +167,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig); pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
} }
dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops; dev->dev.dma_ops = octeon_pci_dma_map_ops;
return 0; return 0;
} }
......
...@@ -14,9 +14,9 @@ ...@@ -14,9 +14,9 @@
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/io.h> #include <asm/io.h>
extern struct dma_map_ops mn10300_dma_ops; extern const struct dma_map_ops mn10300_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
return &mn10300_dma_ops; return &mn10300_dma_ops;
} }
......
...@@ -121,7 +121,7 @@ static int mn10300_dma_supported(struct device *dev, u64 mask) ...@@ -121,7 +121,7 @@ static int mn10300_dma_supported(struct device *dev, u64 mask)
return 1; return 1;
} }
struct dma_map_ops mn10300_dma_ops = { const struct dma_map_ops mn10300_dma_ops = {
.alloc = mn10300_dma_alloc, .alloc = mn10300_dma_alloc,
.free = mn10300_dma_free, .free = mn10300_dma_free,
.map_page = mn10300_dma_map_page, .map_page = mn10300_dma_map_page,
......
...@@ -10,9 +10,9 @@ ...@@ -10,9 +10,9 @@
#ifndef _ASM_NIOS2_DMA_MAPPING_H #ifndef _ASM_NIOS2_DMA_MAPPING_H
#define _ASM_NIOS2_DMA_MAPPING_H #define _ASM_NIOS2_DMA_MAPPING_H
extern struct dma_map_ops nios2_dma_ops; extern const struct dma_map_ops nios2_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
return &nios2_dma_ops; return &nios2_dma_ops;
} }
......
...@@ -192,7 +192,7 @@ static void nios2_dma_sync_sg_for_device(struct device *dev, ...@@ -192,7 +192,7 @@ static void nios2_dma_sync_sg_for_device(struct device *dev,
} }
struct dma_map_ops nios2_dma_ops = { const struct dma_map_ops nios2_dma_ops = {
.alloc = nios2_dma_alloc, .alloc = nios2_dma_alloc,
.free = nios2_dma_free, .free = nios2_dma_free,
.map_page = nios2_dma_map_page, .map_page = nios2_dma_map_page,
......
...@@ -28,9 +28,9 @@ ...@@ -28,9 +28,9 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0x0) #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern struct dma_map_ops or1k_dma_map_ops; extern const struct dma_map_ops or1k_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
return &or1k_dma_map_ops; return &or1k_dma_map_ops;
} }
......
...@@ -232,7 +232,7 @@ or1k_sync_single_for_device(struct device *dev, ...@@ -232,7 +232,7 @@ or1k_sync_single_for_device(struct device *dev,
mtspr(SPR_DCBFR, cl); mtspr(SPR_DCBFR, cl);
} }
struct dma_map_ops or1k_dma_map_ops = { const struct dma_map_ops or1k_dma_map_ops = {
.alloc = or1k_dma_alloc, .alloc = or1k_dma_alloc,
.free = or1k_dma_free, .free = or1k_dma_free,
.map_page = or1k_map_page, .map_page = or1k_map_page,
......
...@@ -21,13 +21,13 @@ ...@@ -21,13 +21,13 @@
*/ */
#ifdef CONFIG_PA11 #ifdef CONFIG_PA11
extern struct dma_map_ops pcxl_dma_ops; extern const struct dma_map_ops pcxl_dma_ops;
extern struct dma_map_ops pcx_dma_ops; extern const struct dma_map_ops pcx_dma_ops;
#endif #endif
extern struct dma_map_ops *hppa_dma_ops; extern const struct dma_map_ops *hppa_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
return hppa_dma_ops; return hppa_dma_ops;
} }
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <asm/parisc-device.h> #include <asm/parisc-device.h>
/* See comments in include/asm-parisc/pci.h */ /* See comments in include/asm-parisc/pci.h */
struct dma_map_ops *hppa_dma_ops __read_mostly; const struct dma_map_ops *hppa_dma_ops __read_mostly;
EXPORT_SYMBOL(hppa_dma_ops); EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = { static struct device root = {
......
...@@ -572,7 +572,7 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist * ...@@ -572,7 +572,7 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
flush_kernel_vmap_range(sg_virt(sg), sg->length); flush_kernel_vmap_range(sg_virt(sg), sg->length);
} }
struct dma_map_ops pcxl_dma_ops = { const struct dma_map_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported, .dma_supported = pa11_dma_supported,
.alloc = pa11_dma_alloc, .alloc = pa11_dma_alloc,
.free = pa11_dma_free, .free = pa11_dma_free,
...@@ -608,7 +608,7 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, ...@@ -608,7 +608,7 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
return; return;
} }
struct dma_map_ops pcx_dma_ops = { const struct dma_map_ops pcx_dma_ops = {
.dma_supported = pa11_dma_supported, .dma_supported = pa11_dma_supported,
.alloc = pcx_dma_alloc, .alloc = pcx_dma_alloc,
.free = pcx_dma_free, .free = pcx_dma_free,
......
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
#ifndef _ASM_POWERPC_DEVICE_H #ifndef _ASM_POWERPC_DEVICE_H
#define _ASM_POWERPC_DEVICE_H #define _ASM_POWERPC_DEVICE_H
struct dma_map_ops;
struct device_node; struct device_node;
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
struct pci_dn; struct pci_dn;
...@@ -20,9 +19,6 @@ struct iommu_table; ...@@ -20,9 +19,6 @@ struct iommu_table;
* drivers/macintosh/macio_asic.c * drivers/macintosh/macio_asic.c
*/ */
struct dev_archdata { struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
/* /*
* These two used to be a union. However, with the hybrid ops we need * These two used to be a union. However, with the hybrid ops we need
* both so here we store both a DMA offset for direct mappings and * both so here we store both a DMA offset for direct mappings and
......
...@@ -76,24 +76,16 @@ static inline unsigned long device_to_mask(struct device *dev) ...@@ -76,24 +76,16 @@ static inline unsigned long device_to_mask(struct device *dev)
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops; extern struct dma_map_ops dma_iommu_ops;
#endif #endif
extern struct dma_map_ops dma_direct_ops; extern const struct dma_map_ops dma_direct_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{ {
/* We don't handle the NULL dev case for ISA for now. We could /* We don't handle the NULL dev case for ISA for now. We could
* do it via an out of line call but it is not needed for now. The * do it via an out of line call but it is not needed for now. The
* only ISA DMA device we support is the floppy and we have a hack * only ISA DMA device we support is the floppy and we have a hack
* in the floppy driver directly to get a device for us. * in the floppy driver directly to get a device for us.
*/ */
if (unlikely(dev == NULL)) return NULL;
return NULL;
return dev->archdata.dma_ops;
}
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
dev->archdata.dma_ops = ops;
} }
/* /*
......
...@@ -53,8 +53,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) ...@@ -53,8 +53,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
} }
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
extern struct dma_map_ops *get_pci_dma_ops(void); extern const struct dma_map_ops *get_pci_dma_ops(void);
#else /* CONFIG_PCI */ #else /* CONFIG_PCI */
#define set_pci_dma_ops(d) #define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL #define get_pci_dma_ops() NULL
......
...@@ -435,7 +435,7 @@ static inline void *ps3_system_bus_get_drvdata( ...@@ -435,7 +435,7 @@ static inline void *ps3_system_bus_get_drvdata(
return dev_get_drvdata(&dev->core); return dev_get_drvdata(&dev->core);
} }
/* These two need global scope for get_dma_ops(). */ /* These two need global scope for get_arch_dma_ops(). */
extern struct bus_type ps3_system_bus_type; extern struct bus_type ps3_system_bus_type;
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include <linux/swiotlb.h> #include <linux/swiotlb.h>
extern struct dma_map_ops swiotlb_dma_ops; extern const struct dma_map_ops swiotlb_dma_ops;
static inline void dma_mark_clean(void *addr, size_t size) {} static inline void dma_mark_clean(void *addr, size_t size) {}
......
...@@ -46,7 +46,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev) ...@@ -46,7 +46,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* map_page, and unmap_page on highmem, use normal dma_ops * map_page, and unmap_page on highmem, use normal dma_ops
* for everything else. * for everything else.
*/ */
struct dma_map_ops swiotlb_dma_ops = { const struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_direct_alloc_coherent, .alloc = __dma_direct_alloc_coherent,
.free = __dma_direct_free_coherent, .free = __dma_direct_free_coherent,
.mmap = dma_direct_mmap_coherent, .mmap = dma_direct_mmap_coherent,
......
...@@ -33,7 +33,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev) ...@@ -33,7 +33,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
struct dev_archdata __maybe_unused *sd = &dev->archdata; struct dev_archdata __maybe_unused *sd = &dev->archdata;
#ifdef CONFIG_SWIOTLB #ifdef CONFIG_SWIOTLB
if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops) if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT); pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif #endif
...@@ -274,7 +274,7 @@ static inline void dma_direct_sync_single(struct device *dev, ...@@ -274,7 +274,7 @@ static inline void dma_direct_sync_single(struct device *dev,
} }
#endif #endif
struct dma_map_ops dma_direct_ops = { const struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent, .alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent, .free = dma_direct_free_coherent,
.mmap = dma_direct_mmap_coherent, .mmap = dma_direct_mmap_coherent,
...@@ -316,7 +316,7 @@ EXPORT_SYMBOL(dma_set_coherent_mask); ...@@ -316,7 +316,7 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
int __dma_set_mask(struct device *dev, u64 dma_mask) int __dma_set_mask(struct device *dev, u64 dma_mask)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); const struct dma_map_ops *dma_ops = get_dma_ops(dev);
if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
return dma_ops->set_dma_mask(dev, dma_mask); return dma_ops->set_dma_mask(dev, dma_mask);
...@@ -344,7 +344,7 @@ EXPORT_SYMBOL(dma_set_mask); ...@@ -344,7 +344,7 @@ EXPORT_SYMBOL(dma_set_mask);
u64 __dma_get_required_mask(struct device *dev) u64 __dma_get_required_mask(struct device *dev)
{ {
struct dma_map_ops *dma_ops = get_dma_ops(dev); const struct dma_map_ops *dma_ops = get_dma_ops(dev);
if (unlikely(dma_ops == NULL)) if (unlikely(dma_ops == NULL))
return 0; return 0;
......
...@@ -60,14 +60,14 @@ resource_size_t isa_mem_base; ...@@ -60,14 +60,14 @@ resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base); EXPORT_SYMBOL(isa_mem_base);
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
void set_pci_dma_ops(struct dma_map_ops *dma_ops) void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{ {
pci_dma_ops = dma_ops; pci_dma_ops = dma_ops;
} }
struct dma_map_ops *get_pci_dma_ops(void) const struct dma_map_ops *get_pci_dma_ops(void)
{ {
return pci_dma_ops; return pci_dma_ops;
} }
......
...@@ -651,7 +651,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask) ...@@ -651,7 +651,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
static struct dma_map_ops dma_iommu_fixed_ops = { static const struct dma_map_ops dma_iommu_fixed_ops = {
.alloc = dma_fixed_alloc_coherent, .alloc = dma_fixed_alloc_coherent,
.free = dma_fixed_free_coherent, .free = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg, .map_sg = dma_fixed_map_sg,
...@@ -692,7 +692,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, ...@@ -692,7 +692,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
return 0; return 0;
/* We use the PCI DMA ops */ /* We use the PCI DMA ops */
dev->archdata.dma_ops = get_pci_dma_ops(); dev->dma_ops = get_pci_dma_ops();
cell_dma_dev_setup(dev); cell_dma_dev_setup(dev);
...@@ -1172,7 +1172,7 @@ __setup("iommu_fixed=", setup_iommu_fixed); ...@@ -1172,7 +1172,7 @@ __setup("iommu_fixed=", setup_iommu_fixed);
static u64 cell_dma_get_required_mask(struct device *dev) static u64 cell_dma_get_required_mask(struct device *dev)
{ {
struct dma_map_ops *dma_ops; const struct dma_map_ops *dma_ops;
if (!dev->dma_mask) if (!dev->dma_mask)
return 0; return 0;
......
...@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) ...@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
*/ */
if (dev->vendor == 0x1959 && dev->device == 0xa007 && if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) { !firmware_has_feature(FW_FEATURE_LPAR)) {
dev->dev.archdata.dma_ops = &dma_direct_ops; dev->dev.dma_ops = &dma_direct_ops;
/* /*
* Set the coherent DMA mask to prevent the iommu * Set the coherent DMA mask to prevent the iommu
* being used unnecessarily * being used unnecessarily
......
...@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action, ...@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
return 0; return 0;
/* We use the direct ops for localbus */ /* We use the direct ops for localbus */
dev->archdata.dma_ops = &dma_direct_ops; dev->dma_ops = &dma_direct_ops;
return 0; return 0;
} }
......
...@@ -115,7 +115,7 @@ static u64 dma_npu_get_required_mask(struct device *dev) ...@@ -115,7 +115,7 @@ static u64 dma_npu_get_required_mask(struct device *dev)
return 0; return 0;
} }
static struct dma_map_ops dma_npu_ops = { static const struct dma_map_ops dma_npu_ops = {
.map_page = dma_npu_map_page, .map_page = dma_npu_map_page,
.map_sg = dma_npu_map_sg, .map_sg = dma_npu_map_sg,
.alloc = dma_npu_alloc, .alloc = dma_npu_alloc,
......
@@ -701,7 +701,7 @@ static u64 ps3_dma_get_required_mask(struct device *_dev)
 	return DMA_BIT_MASK(32);
 }
-static struct dma_map_ops ps3_sb_dma_ops = {
+static const struct dma_map_ops ps3_sb_dma_ops = {
 	.alloc = ps3_alloc_coherent,
 	.free = ps3_free_coherent,
 	.map_sg = ps3_sb_map_sg,
@@ -712,7 +712,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
 	.unmap_page = ps3_unmap_page,
 };
-static struct dma_map_ops ps3_ioc0_dma_ops = {
+static const struct dma_map_ops ps3_ioc0_dma_ops = {
 	.alloc = ps3_alloc_coherent,
 	.free = ps3_free_coherent,
 	.map_sg = ps3_ioc0_map_sg,
@@ -756,11 +756,11 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
 	switch (dev->dev_type) {
 	case PS3_DEVICE_TYPE_IOC0:
-		dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
+		dev->core.dma_ops = &ps3_ioc0_dma_ops;
 		dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
 		break;
 	case PS3_DEVICE_TYPE_SB:
-		dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
+		dev->core.dma_ops = &ps3_sb_dma_ops;
 		dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
 		break;
...
@@ -136,7 +136,7 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev)
 	return DMA_BIT_MASK(64);
 }
-static struct dma_map_ops ibmebus_dma_ops = {
+static const struct dma_map_ops ibmebus_dma_ops = {
 	.alloc = ibmebus_alloc_coherent,
 	.free = ibmebus_free_coherent,
 	.map_sg = ibmebus_map_sg,
@@ -169,7 +169,7 @@ static int ibmebus_create_device(struct device_node *dn)
 		return -ENOMEM;
 	dev->dev.bus = &ibmebus_bus_type;
-	dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
+	dev->dev.dma_ops = &ibmebus_dma_ops;
 	ret = of_device_add(dev);
 	if (ret)
...
@@ -615,7 +615,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
 	return dma_iommu_ops.get_required_mask(dev);
 }
-static struct dma_map_ops vio_dma_mapping_ops = {
+static const struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc = vio_dma_iommu_alloc_coherent,
 	.free = vio_dma_iommu_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
...
@@ -137,6 +137,7 @@ config S390
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_CONTIGUOUS
+	select DMA_NOOP_OPS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
...
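DMA_NOOP_OPS builds the generic no-op mapping implementation from lib/dma-noop.c, which s390 now relies on as its non-PCI default (see the dma_noop_ops fallback in the header change below). A rough sketch of what such no-op operations do, in the style of lib/dma-noop.c (details may differ):

/* Roughly what lib/dma-noop.c does: DMA addresses are simply
 * physical addresses, with no IOMMU and no bounce buffering.
 */
static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	return page_to_phys(page) + offset;
}

static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	return nents;
}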
@@ -4,7 +4,6 @@
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
 };
 struct pdev_archdata {
...
@@ -10,12 +10,10 @@
 #define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
-extern struct dma_map_ops s390_pci_dma_ops;
+extern const struct dma_map_ops s390_pci_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
 	return &dma_noop_ops;
 }
...
@@ -641,7 +641,7 @@ int pcibios_add_device(struct pci_dev *pdev)
 	int i;
 	pdev->dev.groups = zpci_attr_groups;
-	pdev->dev.archdata.dma_ops = &s390_pci_dma_ops;
+	pdev->dev.dma_ops = &s390_pci_dma_ops;
 	zpci_map_resources(pdev);
 	for (i = 0; i < PCI_BAR_COUNT; i++) {
...
@@ -650,7 +650,7 @@ static int __init dma_debug_do_init(void)
 }
 fs_initcall(dma_debug_do_init);
-struct dma_map_ops s390_pci_dma_ops = {
+const struct dma_map_ops s390_pci_dma_ops = {
 	.alloc = s390_dma_alloc,
 	.free = s390_dma_free,
 	.map_sg = s390_dma_map_sg,
...
 #ifndef __ASM_SH_DMA_MAPPING_H
 #define __ASM_SH_DMA_MAPPING_H
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
 extern void no_iommu_init(void);
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return dma_ops;
 }
...
@@ -65,7 +65,7 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
 }
 #endif
-struct dma_map_ops nommu_dma_ops = {
+const struct dma_map_ops nommu_dma_ops = {
 	.alloc = dma_generic_alloc_coherent,
 	.free = dma_generic_free_coherent,
 	.map_page = nommu_map_page,
...
@@ -22,7 +22,7 @@
 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 static int __init dma_init(void)
...
@@ -18,20 +18,20 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	 */
 }
-extern struct dma_map_ops *dma_ops;
-extern struct dma_map_ops *leon_dma_ops;
-extern struct dma_map_ops pci32_dma_ops;
+extern const struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *leon_dma_ops;
+extern const struct dma_map_ops pci32_dma_ops;
 extern struct bus_type pci_bus_type;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
 	if (sparc_cpu_model == sparc_leon)
 		return leon_dma_ops;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
-	if (dev->bus == &pci_bus_type)
+	if (bus == &pci_bus_type)
 		return &pci32_dma_ops;
 #endif
 	return dma_ops;
...
@@ -741,7 +741,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
-static struct dma_map_ops sun4u_dma_ops = {
+static const struct dma_map_ops sun4u_dma_ops = {
 	.alloc = dma_4u_alloc_coherent,
 	.free = dma_4u_free_coherent,
 	.map_page = dma_4u_map_page,
@@ -752,7 +752,7 @@ static struct dma_map_ops sun4u_dma_ops = {
 	.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
 };
-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 int dma_supported(struct device *dev, u64 device_mask)
...
@@ -401,7 +401,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	BUG();
 }
-static struct dma_map_ops sbus_dma_ops = {
+static const struct dma_map_ops sbus_dma_ops = {
 	.alloc = sbus_alloc_coherent,
 	.free = sbus_free_coherent,
 	.map_page = sbus_map_page,
@@ -637,7 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
 	}
 }
-struct dma_map_ops pci32_dma_ops = {
+const struct dma_map_ops pci32_dma_ops = {
 	.alloc = pci32_alloc_coherent,
 	.free = pci32_free_coherent,
 	.map_page = pci32_map_page,
@@ -652,10 +652,10 @@ struct dma_map_ops pci32_dma_ops = {
 EXPORT_SYMBOL(pci32_dma_ops);
 /* leon re-uses pci32_dma_ops */
-struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
 EXPORT_SYMBOL(leon_dma_ops);
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
 EXPORT_SYMBOL(dma_ops);
...
@@ -669,7 +669,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	local_irq_restore(flags);
 }
-static struct dma_map_ops sun4v_dma_ops = {
+static const struct dma_map_ops sun4v_dma_ops = {
 	.alloc = dma_4v_alloc_coherent,
 	.free = dma_4v_free_coherent,
 	.map_page = dma_4v_map_page,
...
@@ -17,9 +17,6 @@
 #define _ASM_TILE_DEVICE_H
 struct dev_archdata {
-	/* DMA operations on that device */
-	struct dma_map_ops *dma_ops;
 	/* Offset of the DMA address from the PA. */
 	dma_addr_t dma_offset;
...
@@ -24,17 +24,14 @@
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 #endif
-extern struct dma_map_ops *tile_dma_map_ops;
-extern struct dma_map_ops *gx_pci_dma_map_ops;
-extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+extern const struct dma_map_ops *tile_dma_map_ops;
+extern const struct dma_map_ops *gx_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
-	else
-		return tile_dma_map_ops;
+	return tile_dma_map_ops;
 }
 static inline dma_addr_t get_dma_offset(struct device *dev)
@@ -59,11 +56,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 static inline void dma_mark_clean(void *addr, size_t size) {}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
-	dev->archdata.dma_ops = ops;
-}
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
...
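The arch-local set_dma_ops() deleted above is replaced by a single generic helper next to the consolidated get_dma_ops(); a minimal sketch, assuming it lives in <linux/dma-mapping.h>:

/* Sketch of the generic helper: install (or clear, with NULL)
 * a device-specific dma_map_ops.
 */
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}

The direct dev->dma_ops assignments elsewhere in this diff (PS3, Calgary, s390 PCI) are equivalent to calling this helper.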
@@ -329,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-static struct dma_map_ops tile_default_dma_map_ops = {
+static const struct dma_map_ops tile_default_dma_map_ops = {
 	.alloc = tile_dma_alloc_coherent,
 	.free = tile_dma_free_coherent,
 	.map_page = tile_dma_map_page,
@@ -344,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = {
 	.dma_supported = tile_dma_supported
 };
-struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
 EXPORT_SYMBOL(tile_dma_map_ops);
 /* Generic PCI DMA mapping functions */
@@ -516,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
-static struct dma_map_ops tile_pci_default_dma_map_ops = {
+static const struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.alloc = tile_pci_dma_alloc_coherent,
 	.free = tile_pci_dma_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -531,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
-struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
 EXPORT_SYMBOL(gx_pci_dma_map_ops);
 /* PCI DMA mapping functions for legacy PCI devices */
@@ -552,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
-static struct dma_map_ops pci_swiotlb_dma_ops = {
+static const struct dma_map_ops pci_swiotlb_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
@@ -567,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 };
-static struct dma_map_ops pci_hybrid_dma_ops = {
+static const struct dma_map_ops pci_hybrid_dma_ops = {
 	.alloc = tile_swiotlb_alloc_coherent,
 	.free = tile_swiotlb_free_coherent,
 	.map_page = tile_pci_dma_map_page,
@@ -582,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = {
 	.dma_supported = tile_pci_dma_supported
 };
-struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
 #else
-struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 #endif
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
 int dma_set_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
@@ -623,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask);
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
+	const struct dma_map_ops *dma_ops = get_dma_ops(dev);
 	/*
 	 * For PCI devices with 64-bit DMA addressing capability, promote
...
@@ -21,9 +21,9 @@
 #include <asm/memory.h>
 #include <asm/cacheflush.h>
-extern struct dma_map_ops swiotlb_dma_map_ops;
+extern const struct dma_map_ops swiotlb_dma_map_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 	return &swiotlb_dma_map_ops;
 }
...
@@ -31,7 +31,7 @@ static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }
-struct dma_map_ops swiotlb_dma_map_ops = {
+const struct dma_map_ops swiotlb_dma_map_ops = {
 	.alloc = unicore_swiotlb_alloc_coherent,
 	.free = unicore_swiotlb_free_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
...
@@ -2,9 +2,6 @@
 #define _ASM_X86_DEVICE_H
 struct dev_archdata {
-#ifdef CONFIG_X86_DEV_DMA_OPS
-	struct dma_map_ops *dma_ops;
-#endif
 #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
@@ -13,7 +10,7 @@ struct dev_archdata {
 #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
 struct dma_domain {
 	struct list_head node;
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 	int domain_nr;
 };
 void add_dma_domain(struct dma_domain *domain);
...
@@ -25,18 +25,11 @@ extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-#ifndef CONFIG_X86_DEV_DMA_OPS
 	return dma_ops;
-#else
-	if (unlikely(!dev) || !dev->archdata.dma_ops)
-		return dma_ops;
-	else
-		return dev->archdata.dma_ops;
-#endif
 }
 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
...
 #ifndef _ASM_X86_IOMMU_H
 #define _ASM_X86_IOMMU_H
-extern struct dma_map_ops nommu_dma_ops;
+extern const struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
...
@@ -695,7 +695,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
 	return -1;
 }
-static struct dma_map_ops gart_dma_ops = {
+static const struct dma_map_ops gart_dma_ops = {
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
 	.map_page = gart_map_page,
...
@@ -478,7 +478,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 	free_pages((unsigned long)vaddr, get_order(size));
 }
-static struct dma_map_ops calgary_dma_ops = {
+static const struct dma_map_ops calgary_dma_ops = {
 	.alloc = calgary_alloc_coherent,
 	.free = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
@@ -1177,7 +1177,7 @@ static int __init calgary_init(void)
 		tbl = find_iommu_table(&dev->dev);
 		if (translation_enabled(tbl))
-			dev->dev.archdata.dma_ops = &calgary_dma_ops;
+			dev->dev.dma_ops = &calgary_dma_ops;
 	}
 	return ret;
@@ -1201,7 +1201,7 @@ static int __init calgary_init(void)
 		calgary_disable_translation(dev);
 		calgary_free_bus(dev);
 		pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
-		dev->dev.archdata.dma_ops = NULL;
+		dev->dev.dma_ops = NULL;
 	} while (1);
 	return ret;
...
@@ -17,7 +17,7 @@
 static int forbid_dac __read_mostly;
-struct dma_map_ops *dma_ops = &nommu_dma_ops;
+const struct dma_map_ops *dma_ops = &nommu_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 static int iommu_sac_force __read_mostly;
@@ -215,7 +215,7 @@ early_param("iommu", iommu_setup);
 int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 #ifdef CONFIG_PCI
 	if (mask > 0xffffffff && forbid_dac > 0) {
...
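From a driver's point of view nothing changes: dma_supported() is still reached through the standard mask-setting helpers, which now simply consult a const ops table. A hypothetical probe-time usage of the existing API (the 64-then-32-bit fallback is the conventional pattern; example_setup_dma is an illustrative name):

#include <linux/dma-mapping.h>

/* Hypothetical helper: request 64-bit DMA, fall back to 32-bit.
 * Both calls end up in dma_supported() via the device's dma_map_ops.
 */
static int example_setup_dma(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}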
(71 further file diffs are collapsed in this view.)