Commit 0957c29f authored by Bart Van Assche, committed by Doug Ledford

IB/core: Restore I/O MMU, s390 and powerpc support

Avoid the following error message being reported on the console
when loading an RDMA driver with I/O MMU support enabled:

DMAR: Allocating domain for mlx5_0 failed

Ensure that DMA mapping operations that use to_pci_dev() to
access struct pci_dev see the correct PCI device. E.g. the s390
and powerpc DMA mapping operations use to_pci_dev() even with I/O
MMU support disabled.
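
To illustrate the failure mode: an architecture's DMA mapping implementation
may cast the generic struct device back to the PCI device that contains it.
A minimal sketch, with a hypothetical function name and made-up lookup details
(this is not the actual s390/powerpc code):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical arch DMA op: it assumes "dev" is embedded in a
 * struct pci_dev and uses to_pci_dev() (a container_of() cast) to
 * reach per-device translation state. Passing &ib_dev->dev here
 * instead of the parent PCI device would make the cast yield
 * garbage, which is why dev->dma_device must point at the real
 * PCI device.
 */
static dma_addr_t sketch_arch_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* ... consult pdev's I/O MMU / translation tables ... */
	dev_dbg(&pdev->dev, "mapping %zu bytes\n", size);
	return page_to_phys(page) + offset;	/* placeholder translation */
}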

This patch preserves the following changes of the DMA mapping updates
patch series:
- Introduction of dma_virt_ops (see the sketch after this list).
- Removal of ib_device.dma_ops.
- Removal of struct ib_dma_mapping_ops.
- Removal of an if-statement from each ib_dma_*() operation.
- IB HW drivers no longer set dma_device directly.
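
As context for the dma_virt_ops item above: a software RDMA driver with no
DMA engine installs these ops before registering, and ib_register_device()
then routes all ib_dma_*() calls through them. A minimal sketch against the
4.11-era interfaces (the exact code in rxe/rdmavt may differ slightly):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: a software RDMA driver installs dma_virt_ops on its
 * ib_device before calling ib_register_device(). The registration
 * code then sees device->dev.dma_ops != NULL and sets
 * device->dma_device = &device->dev, so every ib_dma_*() helper
 * goes through dma_virt_ops.
 */
static int sketch_soft_rdma_register(struct ib_device *ibdev)
{
	ibdev->dev.dma_ops = &dma_virt_ops;

	return ib_register_device(ibdev, NULL);
}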
Reported-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reported-by: Parav Pandit <parav@mellanox.com>
Fixes: commit 99db9494 ("IB/core: Remove ib_device.dma_device")
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: parav@mellanox.com
Tested-by: parav@mellanox.com
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent a1c5dd13
drivers/infiniband/core/device.c
@@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device,
 	struct device *parent = device->dev.parent;
 
 	WARN_ON_ONCE(!parent);
-	if (!device->dev.dma_ops)
-		device->dev.dma_ops = parent->dma_ops;
-	if (!device->dev.dma_mask)
-		device->dev.dma_mask = parent->dma_mask;
-	if (!device->dev.coherent_dma_mask)
-		device->dev.coherent_dma_mask = parent->coherent_dma_mask;
+	WARN_ON_ONCE(device->dma_device);
+	if (device->dev.dma_ops) {
+		/*
+		 * The caller provided custom DMA operations. Copy the
+		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
+		 * into device->dev.
+		 */
+		device->dma_device = &device->dev;
+		if (!device->dev.dma_mask)
+			device->dev.dma_mask = parent->dma_mask;
+		if (!device->dev.coherent_dma_mask)
+			device->dev.coherent_dma_mask =
+				parent->coherent_dma_mask;
+	} else {
+		/*
+		 * The caller did not provide custom DMA operations. Use the
+		 * DMA mapping operations of the parent device.
+		 */
+		device->dma_device = parent;
+	}
 
 	mutex_lock(&device_mutex);
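
Conversely, a hardware driver now only sets the parent device and never
touches dma_device itself (the last bullet in the changelog). A hedged
sketch; the helper name is made up:

#include <linux/pci.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: a hardware RDMA driver only sets the parent device and
 * leaves device->dev.dma_ops NULL, so ib_register_device() takes the
 * "else" branch above and sets device->dma_device = &pdev->dev. The
 * s390/powerpc PCI DMA ops therefore see the real struct pci_dev again.
 */
static int sketch_hw_driver_register(struct ib_device *ibdev,
				     struct pci_dev *pdev)
{
	ibdev->dev.parent = &pdev->dev;

	return ib_register_device(ibdev, NULL);
}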
include/rdma/ib_verbs.h
@@ -1863,6 +1863,9 @@ struct ib_port_immutable {
 };
 
 struct ib_device {
+	/* Do not access @dma_device directly from ULP nor from HW drivers. */
+	struct device *dma_device;
+
 	char name[IB_DEVICE_NAME_MAX];
 
 	struct list_head event_handler_list;
@@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  */
 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
-	return dma_mapping_error(&dev->dev, dma_addr);
+	return dma_mapping_error(dev->dma_device, dma_addr);
 }
 
 /**
@@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
 				    void *cpu_addr, size_t size,
 				    enum dma_data_direction direction)
 {
-	return dma_map_single(&dev->dev, cpu_addr, size, direction);
+	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
 }
 
 /**
@@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 				       u64 addr, size_t size,
 				       enum dma_data_direction direction)
 {
-	dma_unmap_single(&dev->dev, addr, size, direction);
+	dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
 /**
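
For reference, a typical ULP call sequence over the three helpers above;
the buffer and length are illustrative and error handling is abbreviated:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch of typical ULP usage: map a kernel buffer for a send, check
 * for a mapping failure, and unmap once the work request completes.
 */
static int sketch_map_for_send(struct ib_device *ibdev, void *buf,
			       size_t len, u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, *dma_addr))
		return -ENOMEM;
	return 0;
}

static void sketch_unmap_after_send(struct ib_device *ibdev, u64 dma_addr,
				    size_t len)
{
	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);
}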
@@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
 				  size_t size,
 				  enum dma_data_direction direction)
 {
-	return dma_map_page(&dev->dev, page, offset, size, direction);
+	return dma_map_page(dev->dma_device, page, offset, size, direction);
 }
 
 /**
@@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
 				     u64 addr, size_t size,
 				     enum dma_data_direction direction)
 {
-	dma_unmap_page(&dev->dev, addr, size, direction);
+	dma_unmap_page(dev->dma_device, addr, size, direction);
 }
 
 /**
@@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
 				struct scatterlist *sg, int nents,
 				enum dma_data_direction direction)
 {
-	return dma_map_sg(&dev->dev, sg, nents, direction);
+	return dma_map_sg(dev->dma_device, sg, nents, direction);
 }
 
 /**
@@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction direction)
 {
-	dma_unmap_sg(&dev->dev, sg, nents, direction);
+	dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
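
A sketch of scatterlist usage with the helpers above; note that
ib_dma_map_sg() returns the number of mapped entries and 0 on failure
(names below are illustrative, and the pages are assumed to be pinned):

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static int sketch_map_sg(struct ib_device *ibdev, struct scatterlist *sgl,
			 int nents)
{
	int mapped;

	mapped = ib_dma_map_sg(ibdev, sgl, nents, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;	/* 0 means the mapping failed */

	/* ... build work requests using ib_sg_dma_address() per entry ... */

	ib_dma_unmap_sg(ibdev, sgl, nents, DMA_BIDIRECTIONAL);
	return 0;
}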
@@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 				      enum dma_data_direction direction,
 				      unsigned long dma_attrs)
 {
-	return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+				dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 				    enum dma_data_direction direction,
 				    unsigned long dma_attrs)
 {
-	dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
+	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
 }
 
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
 					      size_t size,
 					      enum dma_data_direction dir)
 {
-	dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
+	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
 }
 
 /**
@@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
 						 size_t size,
 						 enum dma_data_direction dir)
 {
-	dma_sync_single_for_device(&dev->dev, addr, size, dir);
+	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
 }
 
 /**
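
The sync helpers matter for streaming mappings, where ownership of the
buffer alternates between CPU and device. A short illustrative sketch:

#include <rdma/ib_verbs.h>

/*
 * Sketch: bracket CPU access to a device-owned streaming buffer with
 * the sync helpers so the CPU sees data the device wrote (and vice
 * versa) on non-coherent platforms.
 */
static void sketch_inspect_recv(struct ib_device *ibdev, u64 dma_addr,
				void *buf, size_t len)
{
	ib_dma_sync_single_for_cpu(ibdev, dma_addr, len, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the received data in buf ... */
	ib_dma_sync_single_for_device(ibdev, dma_addr, len, DMA_FROM_DEVICE);
}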
@@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
 					  dma_addr_t *dma_handle,
 					  gfp_t flag)
 {
-	return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
+	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
 }
 
 /**
@@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
 					size_t size, void *cpu_addr,
 					dma_addr_t dma_handle)
 {
-	dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
+	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
 }
 
 /**
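
Finally, the coherent allocators also resolve through dev->dma_device; using
the ib_device's own struct device here (one without an IOMMU domain) is the
kind of path that triggered the "DMAR: Allocating domain ... failed" message
from the changelog. A small sketch:

#include <linux/gfp.h>
#include <rdma/ib_verbs.h>

/* Sketch: allocate and free a coherent ring buffer via the helpers above. */
static void *sketch_alloc_ring(struct ib_device *ibdev, size_t size,
			       dma_addr_t *handle)
{
	return ib_dma_alloc_coherent(ibdev, size, handle, GFP_KERNEL);
}

static void sketch_free_ring(struct ib_device *ibdev, size_t size,
			     void *vaddr, dma_addr_t handle)
{
	ib_dma_free_coherent(ibdev, size, vaddr, handle);
}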