Commit d9703650 authored by Parav Pandit, committed by Doug Ledford

IB/{rxe,core,rdmavt}: Fix kernel crash for reg MR

This patch fixes the kernel crash below, seen on memory registration with rxe
and other transport drivers that provide a dma_ops extension.

IB/core invokes ib_dma_map_sg_attrs() in a generic manner, with DMA attributes
that are used by the mlx5 and mthca adapters. In doing so, however, it did not
honor the dma_ops extension that software-based transports provide for sg
map/unmap operations. As a result, dma_map_sg_attrs() was called on the
virtual device, crashing on a NULL pointer dereference.
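
For reference, the crash originates in the umem pinning path visible in the
trace below. A condensed sketch of the call site in ib_umem_get() (simplified;
names as in the 4.8-era tree):

	/* ib_umem_get(): map the pinned pages for the device (sketch) */
	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 dma_attrs);

Since ib_dma_map_sg_attrs() ignored dev->dma_ops, this call fell through to
dma_map_sg_attrs() on the software device, and the nommu path then
dereferenced a NULL device in check_addr().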

We extend the core to support map_sg_attrs/unmap_sg_attrs and the transport
drivers to implement those dma_ops callback functions.
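
Software transports install their ops table at device registration time; for
rxe this amounts to the following (sketch, abbreviated from
rxe_register_device()):

	/* rxe_register_device(): point the ib_device at the software DMA ops (sketch) */
	struct ib_device *dev = &rxe->ib_dev;

	dev->dma_ops = &rxe_dma_mapping_ops;

The new core inlines can therefore dispatch on dev->dma_ops: hardware devices
leave it NULL and keep the direct dma_map_sg_attrs() path, while software
devices get their map_sg_attrs/unmap_sg_attrs callbacks.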

Verified using perftest applications.

BUG: unable to handle kernel NULL pointer dereference at           (null)
IP: [<ffffffff81032a75>] check_addr+0x35/0x60
...
Call Trace:
 [<ffffffff81032b39>] ? nommu_map_sg+0x99/0xd0
 [<ffffffffa02b31c6>] ib_umem_get+0x3d6/0x470 [ib_core]
 [<ffffffffa01cc329>] rxe_mem_init_user+0x49/0x270 [rdma_rxe]
 [<ffffffffa01c793a>] ? rxe_add_index+0xca/0x100 [rdma_rxe]
 [<ffffffffa01c995f>] rxe_reg_user_mr+0x9f/0x130 [rdma_rxe]
 [<ffffffffa00419fe>] ib_uverbs_reg_mr+0x14e/0x2c0 [ib_uverbs]
 [<ffffffffa003d3ab>] ib_uverbs_write+0x15b/0x3b0 [ib_uverbs]
 [<ffffffff811e92a6>] ? mem_cgroup_commit_charge+0x76/0xe0
 [<ffffffff811af0a9>] ? page_add_new_anon_rmap+0x89/0xc0
 [<ffffffff8117e6c9>] ? lru_cache_add_active_or_unevictable+0x39/0xc0
 [<ffffffff811f0da8>] __vfs_write+0x28/0x120
 [<ffffffff811f1239>] ? rw_verify_area+0x49/0xb0
 [<ffffffff811f1492>] vfs_write+0xb2/0x1b0
 [<ffffffff811f27d6>] SyS_write+0x46/0xa0
 [<ffffffff814f7d32>] entry_SYSCALL_64_fastpath+0x1a/0xa4
Signed-off-by: Parav Pandit <pandit.parav@gmail.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent ffae955d
@@ -138,6 +138,21 @@ static void rvt_unmap_sg(struct ib_device *dev,
 	/* This is a stub, nothing to be done here */
 }
 
+static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+			    int nents, enum dma_data_direction direction,
+			    unsigned long attrs)
+{
+	return rvt_map_sg(dev, sgl, nents, direction);
+}
+
+static void rvt_unmap_sg_attrs(struct ib_device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction direction,
+			       unsigned long attrs)
+{
+	rvt_unmap_sg(dev, sg, nents, direction);
+}
+
 static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
 				    size_t size, enum dma_data_direction dir)
 {
@@ -177,6 +192,8 @@ struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
 	.unmap_page = rvt_dma_unmap_page,
 	.map_sg = rvt_map_sg,
 	.unmap_sg = rvt_unmap_sg,
+	.map_sg_attrs = rvt_map_sg_attrs,
+	.unmap_sg_attrs = rvt_unmap_sg_attrs,
 	.sync_single_for_cpu = rvt_sync_single_for_cpu,
 	.sync_single_for_device = rvt_sync_single_for_device,
 	.alloc_coherent = rvt_dma_alloc_coherent,
@@ -117,6 +117,21 @@ static void rxe_unmap_sg(struct ib_device *dev,
 	WARN_ON(!valid_dma_direction(direction));
 }
 
+static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+			    int nents, enum dma_data_direction direction,
+			    unsigned long attrs)
+{
+	return rxe_map_sg(dev, sgl, nents, direction);
+}
+
+static void rxe_unmap_sg_attrs(struct ib_device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction direction,
+			       unsigned long attrs)
+{
+	rxe_unmap_sg(dev, sg, nents, direction);
+}
+
 static void rxe_sync_single_for_cpu(struct ib_device *dev,
 				    u64 addr,
 				    size_t size, enum dma_data_direction dir)
@@ -159,6 +174,8 @@ struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
 	.unmap_page = rxe_dma_unmap_page,
 	.map_sg = rxe_map_sg,
 	.unmap_sg = rxe_unmap_sg,
+	.map_sg_attrs = rxe_map_sg_attrs,
+	.unmap_sg_attrs = rxe_unmap_sg_attrs,
 	.sync_single_for_cpu = rxe_sync_single_for_cpu,
 	.sync_single_for_device = rxe_sync_single_for_device,
 	.alloc_coherent = rxe_dma_alloc_coherent,
@@ -1739,6 +1739,14 @@ struct ib_dma_mapping_ops {
 	void		(*unmap_sg)(struct ib_device *dev,
 				    struct scatterlist *sg, int nents,
 				    enum dma_data_direction direction);
+	int		(*map_sg_attrs)(struct ib_device *dev,
+					struct scatterlist *sg, int nents,
+					enum dma_data_direction direction,
+					unsigned long attrs);
+	void		(*unmap_sg_attrs)(struct ib_device *dev,
+					  struct scatterlist *sg, int nents,
+					  enum dma_data_direction direction,
+					  unsigned long attrs);
 	void		(*sync_single_for_cpu)(struct ib_device *dev,
 					       u64 dma_handle,
 					       size_t size,
@@ -3000,8 +3008,12 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 				      enum dma_data_direction direction,
 				      unsigned long dma_attrs)
 {
-	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-				dma_attrs);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
+						  dma_attrs);
+	else
+		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+					dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3009,7 +3021,12 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 				      enum dma_data_direction direction,
 				      unsigned long dma_attrs)
 {
-	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
+					     dma_attrs);
+	else
+		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
+				   dma_attrs);
 }
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry