Commit aab8d396 authored by Jason Gunthorpe

RDMA/mlx5: Change mlx5_ib_populate_pas() to use rdma_for_each_block()

This routine converts the umem SGL into a list of fixed pages for DMA,
which is exactly what rdma_umem_for_each_dma_block() is for, so use the
common code directly.
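
For context, the common iteration it refers to looks roughly like the
sketch below (illustrative only, not part of the patch; the umem,
page_size, and pas names are assumed from the call sites):

	struct ib_block_iter biter;

	/*
	 * Walk the umem in aligned DMA blocks of page_size bytes and
	 * record each block's DMA address. ib_umem_num_dma_blocks(umem,
	 * page_size) gives the number of entries the destination array
	 * must hold.
	 */
	rdma_umem_for_each_dma_block(umem, &biter, page_size)
		*pas++ = cpu_to_be64(rdma_block_iter_dma_address(&biter));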

Link: https://lore.kernel.org/r/20201026132314.1336717-2-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f8fb3110
@@ -762,7 +762,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	}
 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
-	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);
+	mlx5_ib_populate_pas(cq->buf.umem, 1UL << page_shift, pas, 0);
 	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
 	MLX5_SET(cqc, cqc, log_page_size,
@@ -1305,8 +1305,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
 	if (udata)
-		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
-				     pas, 0);
+		mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
+				     0);
 	else
 		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
...
@@ -2115,7 +2115,7 @@ static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
 	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
 					    MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET(umem, umem, page_offset, obj->page_offset);
-	mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
+	mlx5_ib_populate_pas(obj->umem, 1UL << obj->page_shift, mtt,
 			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
 			     MLX5_IB_MTT_READ);
 }
...
@@ -155,13 +155,22 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	}
 }
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
-			  int page_shift, __be64 *pas, int access_flags)
+/*
+ * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
+ * filled in the pas array.
+ */
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+			  u64 access_flags)
 {
-	return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
-				      ib_umem_num_dma_blocks(umem, PAGE_SIZE),
-				      pas, access_flags);
+	struct ib_block_iter biter;
+
+	rdma_umem_for_each_dma_block (umem, &biter, page_size) {
+		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+				   access_flags);
+		pas++;
+	}
 }
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
 {
 	u64 page_size;
...
@@ -1235,8 +1235,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			    int page_shift, size_t offset, size_t num_pages,
 			    __be64 *pas, int access_flags);
-void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
-			  int page_shift, __be64 *pas, int access_flags);
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+			  u64 access_flags);
 void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
...
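Note that with the prototype above, callers pass the block size in bytes
(1UL << page_shift) rather than the shift itself, and size the PAS array
from the DMA block count. A hypothetical caller might look roughly like
this (sketch only; kvcalloc() and the zero access flags are illustrative,
not taken from the patch):

	size_t page_size = 1UL << page_shift;
	size_t nblocks = ib_umem_num_dma_blocks(umem, page_size);
	__be64 *pas;

	/* One __be64 entry per DMA block of page_size bytes. */
	pas = kvcalloc(nblocks, sizeof(*pas), GFP_KERNEL);
	if (!pas)
		return -ENOMEM;

	mlx5_ib_populate_pas(umem, page_size, pas, 0);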
@@ -1167,7 +1167,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 	if (populate)
-		inlen += sizeof(*pas) * roundup(ib_umem_num_pages(umem), 2);
+		inlen +=
+			sizeof(*pas) *
+			roundup(ib_umem_num_dma_blocks(umem, 1UL << page_shift),
+				2);
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
@@ -1179,7 +1182,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 			err = -EINVAL;
 			goto err_2;
 		}
-		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
+		mlx5_ib_populate_pas(umem, 1ULL << page_shift, pas,
 				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
 	}
...
@@ -971,7 +971,7 @@ static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	MLX5_SET(create_qp_in, *in, uid, uid);
 	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
 	if (ubuffer->umem)
-		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
+		mlx5_ib_populate_pas(ubuffer->umem, 1UL << page_shift, pas, 0);
 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
@@ -1251,7 +1251,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	MLX5_SET(wq, wq, page_offset, offset);
 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
-	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
+	mlx5_ib_populate_pas(sq->ubuffer.umem, 1UL << page_shift, pas, 0);
 	err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
@@ -4885,7 +4885,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 		MLX5_SET(rqc, rqc, delay_drop_en, 1);
 	}
 	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
-	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
+	mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
 	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
 	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
 		err = set_delay_drop(dev);
...
@@ -100,7 +100,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 		goto err_umem;
 	}
-	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
+	mlx5_ib_populate_pas(srq->umem, 1UL << page_shift, in->pas, 0);
 	err = mlx5_ib_db_map_user(ucontext, udata, ucmd.db_addr, &srq->db);
 	if (err) {
...