Commit 7d7c9453 authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Read ICM memory into dedicated buffer

Instead of using the write buffer for reading, we will use a dedicated
buffer that is only used for reading ICM memory.
Due to the new support for args, we can have a case where pending_wc is
an odd number, and when reading into the same write buffer it is
possible to overwrite the next write that uses the same slot.
For example:
pending_wc is 17, so the buffer for writes is:
   | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
and we have requests as follows:
   r wr wr wr wr wr wr wr wr
Now, because read and write share the same buffer, the first read lands
on the slot of the last write before that write has been posted to the
HW, and wrong data ends up in the ICM area.
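
As a rough illustration of the overwrite above, here is a small user-space
C sketch (slot count, slot size and names are hypothetical, chosen only to
mirror the diagram; this is not the driver code): the read-back either lands
in the shared write ring and destroys a still-queued write, or in a dedicated
sync buffer and leaves it intact.

```c
#include <stdio.h>

#define NUM_SLOTS 8
#define SLOT_SIZE 16

static char ring[NUM_SLOTS][SLOT_SIZE]; /* shared write ring            */
static char sync_buff[SLOT_SIZE];       /* dedicated read-back buffer   */

int main(void)
{
        int pending_wc = 17;
        int slot = pending_wc % NUM_SLOTS; /* slot of the queued write  */

        /* Old scheme: the read-back targets the same slot and clobbers
         * the write payload before it reaches the HW.
         */
        snprintf(ring[slot], SLOT_SIZE, "queued write");
        snprintf(ring[slot], SLOT_SIZE, "read-back");
        printf("shared buffer : slot %d holds \"%s\"\n", slot, ring[slot]);

        /* New scheme: the read-back lands in the dedicated sync buffer,
         * so the queued write payload survives.
         */
        snprintf(ring[slot], SLOT_SIZE, "queued write");
        snprintf(sync_buff, SLOT_SIZE, "read-back");
        printf("dedicated sync: slot %d holds \"%s\"\n", slot, ring[slot]);

        return 0;
}
```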
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 4605fc0a
@@ -602,9 +602,10 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
         send_ring->pending_wqe++;
         send_info->read.length = send_info->write.length;
-        /* Read into the same write area */
-        send_info->read.addr = (uintptr_t)send_info->write.addr;
-        send_info->read.lkey = send_ring->mr->mkey;
+        /* Read into dedicated sync buffer */
+        send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
+        send_info->read.lkey = send_ring->sync_mr->mkey;
         if (send_ring->pending_wqe % send_ring->signal_th == 0)
                 send_info->read.send_flags = IB_SEND_SIGNALED;
@@ -1288,16 +1289,25 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
                 goto free_mem;
         }
+        dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size,
+                                            GFP_KERNEL);
+        if (!dmn->send_ring->sync_buff) {
+                ret = -ENOMEM;
+                goto clean_mr;
+        }
+
         dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
                                             dmn->pdn, dmn->send_ring->sync_buff,
-                                            MIN_READ_SYNC);
+                                            dmn->send_ring->max_post_send_size);
         if (!dmn->send_ring->sync_mr) {
                 ret = -ENOMEM;
-                goto clean_mr;
+                goto free_sync_mem;
         }
         return 0;
+free_sync_mem:
+        kfree(dmn->send_ring->sync_buff);
 clean_mr:
         dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
 free_mem:
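
A minimal sketch of the unwind ordering introduced in this hunk, with
hypothetical stand-ins for kzalloc()/dr_reg_mr()/dr_dereg_mr(): a failed
sync-buffer allocation still jumps straight to clean_mr, while a failed MR
registration first frees the freshly allocated buffer (free_sync_mem) and
then falls through to the older cleanup labels, i.e. resources are released
in reverse order of acquisition.

```c
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's dr_reg_mr()/dr_dereg_mr(). */
static void *reg_mr_stub(void *buf, size_t len)
{
        (void)buf;
        return malloc(len);
}

static void dereg_mr_stub(void *mr)
{
        free(mr);
}

/* Mirrors the unwind order of the hunk above using generic allocators. */
static int ring_alloc_sketch(size_t max_post_send_size, void *main_mr,
                             void **sync_buff_out, void **sync_mr_out)
{
        void *sync_buff, *sync_mr;
        int ret;

        sync_buff = calloc(1, max_post_send_size);  /* like kzalloc()        */
        if (!sync_buff) {
                ret = -1;                           /* -ENOMEM in the kernel */
                goto clean_mr;
        }

        sync_mr = reg_mr_stub(sync_buff, max_post_send_size);
        if (!sync_mr) {
                ret = -1;
                goto free_sync_mem;
        }

        *sync_buff_out = sync_buff;
        *sync_mr_out = sync_mr;
        return 0;

free_sync_mem:
        free(sync_buff);
clean_mr:
        dereg_mr_stub(main_mr);   /* undo the earlier MR, as clean_mr does */
        return ret;
}
```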
@@ -1320,6 +1330,7 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
         dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
         dr_dereg_mr(dmn->mdev, send_ring->mr);
         kfree(send_ring->buf);
+        kfree(send_ring->sync_buff);
         kfree(send_ring);
 }
@@ -1429,9 +1429,6 @@ struct mlx5dr_mr {
         size_t size;
 };
 #define MAX_SEND_CQE 64
-#define MIN_READ_SYNC 64
 struct mlx5dr_send_ring {
         struct mlx5dr_cq *cq;
         struct mlx5dr_qp *qp;
@@ -1446,7 +1443,7 @@ struct mlx5dr_send_ring {
         u32 tx_head;
         void *buf;
         u32 buf_size;
-        u8 sync_buff[MIN_READ_SYNC];
+        u8 *sync_buff;
         struct mlx5dr_mr *sync_mr;
         spinlock_t lock; /* Protect the data path of the send ring */
         bool err_state; /* send_ring is not usable in err state */
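
The struct change replaces the fixed MIN_READ_SYNC-byte array with a pointer
because all read-backs now land in sync_buff and, per the first hunk,
read.length equals write.length, so the buffer is sized to the largest
possible post (max_post_send_size) at ring-allocation time. A hedged sketch
of that sizing relation, with the struct trimmed to the relevant fields and
plain calloc() standing in for kzalloc():

```c
#include <stdlib.h>

struct send_ring_sketch {
        void *buf;                 /* main write buffer                    */
        unsigned int buf_size;
        unsigned int max_post_send_size;
        unsigned char *sync_buff;  /* was: u8 sync_buff[MIN_READ_SYNC] (64) */
};

/* Size the read-back area to the largest post instead of a fixed 64 B. */
static int sync_buff_alloc(struct send_ring_sketch *ring)
{
        ring->sync_buff = calloc(1, ring->max_post_send_size);
        return ring->sync_buff ? 0 : -1;
}

static void sync_buff_free(struct send_ring_sketch *ring)
{
        free(ring->sync_buff);
        ring->sync_buff = NULL;
}
```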