Commit 5fc85679 authored by Dragos Tatulea, committed by Michael S. Tsirkin

vdpa/mlx5: Extract mr members in own resource struct

Group all mapping related resources into their own structure.

Upcoming patches will add more members in this new structure.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20240830105838.2666587-6-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 0b916a9c
...@@ -83,10 +83,18 @@ enum { ...@@ -83,10 +83,18 @@ enum {
MLX5_VDPA_NUM_AS = 2 MLX5_VDPA_NUM_AS = 2
}; };
struct mlx5_vdpa_mr_resources {
struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
struct list_head mr_list_head;
struct mutex mr_mtx;
};
struct mlx5_vdpa_dev { struct mlx5_vdpa_dev {
struct vdpa_device vdev; struct vdpa_device vdev;
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
struct mlx5_vdpa_resources res; struct mlx5_vdpa_resources res;
struct mlx5_vdpa_mr_resources mres;
u64 mlx_features; u64 mlx_features;
u64 actual_features; u64 actual_features;
...@@ -95,13 +103,8 @@ struct mlx5_vdpa_dev { ...@@ -95,13 +103,8 @@ struct mlx5_vdpa_dev {
u16 max_idx; u16 max_idx;
u32 generation; u32 generation;
struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
struct list_head mr_list_head;
/* serialize mr access */
struct mutex mr_mtx;
struct mlx5_control_vq cvq; struct mlx5_control_vq cvq;
struct workqueue_struct *wq; struct workqueue_struct *wq;
unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
bool suspended; bool suspended;
struct mlx5_async_ctx async_ctx; struct mlx5_async_ctx async_ctx;
......
...@@ -666,9 +666,9 @@ static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, ...@@ -666,9 +666,9 @@ static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev, void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr) struct mlx5_vdpa_mr *mr)
{ {
mutex_lock(&mvdev->mr_mtx); mutex_lock(&mvdev->mres.mr_mtx);
_mlx5_vdpa_put_mr(mvdev, mr); _mlx5_vdpa_put_mr(mvdev, mr);
mutex_unlock(&mvdev->mr_mtx); mutex_unlock(&mvdev->mres.mr_mtx);
} }
static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
...@@ -683,39 +683,39 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, ...@@ -683,39 +683,39 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev, void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *mr) struct mlx5_vdpa_mr *mr)
{ {
mutex_lock(&mvdev->mr_mtx); mutex_lock(&mvdev->mres.mr_mtx);
_mlx5_vdpa_get_mr(mvdev, mr); _mlx5_vdpa_get_mr(mvdev, mr);
mutex_unlock(&mvdev->mr_mtx); mutex_unlock(&mvdev->mres.mr_mtx);
} }
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev, void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
struct mlx5_vdpa_mr *new_mr, struct mlx5_vdpa_mr *new_mr,
unsigned int asid) unsigned int asid)
{ {
struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid]; struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];
mutex_lock(&mvdev->mr_mtx); mutex_lock(&mvdev->mres.mr_mtx);
_mlx5_vdpa_put_mr(mvdev, old_mr); _mlx5_vdpa_put_mr(mvdev, old_mr);
mvdev->mr[asid] = new_mr; mvdev->mres.mr[asid] = new_mr;
mutex_unlock(&mvdev->mr_mtx); mutex_unlock(&mvdev->mres.mr_mtx);
} }
static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev) static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
{ {
struct mlx5_vdpa_mr *mr; struct mlx5_vdpa_mr *mr;
mutex_lock(&mvdev->mr_mtx); mutex_lock(&mvdev->mres.mr_mtx);
list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) { list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {
mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: " mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
"mr: %p, mkey: 0x%x, refcount: %u\n", "mr: %p, mkey: 0x%x, refcount: %u\n",
mr, mr->mkey, refcount_read(&mr->refcount)); mr, mr->mkey, refcount_read(&mr->refcount));
} }
mutex_unlock(&mvdev->mr_mtx); mutex_unlock(&mvdev->mres.mr_mtx);
} }
...@@ -756,7 +756,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, ...@@ -756,7 +756,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (err) if (err)
goto err_iotlb; goto err_iotlb;
list_add_tail(&mr->mr_list, &mvdev->mr_list_head); list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head);
return 0; return 0;
...@@ -782,9 +782,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, ...@@ -782,9 +782,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
if (!mr) if (!mr)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
mutex_lock(&mvdev->mr_mtx); mutex_lock(&mvdev->mres.mr_mtx);
err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb); err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
mutex_unlock(&mvdev->mr_mtx); mutex_unlock(&mvdev->mres.mr_mtx);
if (err) if (err)
goto out_err; goto out_err;
...@@ -804,7 +804,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev, ...@@ -804,7 +804,7 @@ int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
{ {
int err; int err;
if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid) if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
return 0; return 0;
spin_lock(&mvdev->cvq.iommu_lock); spin_lock(&mvdev->cvq.iommu_lock);
......
...@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev) ...@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
mlx5_vdpa_warn(mvdev, "resources already allocated\n"); mlx5_vdpa_warn(mvdev, "resources already allocated\n");
return -EINVAL; return -EINVAL;
} }
mutex_init(&mvdev->mr_mtx); mutex_init(&mvdev->mres.mr_mtx);
res->uar = mlx5_get_uars_page(mdev); res->uar = mlx5_get_uars_page(mdev);
if (IS_ERR(res->uar)) { if (IS_ERR(res->uar)) {
err = PTR_ERR(res->uar); err = PTR_ERR(res->uar);
...@@ -301,7 +301,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev) ...@@ -301,7 +301,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
err_uctx: err_uctx:
mlx5_put_uars_page(mdev, res->uar); mlx5_put_uars_page(mdev, res->uar);
err_uars: err_uars:
mutex_destroy(&mvdev->mr_mtx); mutex_destroy(&mvdev->mres.mr_mtx);
return err; return err;
} }
...@@ -318,7 +318,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev) ...@@ -318,7 +318,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
dealloc_pd(mvdev, res->pdn, res->uid); dealloc_pd(mvdev, res->pdn, res->uid);
destroy_uctx(mvdev, res->uid); destroy_uctx(mvdev, res->uid);
mlx5_put_uars_page(mvdev->mdev, res->uar); mlx5_put_uars_page(mvdev->mdev, res->uar);
mutex_destroy(&mvdev->mr_mtx); mutex_destroy(&mvdev->mres.mr_mtx);
res->valid = false; res->valid = false;
} }
......
...@@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, ...@@ -941,11 +941,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr) if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr && if (vq_desc_mr &&
MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey); MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
...@@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, ...@@ -953,11 +953,11 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev,
/* If there is no mr update, make sure that the existing ones are set /* If there is no mr update, make sure that the existing ones are set
* modify to ready. * modify to ready.
*/ */
vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr) if (vq_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY;
vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; vq_desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (vq_desc_mr) if (vq_desc_mr)
mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY;
} }
...@@ -1354,7 +1354,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev, ...@@ -1354,7 +1354,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
} }
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; vq_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP]];
if (vq_mr) if (vq_mr)
MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
...@@ -1363,7 +1363,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev, ...@@ -1363,7 +1363,7 @@ static void fill_modify_virtqueue_cmd(struct mlx5_vdpa_net *ndev,
} }
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; desc_mr = mvdev->mres.mr[mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey); MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey);
...@@ -1381,8 +1381,8 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev, ...@@ -1381,8 +1381,8 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) {
unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]; unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_GROUP];
struct mlx5_vdpa_mr *vq_mr = mvdev->mr[asid]; struct mlx5_vdpa_mr *vq_mr = mvdev->mres.mr[asid];
mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); mlx5_vdpa_put_mr(mvdev, mvq->vq_mr);
mlx5_vdpa_get_mr(mvdev, vq_mr); mlx5_vdpa_get_mr(mvdev, vq_mr);
...@@ -1390,8 +1390,8 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev, ...@@ -1390,8 +1390,8 @@ static void modify_virtqueue_end(struct mlx5_vdpa_net *ndev,
} }
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) {
unsigned int asid = mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]; unsigned int asid = mvdev->mres.group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP];
struct mlx5_vdpa_mr *desc_mr = mvdev->mr[asid]; struct mlx5_vdpa_mr *desc_mr = mvdev->mres.mr[asid];
mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); mlx5_vdpa_put_mr(mvdev, mvq->desc_mr);
mlx5_vdpa_get_mr(mvdev, desc_mr); mlx5_vdpa_get_mr(mvdev, desc_mr);
...@@ -3235,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev) ...@@ -3235,7 +3235,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
/* default mapping all groups are mapped to asid 0 */ /* default mapping all groups are mapped to asid 0 */
for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++) for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
mvdev->group2asid[i] = 0; mvdev->mres.group2asid[i] = 0;
} }
static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev) static bool needs_vqs_reset(const struct mlx5_vdpa_dev *mvdev)
...@@ -3353,7 +3353,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, ...@@ -3353,7 +3353,7 @@ static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
new_mr = NULL; new_mr = NULL;
} }
if (!mvdev->mr[asid]) { if (!mvdev->mres.mr[asid]) {
mlx5_vdpa_update_mr(mvdev, new_mr, asid); mlx5_vdpa_update_mr(mvdev, new_mr, asid);
} else { } else {
err = mlx5_vdpa_change_map(mvdev, new_mr, asid); err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
...@@ -3637,12 +3637,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group, ...@@ -3637,12 +3637,12 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
if (group >= MLX5_VDPA_NUMVQ_GROUPS) if (group >= MLX5_VDPA_NUMVQ_GROUPS)
return -EINVAL; return -EINVAL;
mvdev->group2asid[group] = asid; mvdev->mres.group2asid[group] = asid;
mutex_lock(&mvdev->mr_mtx); mutex_lock(&mvdev->mres.mr_mtx);
if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid]) if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid); err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
mutex_unlock(&mvdev->mr_mtx); mutex_unlock(&mvdev->mres.mr_mtx);
return err; return err;
} }
...@@ -3962,7 +3962,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, ...@@ -3962,7 +3962,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
if (err) if (err)
goto err_mpfs; goto err_mpfs;
INIT_LIST_HEAD(&mvdev->mr_list_head); INIT_LIST_HEAD(&mvdev->mres.mr_list_head);
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
err = mlx5_vdpa_create_dma_mr(mvdev); err = mlx5_vdpa_create_dma_mr(mvdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment