Commit 5262912e authored by Eli Cohen, committed by Michael S. Tsirkin

vdpa/mlx5: Add support for control VQ and MAC setting

Add support for handling control virtqueue configurations per the
virtio specification. The control virtqueue is implemented in software;
no hardware offloading is involved.

Control VQ configuration needs task context, therefore all
configurations are handled in a workqueue created for this purpose.
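
As an illustration, here is a minimal sketch of how a control VQ kick
could be deferred to that workqueue and served in task context. It
reuses struct mlx5_ctrl_wq_ent and struct mlx5_control_vq from this
patch; kick_ctrl_vq() and mlx5_cvq_kick_handler() are hypothetical
names, and the actual handler lives in the (collapsed) mlx5_vnet.c
part of the diff.

static void mlx5_cvq_kick_handler(struct work_struct *work)
{
	struct mlx5_ctrl_wq_ent *wqent = container_of(work, struct mlx5_ctrl_wq_ent, work);
	struct mlx5_vdpa_dev *mvdev = wqent->mvdev;
	struct mlx5_control_vq *cvq = &mvdev->cvq;

	/* May sleep while copying through the iotlb, hence task context. */
	while (vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov,
				    &cvq->head, GFP_KERNEL) > 0) {
		/*
		 * ... read the command with vringh_iov_pull_iotlb(), execute it,
		 * push a one-byte virtio_net_ctrl_ack with vringh_iov_push_iotlb() ...
		 */
		vringh_complete_iotlb(&cvq->vring, cvq->head, sizeof(u8) /* ack */);
	}
	kfree(wqent);
}

static void kick_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_ctrl_wq_ent *wqent;

	/* Kick callbacks may run in atomic context, so only queue work here. */
	wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
	if (!wqent)
		return;

	wqent->mvdev = mvdev;
	INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
	queue_work(mvdev->wq, &wqent->work);
}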

Modifications are made to the memory registration code to allow for
saving a copy of the iotlb to be used by the control VQ to access the
vring.
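
The copied iotlb is what lets the software control VQ translate guest
addresses when walking the vring. A rough sketch of how it is consumed
once the driver has supplied the ring addresses stored in struct
mlx5_control_vq (the function name, features argument and queue size
below are illustrative; the actual wiring is in the collapsed
mlx5_vnet.c diff):

#define MLX5_CVQ_MAX_ENT 16	/* illustrative queue size */

static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev, u64 features)
{
	struct mlx5_control_vq *cvq = &mvdev->cvq;

	/*
	 * The guest physical addresses below are resolved through
	 * cvq->iotlb, the copy built by dup_iotlb() and attached to the
	 * vring with vringh_set_iotlb().
	 */
	return vringh_init_iotlb(&cvq->vring, features, MLX5_CVQ_MAX_ENT, false,
				 (struct vring_desc *)(uintptr_t)cvq->desc_addr,
				 (struct vring_avail *)(uintptr_t)cvq->driver_addr,
				 (struct vring_used *)(uintptr_t)cvq->device_addr);
}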

The maximum number of data virtqueues supported by the driver has been
updated to 2, since multiqueue is not supported at this stage and we
need to ensure a consistent mapping of VQ indices to either data or
control VQ.
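
With two data queues and no multiqueue, the virtio-net layout puts
receiveq at index 0, transmitq at index 1 and the control VQ at
index 2, so a check along these lines (illustrative helper) is enough
to route a request:

static bool is_ctrl_vq_idx(u16 idx)
{
	/* 2 data VQs (RX = 0, TX = 1); index 2 is the control VQ */
	return idx == 2;
}
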
Signed-off-by: Eli Cohen <elic@nvidia.com>
Link: https://lore.kernel.org/r/20210823052123.14909-6-elic@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent e4fc6650
@@ -53,6 +53,7 @@ config MLX5_VDPA
config MLX5_VDPA_NET
tristate "vDPA driver for ConnectX devices"
select MLX5_VDPA
select VHOST_RING
depends on MLX5_CORE
help
VDPA network driver for ConnectX6 and newer. Provides offloading
@@ -5,6 +5,7 @@
#define __MLX5_VDPA_H__
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/mlx5/driver.h>
@@ -47,6 +48,26 @@ struct mlx5_vdpa_resources {
bool valid;
};
struct mlx5_control_vq {
struct vhost_iotlb *iotlb;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
struct vringh vring;
bool ready;
u64 desc_addr;
u64 device_addr;
u64 driver_addr;
struct vdpa_callback event_cb;
struct vringh_kiov riov;
struct vringh_kiov wiov;
unsigned short head;
};
struct mlx5_ctrl_wq_ent {
struct work_struct work;
struct mlx5_vdpa_dev *mvdev;
};
struct mlx5_vdpa_dev {
struct vdpa_device vdev;
struct mlx5_core_dev *mdev;
@@ -60,6 +81,8 @@ struct mlx5_vdpa_dev {
u32 generation;
struct mlx5_vdpa_mr mr;
struct mlx5_control_vq cvq;
struct workqueue_struct *wq;
};
int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
@@ -451,33 +452,30 @@ static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
}
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
struct vhost_iotlb_map *map;
u64 start = 0, last = ULLONG_MAX;
int err;
if (mr->initialized)
return 0;
if (iotlb)
err = create_user_mr(mvdev, iotlb);
else
err = create_dma_mr(mvdev, mr);
if (!err)
mr->initialized = true;
if (!src) {
err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
return err;
}
return err;
for (map = vhost_iotlb_itree_first(src, start, last); map;
map = vhost_iotlb_itree_next(map, start, last)) {
err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
map->addr, map->perm);
if (err)
return err;
}
return 0;
}
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
{
int err;
mutex_lock(&mvdev->mr.mkey_mtx);
err = _mlx5_vdpa_create_mr(mvdev, iotlb);
mutex_unlock(&mvdev->mr.mkey_mtx);
return err;
vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
}
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
@@ -501,6 +499,7 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
if (!mr->initialized)
goto out;
prune_iotlb(mvdev);
if (mr->user_mr)
destroy_user_mr(mvdev, mr);
else
@@ -512,6 +511,48 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
mutex_unlock(&mr->mkey_mtx);
}
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_mr *mr = &mvdev->mr;
int err;
if (mr->initialized)
return 0;
if (iotlb)
err = create_user_mr(mvdev, iotlb);
else
err = create_dma_mr(mvdev, mr);
if (err)
return err;
err = dup_iotlb(mvdev, iotlb);
if (err)
goto out_err;
mr->initialized = true;
return 0;
out_err:
if (iotlb)
destroy_user_mr(mvdev, mr);
else
destroy_dma_mr(mvdev, mr);
return err;
}
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
int err;
mutex_lock(&mvdev->mr.mkey_mtx);
err = _mlx5_vdpa_create_mr(mvdev, iotlb);
mutex_unlock(&mvdev->mr.mkey_mtx);
return err;
}
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"
@@ -221,6 +222,22 @@ int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *m
return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
if (!mvdev->cvq.iotlb)
return -ENOMEM;
vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);
return 0;
}
static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
vhost_iotlb_free(mvdev->cvq.iotlb);
}
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
@@ -260,10 +277,17 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
err = -ENOMEM;
goto err_key;
}
err = init_ctrl_vq(mvdev);
if (err)
goto err_ctrl;
res->valid = true;
return 0;
err_ctrl:
iounmap(res->kick_addr);
err_key:
dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
@@ -282,6 +306,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
if (!res->valid)
return;
cleanup_ctrl_vq(mvdev);
iounmap(res->kick_addr);
res->kick_addr = NULL;
dealloc_pd(mvdev, res->pdn, res->uid);
This diff is collapsed.
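
For context, a hedged sketch of the kind of command handling the
collapsed mlx5_vnet.c diff adds for MAC setting
(VIRTIO_NET_CTRL_MAC_ADDR_SET): the new MAC is read from the guest
buffer through the control VQ's iotlb-backed vringh, applied, and a
one-byte ack is returned. The helper name is illustrative and the
constants assume <linux/virtio_net.h>; it is not the exact code in the
collapsed diff.

static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_control_vq *cvq = &mvdev->cvq;
	u8 mac[ETH_ALEN];
	ssize_t read;

	/* The new MAC follows the virtio_net_ctrl_hdr in the read descriptors. */
	read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, mac, ETH_ALEN);
	if (read != ETH_ALEN)
		return VIRTIO_NET_ERR;

	/* ... program the NIC with the new MAC and update the device config ... */
	return VIRTIO_NET_OK;
}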