Commit 530a5678 authored by Jason Wang, committed by Michael S. Tsirkin

vdpa: support packed virtqueue for set/get_vq_state()

This patch extends vdpa_vq_state to support packed virtqueue
state, which is basically the device/driver ring wrap counters and the
avail and used indexes. This will be used for the virtio-vdpa support
for the packed virtqueue and the future vhost/vhost-vdpa support for
the packed virtqueue.
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20210602021536.39525-2-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Eli Cohen <elic@nvidia.com>
parent 72b5e895
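
Not part of the patch: a minimal sketch of how a parent driver could fill the extended state once VIRTIO_F_RING_PACKED is negotiated. The foo_* names, the foo_hw fields and the vdpa_to_foo()/foo_has_feature() helpers are hypothetical placeholders, not real kernel symbols.

/* Hypothetical parent driver: report the current ring position through
 * the new union. Only the sub-struct matching the negotiated ring
 * layout is meaningful. */
static int foo_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_vq_state *state)
{
        struct foo_hw *hw = vdpa_to_foo(vdpa_dev);      /* hypothetical */

        if (foo_has_feature(hw, VIRTIO_F_RING_PACKED)) {
                /* packed ring: 1-bit wrap counters + 15-bit indexes */
                state->packed.last_avail_counter = hw->driver_wrap_counter;
                state->packed.last_avail_idx = hw->avail_idx;
                state->packed.last_used_counter = hw->device_wrap_counter;
                state->packed.last_used_idx = hw->used_idx;
        } else {
                /* split ring: only the available index is needed */
                state->split.avail_index = hw->avail_idx;
        }

        return 0;
}
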
@@ -264,7 +264,7 @@ static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
 {
         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
-        state->avail_index = ifcvf_get_vq_state(vf, qid);
+        state->split.avail_index = ifcvf_get_vq_state(vf, qid);
         return 0;
 }
@@ -273,7 +273,7 @@ static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
 {
         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
-        return ifcvf_set_vq_state(vf, qid, state->avail_index);
+        return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
 }
 static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
@@ -1423,8 +1423,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
                 return -EINVAL;
         }
-        mvq->used_idx = state->avail_index;
-        mvq->avail_idx = state->avail_index;
+        mvq->used_idx = state->split.avail_index;
+        mvq->avail_idx = state->split.avail_index;
         return 0;
 }
@@ -1445,7 +1445,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
          * Since both values should be identical, we take the value of
          * used_idx which is reported correctly.
          */
-        state->avail_index = mvq->used_idx;
+        state->split.avail_index = mvq->used_idx;
         return 0;
 }
@@ -1454,7 +1454,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
                 mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
                 return err;
         }
-        state->avail_index = attr.used_index;
+        state->split.avail_index = attr.used_index;
         return 0;
 }
@@ -374,7 +374,7 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
         struct vringh *vrh = &vq->vring;
         spin_lock(&vdpasim->lock);
-        vrh->last_avail_idx = state->avail_index;
+        vrh->last_avail_idx = state->split.avail_index;
         spin_unlock(&vdpasim->lock);
         return 0;
@@ -387,7 +387,7 @@ static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
         struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
         struct vringh *vrh = &vq->vring;
-        state->avail_index = vrh->last_avail_idx;
+        state->split.avail_index = vrh->last_avail_idx;
         return 0;
 }
@@ -383,7 +383,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                 if (r)
                         return r;
-                vq->last_avail_idx = vq_state.avail_index;
+                vq->last_avail_idx = vq_state.split.avail_index;
                 break;
         }
@@ -401,7 +401,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                 break;
         case VHOST_SET_VRING_BASE:
-                vq_state.avail_index = vq->last_avail_idx;
+                vq_state.split.avail_index = vq->last_avail_idx;
                 if (ops->set_vq_state(vdpa, idx, &vq_state))
                         r = -EINVAL;
                 break;
@@ -28,13 +28,34 @@ struct vdpa_notification_area {
 };
 /**
- * struct vdpa_vq_state - vDPA vq_state definition
+ * struct vdpa_vq_state_split - vDPA split virtqueue state
  * @avail_index: available index
  */
-struct vdpa_vq_state {
+struct vdpa_vq_state_split {
         u16     avail_index;
 };
+/**
+ * struct vdpa_vq_state_packed - vDPA packed virtqueue state
+ * @last_avail_counter: last driver ring wrap counter observed by device
+ * @last_avail_idx: device available index
+ * @last_used_counter: device ring wrap counter
+ * @last_used_idx: used index
+ */
+struct vdpa_vq_state_packed {
+        u16     last_avail_counter:1;
+        u16     last_avail_idx:15;
+        u16     last_used_counter:1;
+        u16     last_used_idx:15;
+};
+
+struct vdpa_vq_state {
+        union {
+                struct vdpa_vq_state_split split;
+                struct vdpa_vq_state_packed packed;
+        };
+};
+
 struct vdpa_mgmt_dev;
 /**
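
For illustration only, not from this series: with the union in place, a caller holding a struct vdpa_device can snapshot and restore a virtqueue position through the existing config ops regardless of the ring layout. foo_save_restore_vq() is a hypothetical helper, and the reset step is elided.

static int foo_save_restore_vq(struct vdpa_device *vdpa, u16 idx)
{
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_vq_state saved;
        int r;

        r = ops->get_vq_state(vdpa, idx, &saved);
        if (r)
                return r;

        /* ... reset and reconfigure the device here ... */

        /*
         * For a packed ring the 1-bit wrap counters and 15-bit indexes
         * travel together in saved.packed; for a split ring only
         * saved.split.avail_index is meaningful.
         */
        return ops->set_vq_state(vdpa, idx, &saved);
}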