Commit 17b17be2 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'vfio-v6.7-rc4' of https://github.com/awilliam/linux-vfio

Pull vfio fixes from Alex Williamson:

 - Fix the lifecycle of a mutex in the pds variant driver such that a
   reset prior to opening the device won't find it uninitialized.
   Implement the release path to symmetrically destroy the mutex. Also
   switch a different lock from spinlock to mutex as the code path has
   the potential to sleep and doesn't need the spinlock context
   otherwise (Brett Creeley)

 - Fix an issue detected via randconfig where KVM tries to symbol_get an
   undeclared function. The symbol is temporarily declared
   unconditionally here, which resolves the problem and avoids churn
   relative to a series pending for the next merge window which resolves
   some of this symbol ugliness, but also fixes Kconfig dependencies
   (Sean Christopherson)

* tag 'vfio-v6.7-rc4' of https://github.com/awilliam/linux-vfio:
  vfio: Drop vfio_file_iommu_group() stub to fudge around a KVM wart
  vfio/pds: Fix possible sleep while in atomic context
  vfio/pds: Fix mutex lock->magic != lock warning
parents deb4b9dd 4ea95c04
...@@ -55,10 +55,10 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio) ...@@ -55,10 +55,10 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
* VFIO_DEVICE_STATE_RUNNING. * VFIO_DEVICE_STATE_RUNNING.
*/ */
if (deferred_reset_needed) { if (deferred_reset_needed) {
spin_lock(&pds_vfio->reset_lock); mutex_lock(&pds_vfio->reset_mutex);
pds_vfio->deferred_reset = true; pds_vfio->deferred_reset = true;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR; pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
spin_unlock(&pds_vfio->reset_lock); mutex_unlock(&pds_vfio->reset_mutex);
} }
} }
......
...@@ -29,7 +29,7 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev) ...@@ -29,7 +29,7 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio) void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
{ {
again: again:
spin_lock(&pds_vfio->reset_lock); mutex_lock(&pds_vfio->reset_mutex);
if (pds_vfio->deferred_reset) { if (pds_vfio->deferred_reset) {
pds_vfio->deferred_reset = false; pds_vfio->deferred_reset = false;
if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) { if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
...@@ -39,23 +39,23 @@ void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio) ...@@ -39,23 +39,23 @@ void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
} }
pds_vfio->state = pds_vfio->deferred_reset_state; pds_vfio->state = pds_vfio->deferred_reset_state;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING; pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
spin_unlock(&pds_vfio->reset_lock); mutex_unlock(&pds_vfio->reset_mutex);
goto again; goto again;
} }
mutex_unlock(&pds_vfio->state_mutex); mutex_unlock(&pds_vfio->state_mutex);
spin_unlock(&pds_vfio->reset_lock); mutex_unlock(&pds_vfio->reset_mutex);
} }
void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio) void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
{ {
spin_lock(&pds_vfio->reset_lock); mutex_lock(&pds_vfio->reset_mutex);
pds_vfio->deferred_reset = true; pds_vfio->deferred_reset = true;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING; pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
if (!mutex_trylock(&pds_vfio->state_mutex)) { if (!mutex_trylock(&pds_vfio->state_mutex)) {
spin_unlock(&pds_vfio->reset_lock); mutex_unlock(&pds_vfio->reset_mutex);
return; return;
} }
spin_unlock(&pds_vfio->reset_lock); mutex_unlock(&pds_vfio->reset_mutex);
pds_vfio_state_mutex_unlock(pds_vfio); pds_vfio_state_mutex_unlock(pds_vfio);
} }
...@@ -155,6 +155,9 @@ static int pds_vfio_init_device(struct vfio_device *vdev) ...@@ -155,6 +155,9 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
pds_vfio->vf_id = vf_id; pds_vfio->vf_id = vf_id;
mutex_init(&pds_vfio->state_mutex);
mutex_init(&pds_vfio->reset_mutex);
vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P; vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
vdev->mig_ops = &pds_vfio_lm_ops; vdev->mig_ops = &pds_vfio_lm_ops;
vdev->log_ops = &pds_vfio_log_ops; vdev->log_ops = &pds_vfio_log_ops;
...@@ -168,6 +171,17 @@ static int pds_vfio_init_device(struct vfio_device *vdev) ...@@ -168,6 +171,17 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
return 0; return 0;
} }
/*
 * .release callback for the vfio_device_ops table: destroy the mutexes
 * created at init time, then delegate the remaining teardown to the
 * vfio-pci core release path.
 */
static void pds_vfio_release_device(struct vfio_device *vdev)
{
	struct pds_vfio_pci_device *dev;

	dev = container_of(vdev, struct pds_vfio_pci_device,
			   vfio_coredev.vdev);

	/* The two destroys are independent; order is not significant. */
	mutex_destroy(&dev->state_mutex);
	mutex_destroy(&dev->reset_mutex);

	vfio_pci_core_release_dev(vdev);
}
static int pds_vfio_open_device(struct vfio_device *vdev) static int pds_vfio_open_device(struct vfio_device *vdev)
{ {
struct pds_vfio_pci_device *pds_vfio = struct pds_vfio_pci_device *pds_vfio =
...@@ -179,7 +193,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev) ...@@ -179,7 +193,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev)
if (err) if (err)
return err; return err;
mutex_init(&pds_vfio->state_mutex);
pds_vfio->state = VFIO_DEVICE_STATE_RUNNING; pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING; pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
...@@ -199,14 +212,13 @@ static void pds_vfio_close_device(struct vfio_device *vdev) ...@@ -199,14 +212,13 @@ static void pds_vfio_close_device(struct vfio_device *vdev)
pds_vfio_put_save_file(pds_vfio); pds_vfio_put_save_file(pds_vfio);
pds_vfio_dirty_disable(pds_vfio, true); pds_vfio_dirty_disable(pds_vfio, true);
mutex_unlock(&pds_vfio->state_mutex); mutex_unlock(&pds_vfio->state_mutex);
mutex_destroy(&pds_vfio->state_mutex);
vfio_pci_core_close_device(vdev); vfio_pci_core_close_device(vdev);
} }
static const struct vfio_device_ops pds_vfio_ops = { static const struct vfio_device_ops pds_vfio_ops = {
.name = "pds-vfio", .name = "pds-vfio",
.init = pds_vfio_init_device, .init = pds_vfio_init_device,
.release = vfio_pci_core_release_dev, .release = pds_vfio_release_device,
.open_device = pds_vfio_open_device, .open_device = pds_vfio_open_device,
.close_device = pds_vfio_close_device, .close_device = pds_vfio_close_device,
.ioctl = vfio_pci_core_ioctl, .ioctl = vfio_pci_core_ioctl,
......
...@@ -18,7 +18,7 @@ struct pds_vfio_pci_device { ...@@ -18,7 +18,7 @@ struct pds_vfio_pci_device {
struct pds_vfio_dirty dirty; struct pds_vfio_dirty dirty;
struct mutex state_mutex; /* protect migration state */ struct mutex state_mutex; /* protect migration state */
enum vfio_device_mig_state state; enum vfio_device_mig_state state;
spinlock_t reset_lock; /* protect reset_done flow */ struct mutex reset_mutex; /* protect reset_done flow */
u8 deferred_reset; u8 deferred_reset;
enum vfio_device_mig_state deferred_reset_state; enum vfio_device_mig_state deferred_reset_state;
struct notifier_block nb; struct notifier_block nb;
......
...@@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes, ...@@ -289,16 +289,12 @@ void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
/* /*
* External user API * External user API
*/ */
#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct iommu_group *vfio_file_iommu_group(struct file *file); struct iommu_group *vfio_file_iommu_group(struct file *file);
#if IS_ENABLED(CONFIG_VFIO_GROUP)
bool vfio_file_is_group(struct file *file); bool vfio_file_is_group(struct file *file);
bool vfio_file_has_dev(struct file *file, struct vfio_device *device); bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
#else #else
static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
{
return NULL;
}
static inline bool vfio_file_is_group(struct file *file) static inline bool vfio_file_is_group(struct file *file)
{ {
return false; return false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment