Commit d4ec3d55 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'vfio-v5.16-rc1' of git://github.com/awilliam/linux-vfio

Pull VFIO updates from Alex Williamson:

 - Cleanup vfio iommu_group creation (Christoph Hellwig)

 - Add individual device reset for vfio/fsl-mc (Diana Craciun)

 - IGD OpRegion 2.0+ support (Colin Xu)

 - Use modern cdev lifecycle for vfio_group (Jason Gunthorpe)

 - Use new mdev API in vfio_ccw (Jason Gunthorpe)

* tag 'vfio-v5.16-rc1' of git://github.com/awilliam/linux-vfio: (27 commits)
  vfio/ccw: Convert to use vfio_register_emulated_iommu_dev()
  vfio/ccw: Pass vfio_ccw_private not mdev_device to various functions
  vfio/ccw: Use functions for alloc/free of the vfio_ccw_private
  vfio/ccw: Remove unneeded GFP_DMA
  vfio: Use cdev_device_add() instead of device_create()
  vfio: Use a refcount_t instead of a kref in the vfio_group
  vfio: Don't leak a group reference if the group already exists
  vfio: Do not open code the group list search in vfio_create_group()
  vfio: Delete vfio_get/put_group from vfio_iommu_group_notifier()
  vfio/pci: Add OpRegion 2.0+ Extended VBT support.
  vfio/iommu_type1: remove IS_IOMMU_CAP_DOMAIN_IN_CONTAINER
  vfio/iommu_type1: remove the "external" domain
  vfio/iommu_type1: initialize pgsize_bitmap in ->open
  vfio/spapr_tce: reject mediated devices
  vfio: clean up the check for mediated device in vfio_iommu_type1
  vfio: remove the unused mdev iommu hook
  vfio: move the vfio_iommu_driver_ops interface out of <linux/vfio.h>
  vfio: remove unused method from vfio_iommu_driver_ops
  vfio: simplify iommu group allocation for mediated devices
  vfio: remove the iommudata hack for noiommu groups
  ...
parents a602285a 3bf1311f
...@@ -15,7 +15,8 @@ mc-bus-driver-objs := fsl-mc-bus.o \ ...@@ -15,7 +15,8 @@ mc-bus-driver-objs := fsl-mc-bus.o \
dprc-driver.o \ dprc-driver.o \
fsl-mc-allocator.o \ fsl-mc-allocator.o \
fsl-mc-msi.o \ fsl-mc-msi.o \
dpmcp.o dpmcp.o \
obj-api.o
# MC userspace support # MC userspace support
obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
...@@ -48,7 +48,6 @@ struct dpmng_rsp_get_version { ...@@ -48,7 +48,6 @@ struct dpmng_rsp_get_version {
/* DPMCP command IDs */ /* DPMCP command IDs */
#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800) #define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
#define DPMCP_CMDID_RESET DPMCP_CMD(0x005) #define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
struct dpmcp_cmd_open { struct dpmcp_cmd_open {
...@@ -91,7 +90,6 @@ int dpmcp_reset(struct fsl_mc_io *mc_io, ...@@ -91,7 +90,6 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* DPRC command IDs */ /* DPRC command IDs */
#define DPRC_CMDID_CLOSE DPRC_CMD(0x800) #define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05) #define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004) #define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
...@@ -453,7 +451,6 @@ int dprc_get_connection(struct fsl_mc_io *mc_io, ...@@ -453,7 +451,6 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
/* Command IDs */ /* Command IDs */
#define DPBP_CMDID_CLOSE DPBP_CMD(0x800) #define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
#define DPBP_CMDID_ENABLE DPBP_CMD(0x002) #define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
#define DPBP_CMDID_DISABLE DPBP_CMD(0x003) #define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
...@@ -492,7 +489,6 @@ struct dpbp_rsp_get_attributes { ...@@ -492,7 +489,6 @@ struct dpbp_rsp_get_attributes {
/* Command IDs */ /* Command IDs */
#define DPCON_CMDID_CLOSE DPCON_CMD(0x800) #define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
#define DPCON_CMDID_ENABLE DPCON_CMD(0x002) #define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
#define DPCON_CMDID_DISABLE DPCON_CMD(0x003) #define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
...@@ -524,6 +520,41 @@ struct dpcon_cmd_set_notification { ...@@ -524,6 +520,41 @@ struct dpcon_cmd_set_notification {
__le64 user_ctx; __le64 user_ctx;
}; };
/*
* Generic FSL MC API
*/
/* generic command versioning */
#define OBJ_CMD_BASE_VERSION 1
#define OBJ_CMD_ID_OFFSET 4
#define OBJ_CMD(id) (((id) << OBJ_CMD_ID_OFFSET) | OBJ_CMD_BASE_VERSION)
/* open command codes */
#define DPRTC_CMDID_OPEN OBJ_CMD(0x810)
#define DPNI_CMDID_OPEN OBJ_CMD(0x801)
#define DPSW_CMDID_OPEN OBJ_CMD(0x802)
#define DPIO_CMDID_OPEN OBJ_CMD(0x803)
#define DPBP_CMDID_OPEN OBJ_CMD(0x804)
#define DPRC_CMDID_OPEN OBJ_CMD(0x805)
#define DPDMUX_CMDID_OPEN OBJ_CMD(0x806)
#define DPCI_CMDID_OPEN OBJ_CMD(0x807)
#define DPCON_CMDID_OPEN OBJ_CMD(0x808)
#define DPSECI_CMDID_OPEN OBJ_CMD(0x809)
#define DPAIOP_CMDID_OPEN OBJ_CMD(0x80a)
#define DPMCP_CMDID_OPEN OBJ_CMD(0x80b)
#define DPMAC_CMDID_OPEN OBJ_CMD(0x80c)
#define DPDCEI_CMDID_OPEN OBJ_CMD(0x80d)
#define DPDMAI_CMDID_OPEN OBJ_CMD(0x80e)
#define DPDBG_CMDID_OPEN OBJ_CMD(0x80f)
/* Generic object command IDs */
#define OBJ_CMDID_CLOSE OBJ_CMD(0x800)
#define OBJ_CMDID_RESET OBJ_CMD(0x005)
/* Command parameters for the generic object open command (all DP* types). */
struct fsl_mc_obj_cmd_open {
	__le32 obj_id;	/* ID of the object instance to open */
};
/** /**
* struct fsl_mc_resource_pool - Pool of MC resources of a given * struct fsl_mc_resource_pool - Pool of MC resources of a given
......
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2021 NXP
*
*/
#include <linux/kernel.h>
#include <linux/fsl/mc.h>
#include "fsl-mc-private.h"
/*
 * Map an MC object type string (e.g. "dpni") to its versioned
 * open-command ID. Returns -1 when the type is not recognized.
 */
static int fsl_mc_get_open_cmd_id(const char *type)
{
	struct type_to_cmd {
		int cmd_id;
		const char *type;
	};
	static const struct type_to_cmd dev_ids[] = {
		{ DPRTC_CMDID_OPEN, "dprtc" },
		{ DPRC_CMDID_OPEN, "dprc" },
		{ DPNI_CMDID_OPEN, "dpni" },
		{ DPIO_CMDID_OPEN, "dpio" },
		{ DPSW_CMDID_OPEN, "dpsw" },
		{ DPBP_CMDID_OPEN, "dpbp" },
		{ DPCON_CMDID_OPEN, "dpcon" },
		{ DPMCP_CMDID_OPEN, "dpmcp" },
		{ DPMAC_CMDID_OPEN, "dpmac" },
		{ DPSECI_CMDID_OPEN, "dpseci" },
		{ DPDMUX_CMDID_OPEN, "dpdmux" },
		{ DPDCEI_CMDID_OPEN, "dpdcei" },
		{ DPAIOP_CMDID_OPEN, "dpaiop" },
		{ DPCI_CMDID_OPEN, "dpci" },
		{ DPDMAI_CMDID_OPEN, "dpdmai" },
		{ DPDBG_CMDID_OPEN, "dpdbg" },
		{ 0, NULL }
	};
	const struct type_to_cmd *entry;

	/* Linear scan of the NULL-terminated lookup table. */
	for (entry = dev_ids; entry->type; entry++)
		if (strcmp(entry->type, type) == 0)
			return entry->cmd_id;

	return -1;
}
/**
 * fsl_mc_obj_open() - Open an MC object and obtain an access token.
 * @mc_io:	pointer to the MC I/O portal used to send the command
 * @cmd_flags:	command flags for the MC command header
 * @obj_id:	ID of the object instance to open
 * @obj_type:	object type string (e.g. "dpni")
 * @token:	on success, filled with the token of the opened object
 *
 * Return: 0 on success, -ENODEV for an unknown object type, or the
 * error code returned by the management complex.
 */
int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
		    u32 cmd_flags,
		    int obj_id,
		    char *obj_type,
		    u16 *token)
{
	struct fsl_mc_obj_cmd_open *open_params;
	struct fsl_mc_command cmd = { 0 };
	int cmd_id, err;

	/* Resolve the per-object-type open command ID. */
	cmd_id = fsl_mc_get_open_cmd_id(obj_type);
	if (cmd_id == -1)
		return -ENODEV;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(cmd_id, cmd_flags, 0);
	open_params = (struct fsl_mc_obj_cmd_open *)cmd.params;
	open_params->obj_id = cpu_to_le32(obj_id);

	/* send command to mc */
	err = mc_send_command(mc_io, &cmd);
	if (err)
		return err;

	/* retrieve response parameters */
	*token = mc_cmd_hdr_read_token(&cmd);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_mc_obj_open);
/**
 * fsl_mc_obj_close() - Close a previously opened MC object.
 * @mc_io:	pointer to the MC I/O portal used to send the command
 * @cmd_flags:	command flags for the MC command header
 * @token:	token returned by fsl_mc_obj_open()
 *
 * Return: 0 on success, error code from the management complex otherwise.
 */
int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
		     u32 cmd_flags,
		     u16 token)
{
	struct fsl_mc_command cmd = { 0 };

	/* The close command carries no parameters beyond the header. */
	cmd.header = mc_encode_cmd_header(OBJ_CMDID_CLOSE, cmd_flags, token);

	return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(fsl_mc_obj_close);
/**
 * fsl_mc_obj_reset() - Reset an opened MC object.
 * @mc_io:	pointer to the MC I/O portal used to send the command
 * @cmd_flags:	command flags for the MC command header
 * @token:	token returned by fsl_mc_obj_open()
 *
 * Return: 0 on success, error code from the management complex otherwise.
 */
int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
		     u32 cmd_flags,
		     u16 token)
{
	struct fsl_mc_command cmd = { 0 };

	/* The reset command carries no parameters beyond the header. */
	cmd.header = mc_encode_cmd_header(OBJ_CMDID_RESET, cmd_flags, token);

	return mc_send_command(mc_io, &cmd);
}
EXPORT_SYMBOL_GPL(fsl_mc_obj_reset);
...@@ -137,77 +137,107 @@ static void vfio_ccw_sch_irq(struct subchannel *sch) ...@@ -137,77 +137,107 @@ static void vfio_ccw_sch_irq(struct subchannel *sch)
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT); vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
} }
static void vfio_ccw_free_regions(struct vfio_ccw_private *private) static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
{ {
if (private->crw_region)
kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
if (private->schib_region)
kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
if (private->cmd_region)
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
if (private->io_region)
kmem_cache_free(vfio_ccw_io_region, private->io_region);
}
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_private *private; struct vfio_ccw_private *private;
int ret = -ENOMEM;
if (pmcw->qf) {
dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
dev_name(&sch->dev));
return -ENODEV;
}
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private) if (!private)
return -ENOMEM; return ERR_PTR(-ENOMEM);
private->sch = sch;
mutex_init(&private->io_mutex);
private->state = VFIO_CCW_STATE_NOT_OPER;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
atomic_set(&private->avail, 1);
private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1), private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
GFP_KERNEL); GFP_KERNEL);
if (!private->cp.guest_cp) if (!private->cp.guest_cp)
goto out_free; goto out_free_private;
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
if (!private->io_region) if (!private->io_region)
goto out_free; goto out_free_cp;
private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region, private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
if (!private->cmd_region) if (!private->cmd_region)
goto out_free; goto out_free_io;
private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region, private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
if (!private->schib_region) if (!private->schib_region)
goto out_free; goto out_free_cmd;
private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region, private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
if (!private->crw_region) if (!private->crw_region)
goto out_free; goto out_free_schib;
return private;
out_free_schib:
kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
out_free_cmd:
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
out_free_io:
kmem_cache_free(vfio_ccw_io_region, private->io_region);
out_free_cp:
kfree(private->cp.guest_cp);
out_free_private:
mutex_destroy(&private->io_mutex);
kfree(private);
return ERR_PTR(-ENOMEM);
}
static void vfio_ccw_free_private(struct vfio_ccw_private *private)
{
struct vfio_ccw_crw *crw, *temp;
list_for_each_entry_safe(crw, temp, &private->crw, next) {
list_del(&crw->next);
kfree(crw);
}
kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private->cp.guest_cp);
mutex_destroy(&private->io_mutex);
kfree(private);
}
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_private *private;
int ret = -ENOMEM;
if (pmcw->qf) {
dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
dev_name(&sch->dev));
return -ENODEV;
}
private = vfio_ccw_alloc_private(sch);
if (IS_ERR(private))
return PTR_ERR(private);
private->sch = sch;
dev_set_drvdata(&sch->dev, private); dev_set_drvdata(&sch->dev, private);
mutex_init(&private->io_mutex);
spin_lock_irq(sch->lock); spin_lock_irq(sch->lock);
private->state = VFIO_CCW_STATE_NOT_OPER;
sch->isc = VFIO_CCW_ISC; sch->isc = VFIO_CCW_ISC;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
spin_unlock_irq(sch->lock); spin_unlock_irq(sch->lock);
if (ret) if (ret)
goto out_free; goto out_free;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
atomic_set(&private->avail, 1);
private->state = VFIO_CCW_STATE_STANDBY; private->state = VFIO_CCW_STATE_STANDBY;
ret = vfio_ccw_mdev_reg(sch); ret = vfio_ccw_mdev_reg(sch);
...@@ -228,31 +258,20 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) ...@@ -228,31 +258,20 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
cio_disable_subchannel(sch); cio_disable_subchannel(sch);
out_free: out_free:
dev_set_drvdata(&sch->dev, NULL); dev_set_drvdata(&sch->dev, NULL);
vfio_ccw_free_regions(private); vfio_ccw_free_private(private);
kfree(private->cp.guest_cp);
kfree(private);
return ret; return ret;
} }
static void vfio_ccw_sch_remove(struct subchannel *sch) static void vfio_ccw_sch_remove(struct subchannel *sch)
{ {
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
struct vfio_ccw_crw *crw, *temp;
vfio_ccw_sch_quiesce(sch); vfio_ccw_sch_quiesce(sch);
list_for_each_entry_safe(crw, temp, &private->crw, next) {
list_del(&crw->next);
kfree(crw);
}
vfio_ccw_mdev_unreg(sch); vfio_ccw_mdev_unreg(sch);
dev_set_drvdata(&sch->dev, NULL); dev_set_drvdata(&sch->dev, NULL);
vfio_ccw_free_regions(private); vfio_ccw_free_private(private);
kfree(private->cp.guest_cp);
kfree(private);
VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n", VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid, sch->schid.cssid, sch->schid.ssid,
...@@ -449,7 +468,7 @@ static int __init vfio_ccw_sch_init(void) ...@@ -449,7 +468,7 @@ static int __init vfio_ccw_sch_init(void)
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw"); vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
if (!vfio_ccw_work_q) { if (!vfio_ccw_work_q) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_regions;
} }
vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region", vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
...@@ -458,7 +477,7 @@ static int __init vfio_ccw_sch_init(void) ...@@ -458,7 +477,7 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_io_region), NULL); sizeof(struct ccw_io_region), NULL);
if (!vfio_ccw_io_region) { if (!vfio_ccw_io_region) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_regions;
} }
vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region", vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
...@@ -467,7 +486,7 @@ static int __init vfio_ccw_sch_init(void) ...@@ -467,7 +486,7 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_cmd_region), NULL); sizeof(struct ccw_cmd_region), NULL);
if (!vfio_ccw_cmd_region) { if (!vfio_ccw_cmd_region) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_regions;
} }
vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region", vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
...@@ -477,7 +496,7 @@ static int __init vfio_ccw_sch_init(void) ...@@ -477,7 +496,7 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_schib_region) { if (!vfio_ccw_schib_region) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_regions;
} }
vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region", vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
...@@ -487,19 +506,25 @@ static int __init vfio_ccw_sch_init(void) ...@@ -487,19 +506,25 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_crw_region) { if (!vfio_ccw_crw_region) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_err; goto out_regions;
} }
ret = mdev_register_driver(&vfio_ccw_mdev_driver);
if (ret)
goto out_regions;
isc_register(VFIO_CCW_ISC); isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver); ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) { if (ret) {
isc_unregister(VFIO_CCW_ISC); isc_unregister(VFIO_CCW_ISC);
goto out_err; goto out_driver;
} }
return ret; return ret;
out_err: out_driver:
mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
vfio_ccw_destroy_regions(); vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q); destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit(); vfio_ccw_debug_exit();
...@@ -509,6 +534,7 @@ static int __init vfio_ccw_sch_init(void) ...@@ -509,6 +534,7 @@ static int __init vfio_ccw_sch_init(void)
static void __exit vfio_ccw_sch_exit(void) static void __exit vfio_ccw_sch_exit(void)
{ {
css_driver_unregister(&vfio_ccw_sch_driver); css_driver_unregister(&vfio_ccw_sch_driver);
mdev_unregister_driver(&vfio_ccw_mdev_driver);
isc_unregister(VFIO_CCW_ISC); isc_unregister(VFIO_CCW_ISC);
vfio_ccw_destroy_regions(); vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q); destroy_workqueue(vfio_ccw_work_q);
......
This diff is collapsed.
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/eventfd.h> #include <linux/eventfd.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/vfio_ccw.h> #include <linux/vfio_ccw.h>
#include <linux/vfio.h>
#include <asm/crw.h> #include <asm/crw.h>
#include <asm/debug.h> #include <asm/debug.h>
...@@ -67,6 +68,7 @@ struct vfio_ccw_crw { ...@@ -67,6 +68,7 @@ struct vfio_ccw_crw {
/** /**
* struct vfio_ccw_private * struct vfio_ccw_private
* @vdev: Embedded VFIO device
* @sch: pointer to the subchannel * @sch: pointer to the subchannel
* @state: internal state of the device * @state: internal state of the device
* @completion: synchronization helper of the I/O completion * @completion: synchronization helper of the I/O completion
...@@ -90,6 +92,7 @@ struct vfio_ccw_crw { ...@@ -90,6 +92,7 @@ struct vfio_ccw_crw {
* @crw_work: work for deferral process of CRW handling * @crw_work: work for deferral process of CRW handling
*/ */
struct vfio_ccw_private { struct vfio_ccw_private {
struct vfio_device vdev;
struct subchannel *sch; struct subchannel *sch;
int state; int state;
struct completion *completion; struct completion *completion;
...@@ -121,6 +124,8 @@ extern void vfio_ccw_mdev_unreg(struct subchannel *sch); ...@@ -121,6 +124,8 @@ extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
extern int vfio_ccw_sch_quiesce(struct subchannel *sch); extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
extern struct mdev_driver vfio_ccw_mdev_driver;
/* /*
* States of the device statemachine. * States of the device statemachine.
*/ */
......
...@@ -351,7 +351,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev) ...@@ -351,7 +351,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
list_add(&matrix_mdev->node, &matrix_dev->mdev_list); list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock); mutex_unlock(&matrix_dev->lock);
ret = vfio_register_group_dev(&matrix_mdev->vdev); ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret) if (ret)
goto err_list; goto err_list;
dev_set_drvdata(&mdev->dev, matrix_mdev); dev_set_drvdata(&mdev->dev, matrix_mdev);
......
...@@ -65,6 +65,34 @@ static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev) ...@@ -65,6 +65,34 @@ static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
kfree(vdev->regions); kfree(vdev->regions);
} }
/*
 * Reset the backing fsl-mc device. A DPRC container has a dedicated
 * non-recursive container reset; every other object type is reset by
 * opening it, issuing the generic reset command, and closing it again.
 */
static int vfio_fsl_mc_reset_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	u16 token;
	int ret;

	if (is_fsl_mc_bus_dprc(vdev->mc_dev))
		return dprc_reset_container(mc_dev->mc_io, 0,
					    mc_dev->mc_handle,
					    mc_dev->obj_desc.id,
					    DPRC_RESET_OPTION_NON_RECURSIVE);

	ret = fsl_mc_obj_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
			      mc_dev->obj_desc.type, &token);
	if (ret)
		return ret;

	ret = fsl_mc_obj_reset(mc_dev->mc_io, 0, token);
	if (ret) {
		/* Best-effort close; the reset error is what we report. */
		fsl_mc_obj_close(mc_dev->mc_io, 0, token);
		return ret;
	}

	return fsl_mc_obj_close(mc_dev->mc_io, 0, token);
}
static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev) static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
{ {
...@@ -78,9 +106,7 @@ static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev) ...@@ -78,9 +106,7 @@ static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
vfio_fsl_mc_regions_cleanup(vdev); vfio_fsl_mc_regions_cleanup(vdev);
/* reset the device before cleaning up the interrupts */ /* reset the device before cleaning up the interrupts */
ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle, ret = vfio_fsl_mc_reset_device(vdev);
mc_cont->obj_desc.id,
DPRC_RESET_OPTION_NON_RECURSIVE);
if (WARN_ON(ret)) if (WARN_ON(ret))
dev_warn(&mc_cont->dev, dev_warn(&mc_cont->dev,
...@@ -203,18 +229,7 @@ static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev, ...@@ -203,18 +229,7 @@ static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
} }
case VFIO_DEVICE_RESET: case VFIO_DEVICE_RESET:
{ {
int ret; return vfio_fsl_mc_reset_device(vdev);
struct fsl_mc_device *mc_dev = vdev->mc_dev;
/* reset is supported only for the DPRC */
if (!is_fsl_mc_bus_dprc(mc_dev))
return -ENOTTY;
ret = dprc_reset_container(mc_dev->mc_io, 0,
mc_dev->mc_handle,
mc_dev->obj_desc.id,
DPRC_RESET_OPTION_NON_RECURSIVE);
return ret;
} }
default: default:
...@@ -505,22 +520,13 @@ static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev) ...@@ -505,22 +520,13 @@ static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev) static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{ {
struct iommu_group *group;
struct vfio_fsl_mc_device *vdev; struct vfio_fsl_mc_device *vdev;
struct device *dev = &mc_dev->dev; struct device *dev = &mc_dev->dev;
int ret; int ret;
group = vfio_iommu_group_get(dev);
if (!group) {
dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
return -EINVAL;
}
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) { if (!vdev)
ret = -ENOMEM; return -ENOMEM;
goto out_group_put;
}
vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops); vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops);
vdev->mc_dev = mc_dev; vdev->mc_dev = mc_dev;
...@@ -556,8 +562,6 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev) ...@@ -556,8 +562,6 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
out_uninit: out_uninit:
vfio_uninit_group_dev(&vdev->vdev); vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev); kfree(vdev);
out_group_put:
vfio_iommu_group_put(group, dev);
return ret; return ret;
} }
...@@ -574,8 +578,6 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev) ...@@ -574,8 +578,6 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
vfio_uninit_group_dev(&vdev->vdev); vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev); kfree(vdev);
vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
return 0; return 0;
} }
......
...@@ -13,60 +13,23 @@ ...@@ -13,60 +13,23 @@
#include "mdev_private.h" #include "mdev_private.h"
/*
 * Allocate a new IOMMU group and add the mdev's device to it.
 * Logs the assigned group ID on success.
 *
 * NOTE(review): the local reference from iommu_group_alloc() is dropped
 * unconditionally below; presumably iommu_group_add_device() takes its
 * own reference that keeps the group alive — confirm against the iommu
 * core before relying on this.
 */
static int mdev_attach_iommu(struct mdev_device *mdev)
{
	int ret;
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Membership is only reported when the add succeeded (ret == 0). */
	ret = iommu_group_add_device(group, &mdev->dev);
	if (!ret)
		dev_info(&mdev->dev, "MDEV: group_id = %d\n",
			 iommu_group_id(group));

	iommu_group_put(group);
	return ret;
}
/* Remove the mdev's device from its IOMMU group; reverses mdev_attach_iommu(). */
static void mdev_detach_iommu(struct mdev_device *mdev)
{
	iommu_group_remove_device(&mdev->dev);
	dev_info(&mdev->dev, "MDEV: detaching iommu\n");
}
static int mdev_probe(struct device *dev) static int mdev_probe(struct device *dev)
{ {
struct mdev_driver *drv = struct mdev_driver *drv =
container_of(dev->driver, struct mdev_driver, driver); container_of(dev->driver, struct mdev_driver, driver);
struct mdev_device *mdev = to_mdev_device(dev);
int ret;
ret = mdev_attach_iommu(mdev);
if (ret)
return ret;
if (drv->probe) { if (!drv->probe)
ret = drv->probe(mdev); return 0;
if (ret) return drv->probe(to_mdev_device(dev));
mdev_detach_iommu(mdev);
}
return ret;
} }
static void mdev_remove(struct device *dev) static void mdev_remove(struct device *dev)
{ {
struct mdev_driver *drv = struct mdev_driver *drv =
container_of(dev->driver, struct mdev_driver, driver); container_of(dev->driver, struct mdev_driver, driver);
struct mdev_device *mdev = to_mdev_device(dev);
if (drv->remove) if (drv->remove)
drv->remove(mdev); drv->remove(to_mdev_device(dev));
mdev_detach_iommu(mdev);
} }
static int mdev_match(struct device *dev, struct device_driver *drv) static int mdev_match(struct device *dev, struct device_driver *drv)
......
...@@ -119,7 +119,7 @@ static int vfio_mdev_probe(struct mdev_device *mdev) ...@@ -119,7 +119,7 @@ static int vfio_mdev_probe(struct mdev_device *mdev)
return -ENOMEM; return -ENOMEM;
vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops); vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops);
ret = vfio_register_group_dev(vdev); ret = vfio_register_emulated_iommu_dev(vdev);
if (ret) if (ret)
goto out_uninit; goto out_uninit;
......
...@@ -1806,7 +1806,6 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device); ...@@ -1806,7 +1806,6 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{ {
struct pci_dev *pdev = vdev->pdev; struct pci_dev *pdev = vdev->pdev;
struct iommu_group *group;
int ret; int ret;
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
...@@ -1825,10 +1824,6 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) ...@@ -1825,10 +1824,6 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
return -EBUSY; return -EBUSY;
} }
group = vfio_iommu_group_get(&pdev->dev);
if (!group)
return -EINVAL;
if (pci_is_root_bus(pdev->bus)) { if (pci_is_root_bus(pdev->bus)) {
ret = vfio_assign_device_set(&vdev->vdev, vdev); ret = vfio_assign_device_set(&vdev->vdev, vdev);
} else if (!pci_probe_reset_slot(pdev->slot)) { } else if (!pci_probe_reset_slot(pdev->slot)) {
...@@ -1842,10 +1837,10 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) ...@@ -1842,10 +1837,10 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
} }
if (ret) if (ret)
goto out_group_put; return ret;
ret = vfio_pci_vf_init(vdev); ret = vfio_pci_vf_init(vdev);
if (ret) if (ret)
goto out_group_put; return ret;
ret = vfio_pci_vga_init(vdev); ret = vfio_pci_vga_init(vdev);
if (ret) if (ret)
goto out_vf; goto out_vf;
...@@ -1876,8 +1871,6 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) ...@@ -1876,8 +1871,6 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
vfio_pci_set_power_state(vdev, PCI_D0); vfio_pci_set_power_state(vdev, PCI_D0);
out_vf: out_vf:
vfio_pci_vf_uninit(vdev); vfio_pci_vf_uninit(vdev);
out_group_put:
vfio_iommu_group_put(group, &pdev->dev);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(vfio_pci_core_register_device); EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
...@@ -1893,8 +1886,6 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) ...@@ -1893,8 +1886,6 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
vfio_pci_vf_uninit(vdev); vfio_pci_vf_uninit(vdev);
vfio_pci_vga_uninit(vdev); vfio_pci_vga_uninit(vdev);
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
if (!disable_idle_d3) if (!disable_idle_d3)
vfio_pci_set_power_state(vdev, PCI_D0); vfio_pci_set_power_state(vdev, PCI_D0);
} }
......
...@@ -25,20 +25,121 @@ ...@@ -25,20 +25,121 @@
#define OPREGION_RVDS 0x3c2 #define OPREGION_RVDS 0x3c2
#define OPREGION_VERSION 0x16 #define OPREGION_VERSION 0x16
/* Backing mappings for the virtual IGD OpRegion region exposed to user space. */
struct igd_opregion_vbt {
	void *opregion;	/* memremap'ed OpRegion (OPREGION_SIZE bytes) */
	void *vbt_ex;	/* memremap'ed extended VBT, or NULL when absent */
};
/**
 * igd_opregion_shift_copy() - Copy one chunk to the user buffer and advance
 *                             all running positions.
 * @dst:       user buffer the region read copies into
 * @off:       offset into @dst; advanced by @bytes on success
 * @src:       kernel source buffer to copy from
 * @pos:       position within the virtual region; advanced by @bytes
 * @remaining: bytes still left to copy; decreased by @bytes
 * @bytes:     number of bytes to copy in this step
 *
 * Helper for assembling the virtual OpRegion read out of several
 * source buffers.
 *
 * Return: 0 on success, -EFAULT if the copy to user space faults.
 */
static inline unsigned long igd_opregion_shift_copy(char __user *dst,
						    loff_t *off,
						    void *src,
						    loff_t *pos,
						    size_t *remaining,
						    size_t bytes)
{
	if (copy_to_user(dst + *off, src, bytes))
		return -EFAULT;

	/* These counters are independent; advance them all by @bytes. */
	*pos += bytes;
	*off += bytes;
	*remaining -= bytes;

	return 0;
}
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev, static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
char __user *buf, size_t count, loff_t *ppos, char __user *buf, size_t count, loff_t *ppos,
bool iswrite) bool iswrite)
{ {
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
void *base = vdev->region[i].data; struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
size_t remaining;
if (pos >= vdev->region[i].size || iswrite) if (pos >= vdev->region[i].size || iswrite)
return -EINVAL; return -EINVAL;
count = min(count, (size_t)(vdev->region[i].size - pos)); count = min_t(size_t, count, vdev->region[i].size - pos);
remaining = count;
/* Copy until OpRegion version */
if (remaining && pos < OPREGION_VERSION) {
size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);
if (igd_opregion_shift_copy(buf, &off,
opregionvbt->opregion + pos, &pos,
&remaining, bytes))
return -EFAULT;
}
/* Copy patched (if necessary) OpRegion version */
if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
size_t bytes = min_t(size_t, remaining,
OPREGION_VERSION + sizeof(__le16) - pos);
__le16 version = *(__le16 *)(opregionvbt->opregion +
OPREGION_VERSION);
/* Patch to 2.1 if OpRegion 2.0 has extended VBT */
if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
version = cpu_to_le16(0x0201);
if (igd_opregion_shift_copy(buf, &off,
&version + (pos - OPREGION_VERSION),
&pos, &remaining, bytes))
return -EFAULT;
}
/* Copy until RVDA */
if (remaining && pos < OPREGION_RVDA) {
size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);
if (igd_opregion_shift_copy(buf, &off,
opregionvbt->opregion + pos, &pos,
&remaining, bytes))
return -EFAULT;
}
/* Copy modified (if necessary) RVDA */
if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
size_t bytes = min_t(size_t, remaining,
OPREGION_RVDA + sizeof(__le64) - pos);
__le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
OPREGION_SIZE : 0);
if (igd_opregion_shift_copy(buf, &off,
&rvda + (pos - OPREGION_RVDA),
&pos, &remaining, bytes))
return -EFAULT;
}
/* Copy the rest of OpRegion */
if (remaining && pos < OPREGION_SIZE) {
size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);
if (igd_opregion_shift_copy(buf, &off,
opregionvbt->opregion + pos, &pos,
&remaining, bytes))
return -EFAULT;
}
if (copy_to_user(buf, base + pos, count)) /* Copy extended VBT if exists */
if (remaining &&
copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
remaining))
return -EFAULT; return -EFAULT;
*ppos += count; *ppos += count;
...@@ -49,7 +150,13 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev, ...@@ -49,7 +150,13 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev, static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
struct vfio_pci_region *region) struct vfio_pci_region *region)
{ {
memunmap(region->data); struct igd_opregion_vbt *opregionvbt = region->data;
if (opregionvbt->vbt_ex)
memunmap(opregionvbt->vbt_ex);
memunmap(opregionvbt->opregion);
kfree(opregionvbt);
} }
static const struct vfio_pci_regops vfio_pci_igd_regops = { static const struct vfio_pci_regops vfio_pci_igd_regops = {
...@@ -61,7 +168,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev) ...@@ -61,7 +168,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{ {
__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR); __le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
u32 addr, size; u32 addr, size;
void *base; struct igd_opregion_vbt *opregionvbt;
int ret; int ret;
u16 version; u16 version;
...@@ -72,84 +179,93 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev) ...@@ -72,84 +179,93 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
if (!addr || !(~addr)) if (!addr || !(~addr))
return -ENODEV; return -ENODEV;
base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB); opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
if (!base) if (!opregionvbt)
return -ENOMEM; return -ENOMEM;
if (memcmp(base, OPREGION_SIGNATURE, 16)) { opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
memunmap(base); if (!opregionvbt->opregion) {
kfree(opregionvbt);
return -ENOMEM;
}
if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
memunmap(opregionvbt->opregion);
kfree(opregionvbt);
return -EINVAL; return -EINVAL;
} }
size = le32_to_cpu(*(__le32 *)(base + 16)); size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
if (!size) { if (!size) {
memunmap(base); memunmap(opregionvbt->opregion);
kfree(opregionvbt);
return -EINVAL; return -EINVAL;
} }
size *= 1024; /* In KB */ size *= 1024; /* In KB */
/* /*
* Support opregion v2.1+ * OpRegion and VBT:
* When VBT data exceeds 6KB size and cannot be within mailbox #4, then * When VBT data doesn't exceed 6KB, it's stored in Mailbox #4.
* the Extended VBT region next to opregion is used to hold the VBT data. * When VBT data exceeds 6KB size, Mailbox #4 is no longer large enough
* RVDA (Relative Address of VBT Data from Opregion Base) and RVDS * to hold the VBT data, the Extended VBT region is introduced since
* (Raw VBT Data Size) from opregion structure member are used to hold the * OpRegion 2.0 to hold the VBT data. Since OpRegion 2.0, RVDA/RVDS are
* address from region base and size of VBT data. RVDA/RVDS are not * introduced to define the extended VBT data location and size.
* defined before opregion 2.0. * OpRegion 2.0: RVDA defines the absolute physical address of the
* * extended VBT data, RVDS defines the VBT data size.
* opregion 2.1+: RVDA is unsigned, relative offset from * OpRegion 2.1 and above: RVDA defines the relative address of the
* opregion base, and should point to the end of opregion. * extended VBT data to OpRegion base, RVDS defines the VBT data size.
* otherwise, exposing to userspace to allow read access to everything between
* the OpRegion and VBT is not safe.
* RVDS is defined as size in bytes.
* *
* opregion 2.0: rvda is the physical VBT address. * Due to the RVDA definition diff in OpRegion VBT (also the only diff
* Since rvda is HPA it cannot be directly used in guest. * between 2.0 and 2.1), exposing OpRegion and VBT as a contiguous range
* And it should not be practically available for end user,so it is not supported. * for OpRegion 2.0 and above makes it possible to support the
* non-contiguous VBT through a single vfio region. From r/w ops view,
* only contiguous VBT after OpRegion with version 2.1+ is exposed,
* regardless the host OpRegion is 2.0 or non-contiguous 2.1+. The r/w
* ops will on-the-fly shift the actural offset into VBT so that data at
* correct position can be returned to the requester.
*/ */
version = le16_to_cpu(*(__le16 *)(base + OPREGION_VERSION)); version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
OPREGION_VERSION));
if (version >= 0x0200) { if (version >= 0x0200) {
u64 rvda; u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
u32 rvds; OPREGION_RVDA));
u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
OPREGION_RVDS));
rvda = le64_to_cpu(*(__le64 *)(base + OPREGION_RVDA)); /* The extended VBT is valid only when RVDA/RVDS are non-zero */
rvds = le32_to_cpu(*(__le32 *)(base + OPREGION_RVDS));
if (rvda && rvds) { if (rvda && rvds) {
/* no support for opregion v2.0 with physical VBT address */
if (version == 0x0200) {
memunmap(base);
pci_err(vdev->pdev,
"IGD assignment does not support opregion v2.0 with an extended VBT region\n");
return -EINVAL;
}
if (rvda != size) {
memunmap(base);
pci_err(vdev->pdev,
"Extended VBT does not follow opregion on version 0x%04x\n",
version);
return -EINVAL;
}
/* region size for opregion v2.0+: opregion and VBT size. */
size += rvds; size += rvds;
}
}
if (size != OPREGION_SIZE) { /*
memunmap(base); * Extended VBT location by RVDA:
base = memremap(addr, size, MEMREMAP_WB); * Absolute physical addr for 2.0.
if (!base) * Relative addr to OpRegion header for 2.1+.
*/
if (version == 0x0200)
addr = rvda;
else
addr += rvda;
opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
if (!opregionvbt->vbt_ex) {
memunmap(opregionvbt->opregion);
kfree(opregionvbt);
return -ENOMEM; return -ENOMEM;
} }
}
}
ret = vfio_pci_register_dev_region(vdev, ret = vfio_pci_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
&vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base); size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
if (ret) { if (ret) {
memunmap(base); if (opregionvbt->vbt_ex)
memunmap(opregionvbt->vbt_ex);
memunmap(opregionvbt->opregion);
kfree(opregionvbt);
return ret; return ret;
} }
......
...@@ -642,7 +642,6 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev, ...@@ -642,7 +642,6 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
int vfio_platform_probe_common(struct vfio_platform_device *vdev, int vfio_platform_probe_common(struct vfio_platform_device *vdev,
struct device *dev) struct device *dev)
{ {
struct iommu_group *group;
int ret; int ret;
vfio_init_group_dev(&vdev->vdev, dev, &vfio_platform_ops); vfio_init_group_dev(&vdev->vdev, dev, &vfio_platform_ops);
...@@ -663,24 +662,15 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev, ...@@ -663,24 +662,15 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
goto out_uninit; goto out_uninit;
} }
group = vfio_iommu_group_get(dev);
if (!group) {
dev_err(dev, "No IOMMU group for device %s\n", vdev->name);
ret = -EINVAL;
goto put_reset;
}
ret = vfio_register_group_dev(&vdev->vdev); ret = vfio_register_group_dev(&vdev->vdev);
if (ret) if (ret)
goto put_iommu; goto put_reset;
mutex_init(&vdev->igate); mutex_init(&vdev->igate);
pm_runtime_enable(dev); pm_runtime_enable(dev);
return 0; return 0;
put_iommu:
vfio_iommu_group_put(group, dev);
put_reset: put_reset:
vfio_platform_put_reset(vdev); vfio_platform_put_reset(vdev);
out_uninit: out_uninit:
...@@ -696,7 +686,6 @@ void vfio_platform_remove_common(struct vfio_platform_device *vdev) ...@@ -696,7 +686,6 @@ void vfio_platform_remove_common(struct vfio_platform_device *vdev)
pm_runtime_disable(vdev->device); pm_runtime_disable(vdev->device);
vfio_platform_put_reset(vdev); vfio_platform_put_reset(vdev);
vfio_uninit_group_dev(&vdev->vdev); vfio_uninit_group_dev(&vdev->vdev);
vfio_iommu_group_put(vdev->vdev.dev->iommu_group, vdev->vdev.dev);
} }
EXPORT_SYMBOL_GPL(vfio_platform_remove_common); EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*/
enum vfio_group_type {
/*
* Physical device with IOMMU backing.
*/
VFIO_IOMMU,
/*
* Virtual device without IOMMU backing. The VFIO core fakes up an
* iommu_group as the iommu_group sysfs interface is part of the
* userspace ABI. The user of these devices must not be able to
* directly trigger unmediated DMA.
*/
VFIO_EMULATED_IOMMU,
/*
* Physical device without IOMMU backing. The VFIO core fakes up an
* iommu_group as the iommu_group sysfs interface is part of the
* userspace ABI. Users can trigger unmediated DMA by the device,
* usage is highly dangerous, requires an explicit opt-in and will
* taint the kernel.
*/
VFIO_NO_IOMMU,
};
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
VFIO_IOMMU_CONTAINER_CLOSE = 0,
};
/**
* struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
*/
struct vfio_iommu_driver_ops {
char *name;
struct module *owner;
void *(*open)(unsigned long arg);
void (*release)(void *iommu_data);
long (*ioctl)(void *iommu_data, unsigned int cmd,
unsigned long arg);
int (*attach_group)(void *iommu_data,
struct iommu_group *group,
enum vfio_group_type);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
int (*pin_pages)(void *iommu_data,
struct iommu_group *group,
unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn);
int (*unpin_pages)(void *iommu_data,
unsigned long *user_pfn, int npage);
int (*register_notifier)(void *iommu_data,
unsigned long *events,
struct notifier_block *nb);
int (*unregister_notifier)(void *iommu_data,
struct notifier_block *nb);
int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
void *data, size_t count, bool write);
struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
struct iommu_group *group);
void (*notify)(void *iommu_data,
enum vfio_iommu_notify_type event);
};
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
#include <linux/mm.h> #include <linux/mm.h>
#include "vfio.h"
#include <asm/iommu.h> #include <asm/iommu.h>
#include <asm/tce.h> #include <asm/tce.h>
...@@ -1238,13 +1239,16 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, ...@@ -1238,13 +1239,16 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
} }
static int tce_iommu_attach_group(void *iommu_data, static int tce_iommu_attach_group(void *iommu_data,
struct iommu_group *iommu_group) struct iommu_group *iommu_group, enum vfio_group_type type)
{ {
int ret = 0; int ret = 0;
struct tce_container *container = iommu_data; struct tce_container *container = iommu_data;
struct iommu_table_group *table_group; struct iommu_table_group *table_group;
struct tce_iommu_group *tcegrp = NULL; struct tce_iommu_group *tcegrp = NULL;
if (type == VFIO_EMULATED_IOMMU)
return -EINVAL;
mutex_lock(&container->lock); mutex_lock(&container->lock);
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
......
This diff is collapsed.
...@@ -620,6 +620,20 @@ int dpcon_reset(struct fsl_mc_io *mc_io, ...@@ -620,6 +620,20 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags, u32 cmd_flags,
u16 token); u16 token);
int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int obj_id,
char *obj_type,
u16 *token);
int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
/** /**
* struct dpcon_attr - Structure representing DPCON attributes * struct dpcon_attr - Structure representing DPCON attributes
* @id: DPCON object ID * @id: DPCON object ID
......
...@@ -18,7 +18,6 @@ struct mdev_device { ...@@ -18,7 +18,6 @@ struct mdev_device {
void *driver_data; void *driver_data;
struct list_head next; struct list_head next;
struct mdev_type *type; struct mdev_type *type;
struct device *iommu_device;
bool active; bool active;
}; };
...@@ -27,25 +26,6 @@ static inline struct mdev_device *to_mdev_device(struct device *dev) ...@@ -27,25 +26,6 @@ static inline struct mdev_device *to_mdev_device(struct device *dev)
return container_of(dev, struct mdev_device, dev); return container_of(dev, struct mdev_device, dev);
} }
/*
* Called by the parent device driver to set the device which represents
* this mdev in iommu protection scope. By default, the iommu device is
* NULL, that indicates using vendor defined isolation.
*
* @dev: the mediated device that iommu will isolate.
* @iommu_device: a pci device which represents the iommu for @dev.
*/
static inline void mdev_set_iommu_device(struct mdev_device *mdev,
struct device *iommu_device)
{
mdev->iommu_device = iommu_device;
}
static inline struct device *mdev_get_iommu_device(struct mdev_device *mdev)
{
return mdev->iommu_device;
}
unsigned int mdev_get_type_group_id(struct mdev_device *mdev); unsigned int mdev_get_type_group_id(struct mdev_device *mdev);
unsigned int mtype_get_type_group_id(struct mdev_type *mtype); unsigned int mtype_get_type_group_id(struct mdev_type *mtype);
struct device *mtype_get_parent_dev(struct mdev_type *mtype); struct device *mtype_get_parent_dev(struct mdev_type *mtype);
......
...@@ -71,68 +71,17 @@ struct vfio_device_ops { ...@@ -71,68 +71,17 @@ struct vfio_device_ops {
int (*match)(struct vfio_device *vdev, char *buf); int (*match)(struct vfio_device *vdev, char *buf);
}; };
extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
void vfio_init_group_dev(struct vfio_device *device, struct device *dev, void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
const struct vfio_device_ops *ops); const struct vfio_device_ops *ops);
void vfio_uninit_group_dev(struct vfio_device *device); void vfio_uninit_group_dev(struct vfio_device *device);
int vfio_register_group_dev(struct vfio_device *device); int vfio_register_group_dev(struct vfio_device *device);
int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device); void vfio_unregister_group_dev(struct vfio_device *device);
extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
extern void vfio_device_put(struct vfio_device *device); extern void vfio_device_put(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id); int vfio_assign_device_set(struct vfio_device *device, void *set_id);
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
VFIO_IOMMU_CONTAINER_CLOSE = 0,
};
/**
* struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
*/
struct vfio_iommu_driver_ops {
char *name;
struct module *owner;
void *(*open)(unsigned long arg);
void (*release)(void *iommu_data);
ssize_t (*read)(void *iommu_data, char __user *buf,
size_t count, loff_t *ppos);
ssize_t (*write)(void *iommu_data, const char __user *buf,
size_t count, loff_t *size);
long (*ioctl)(void *iommu_data, unsigned int cmd,
unsigned long arg);
int (*mmap)(void *iommu_data, struct vm_area_struct *vma);
int (*attach_group)(void *iommu_data,
struct iommu_group *group);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
int (*pin_pages)(void *iommu_data,
struct iommu_group *group,
unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn);
int (*unpin_pages)(void *iommu_data,
unsigned long *user_pfn, int npage);
int (*register_notifier)(void *iommu_data,
unsigned long *events,
struct notifier_block *nb);
int (*unregister_notifier)(void *iommu_data,
struct notifier_block *nb);
int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
void *data, size_t count, bool write);
struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
struct iommu_group *group);
void (*notify)(void *iommu_data,
enum vfio_iommu_notify_type event);
};
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
extern void vfio_unregister_iommu_driver(
const struct vfio_iommu_driver_ops *ops);
/* /*
* External user API * External user API
*/ */
......
...@@ -553,7 +553,7 @@ static int mbochs_probe(struct mdev_device *mdev) ...@@ -553,7 +553,7 @@ static int mbochs_probe(struct mdev_device *mdev)
mbochs_create_config_space(mdev_state); mbochs_create_config_space(mdev_state);
mbochs_reset(mdev_state); mbochs_reset(mdev_state);
ret = vfio_register_group_dev(&mdev_state->vdev); ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
if (ret) if (ret)
goto err_mem; goto err_mem;
dev_set_drvdata(&mdev->dev, mdev_state); dev_set_drvdata(&mdev->dev, mdev_state);
......
...@@ -258,7 +258,7 @@ static int mdpy_probe(struct mdev_device *mdev) ...@@ -258,7 +258,7 @@ static int mdpy_probe(struct mdev_device *mdev)
mdpy_count++; mdpy_count++;
ret = vfio_register_group_dev(&mdev_state->vdev); ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
if (ret) if (ret)
goto err_mem; goto err_mem;
dev_set_drvdata(&mdev->dev, mdev_state); dev_set_drvdata(&mdev->dev, mdev_state);
......
...@@ -741,7 +741,7 @@ static int mtty_probe(struct mdev_device *mdev) ...@@ -741,7 +741,7 @@ static int mtty_probe(struct mdev_device *mdev)
mtty_create_config_space(mdev_state); mtty_create_config_space(mdev_state);
ret = vfio_register_group_dev(&mdev_state->vdev); ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
if (ret) if (ret)
goto err_vconfig; goto err_vconfig;
dev_set_drvdata(&mdev->dev, mdev_state); dev_set_drvdata(&mdev->dev, mdev_state);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment