Commit 5513bc8e authored by Martin Schwidefsky's avatar Martin Schwidefsky

Merge tag 'vfio-ccw-20190425' of...

Merge tag 'vfio-ccw-20190425' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/vfio-ccw into features

Pull vfio-ccw from Cornelia Huck with the following changes:

 - support for sending halt/clear requests to the device

 - various bug fixes
parents c9f62152 d1ffa760
...@@ -20,5 +20,6 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o ...@@ -20,5 +20,6 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o obj-$(CONFIG_QDIO) += qdio.o
vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
vfio_ccw_async.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
...@@ -233,6 +233,7 @@ int hsch(struct subchannel_id schid) ...@@ -233,6 +233,7 @@ int hsch(struct subchannel_id schid)
return ccode; return ccode;
} }
EXPORT_SYMBOL(hsch);
static inline int __xsch(struct subchannel_id schid) static inline int __xsch(struct subchannel_id schid)
{ {
......
// SPDX-License-Identifier: GPL-2.0
/*
* Async I/O region for vfio_ccw
*
* Copyright Red Hat, Inc. 2019
*
* Author(s): Cornelia Huck <cohuck@redhat.com>
*/
#include <linux/vfio.h>
#include <linux/mdev.h>
#include "vfio_ccw_private.h"
/*
 * Read handler for the async command region.
 *
 * Copies up to @count bytes of the region backing store at offset @ppos
 * back to userspace, under io_mutex so a concurrent writer cannot race
 * with the copy.  Returns the number of bytes copied, -EINVAL for an
 * access beyond the region, or -EFAULT on a failed user copy.
 */
static ssize_t vfio_ccw_async_region_read(struct vfio_ccw_private *private,
					  char __user *buf, size_t count,
					  loff_t *ppos)
{
	/* Index relative to the first device-specific region. */
	unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos) -
			     VFIO_CCW_NUM_REGIONS;
	loff_t offset = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_cmd_region *cmd_region;
	int ret;

	/* Reject reads that would run past the end of the region. */
	if (offset + count > sizeof(*cmd_region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	cmd_region = private->region[index].data;
	ret = copy_to_user(buf, (void *)cmd_region + offset, count) ?
		-EFAULT : count;
	mutex_unlock(&private->io_mutex);

	return ret;
}
/*
 * Write handler for the async command region: accepts a halt/clear
 * request from userspace and hands it to the state machine.
 *
 * Returns @count on success, the region's ret_code if the FSM reported
 * an error, -EINVAL for an out-of-bounds access, -EAGAIN if another
 * request currently holds io_mutex, or -EFAULT on a failed user copy.
 */
static ssize_t vfio_ccw_async_region_write(struct vfio_ccw_private *private,
					   const char __user *buf, size_t count,
					   loff_t *ppos)
{
	/* Index relative to the first device-specific region. */
	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_cmd_region *region;
	int ret;

	/* Reject writes that would run past the end of the region. */
	if (pos + count > sizeof(*region))
		return -EINVAL;

	/*
	 * Don't block if a request is already being processed; let
	 * userspace retry instead.
	 */
	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->region[i].data;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	/* The FSM handler fills in region->ret_code. */
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_ASYNC_REQ);

	ret = region->ret_code ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}
/*
 * Release handler for the async command region.  Intentionally empty:
 * the region's backing data (private->cmd_region) is allocated and
 * freed elsewhere by the driver, so there is nothing to clean up here.
 */
static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
					  struct vfio_ccw_region *region)
{

}

/* Region callbacks wired up via vfio_ccw_register_dev_region(). */
const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
	.read = vfio_ccw_async_region_read,
	.write = vfio_ccw_async_region_write,
	.release = vfio_ccw_async_region_release,
};
int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private)
{
return vfio_ccw_register_dev_region(private,
VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
&vfio_ccw_async_region_ops,
sizeof(struct ccw_cmd_region),
VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE,
private->cmd_region);
}
...@@ -362,6 +362,7 @@ static void cp_unpin_free(struct channel_program *cp) ...@@ -362,6 +362,7 @@ static void cp_unpin_free(struct channel_program *cp)
struct ccwchain *chain, *temp; struct ccwchain *chain, *temp;
int i; int i;
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) { list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) { for (i = 0; i < chain->ch_len; i++) {
pfn_array_table_unpin_free(chain->ch_pat + i, pfn_array_table_unpin_free(chain->ch_pat + i,
...@@ -732,6 +733,9 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) ...@@ -732,6 +733,9 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/ */
cp->orb.cmd.c64 = 1; cp->orb.cmd.c64 = 1;
if (!ret)
cp->initialized = true;
return ret; return ret;
} }
...@@ -746,6 +750,7 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) ...@@ -746,6 +750,7 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
*/ */
void cp_free(struct channel_program *cp) void cp_free(struct channel_program *cp)
{ {
if (cp->initialized)
cp_unpin_free(cp); cp_unpin_free(cp);
} }
...@@ -791,6 +796,10 @@ int cp_prefetch(struct channel_program *cp) ...@@ -791,6 +796,10 @@ int cp_prefetch(struct channel_program *cp)
struct ccwchain *chain; struct ccwchain *chain;
int len, idx, ret; int len, idx, ret;
/* this is an error in the caller */
if (!cp->initialized)
return -EINVAL;
list_for_each_entry(chain, &cp->ccwchain_list, next) { list_for_each_entry(chain, &cp->ccwchain_list, next) {
len = chain->ch_len; len = chain->ch_len;
for (idx = 0; idx < len; idx++) { for (idx = 0; idx < len; idx++) {
...@@ -826,6 +835,10 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm) ...@@ -826,6 +835,10 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
struct ccwchain *chain; struct ccwchain *chain;
struct ccw1 *cpa; struct ccw1 *cpa;
/* this is an error in the caller */
if (!cp->initialized)
return NULL;
orb = &cp->orb; orb = &cp->orb;
orb->cmd.intparm = intparm; orb->cmd.intparm = intparm;
...@@ -862,6 +875,9 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) ...@@ -862,6 +875,9 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
u32 cpa = scsw->cmd.cpa; u32 cpa = scsw->cmd.cpa;
u32 ccw_head; u32 ccw_head;
if (!cp->initialized)
return;
/* /*
* LATER: * LATER:
* For now, only update the cmd.cpa part. We may need to deal with * For now, only update the cmd.cpa part. We may need to deal with
...@@ -898,6 +914,9 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova) ...@@ -898,6 +914,9 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova)
struct ccwchain *chain; struct ccwchain *chain;
int i; int i;
if (!cp->initialized)
return false;
list_for_each_entry(chain, &cp->ccwchain_list, next) { list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) for (i = 0; i < chain->ch_len; i++)
if (pfn_array_table_iova_pinned(chain->ch_pat + i, if (pfn_array_table_iova_pinned(chain->ch_pat + i,
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
* @ccwchain_list: list head of ccwchains * @ccwchain_list: list head of ccwchains
* @orb: orb for the currently processed ssch request * @orb: orb for the currently processed ssch request
* @mdev: the mediated device to perform page pinning/unpinning * @mdev: the mediated device to perform page pinning/unpinning
* @initialized: whether this instance is actually initialized
* *
* @ccwchain_list is the head of a ccwchain list, that contents the * @ccwchain_list is the head of a ccwchain list, that contents the
* translated result of the guest channel program that pointed out by * translated result of the guest channel program that pointed out by
...@@ -30,6 +31,7 @@ struct channel_program { ...@@ -30,6 +31,7 @@ struct channel_program {
struct list_head ccwchain_list; struct list_head ccwchain_list;
union orb orb; union orb orb;
struct device *mdev; struct device *mdev;
bool initialized;
}; };
extern int cp_init(struct channel_program *cp, struct device *mdev, extern int cp_init(struct channel_program *cp, struct device *mdev,
......
...@@ -3,9 +3,11 @@ ...@@ -3,9 +3,11 @@
* VFIO based Physical Subchannel device driver * VFIO based Physical Subchannel device driver
* *
* Copyright IBM Corp. 2017 * Copyright IBM Corp. 2017
* Copyright Red Hat, Inc. 2019
* *
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
* Cornelia Huck <cohuck@redhat.com>
*/ */
#include <linux/module.h> #include <linux/module.h>
...@@ -23,6 +25,7 @@ ...@@ -23,6 +25,7 @@
struct workqueue_struct *vfio_ccw_work_q; struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region; static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
/* /*
* Helpers * Helpers
...@@ -40,11 +43,17 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch) ...@@ -40,11 +43,17 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
if (ret != -EBUSY) if (ret != -EBUSY)
goto out_unlock; goto out_unlock;
do {
iretry = 255; iretry = 255;
do {
ret = cio_cancel_halt_clear(sch, &iretry); ret = cio_cancel_halt_clear(sch, &iretry);
while (ret == -EBUSY) {
if (ret == -EIO) {
pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
sch->schid.ssid, sch->schid.sch_no);
break;
}
/* /*
* Flush all I/O and wait for * Flush all I/O and wait for
* cancel/halt/clear completion. * cancel/halt/clear completion.
...@@ -52,14 +61,12 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch) ...@@ -52,14 +61,12 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
private->completion = &completion; private->completion = &completion;
spin_unlock_irq(sch->lock); spin_unlock_irq(sch->lock);
if (ret == -EBUSY)
wait_for_completion_timeout(&completion, 3*HZ); wait_for_completion_timeout(&completion, 3*HZ);
spin_lock_irq(sch->lock);
private->completion = NULL; private->completion = NULL;
flush_workqueue(vfio_ccw_work_q); flush_workqueue(vfio_ccw_work_q);
ret = cio_cancel_halt_clear(sch, &iretry); spin_lock_irq(sch->lock);
};
ret = cio_disable_subchannel(sch); ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY); } while (ret == -EBUSY);
out_unlock: out_unlock:
...@@ -84,7 +91,9 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) ...@@ -84,7 +91,9 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
if (is_final) if (is_final)
cp_free(&private->cp); cp_free(&private->cp);
} }
mutex_lock(&private->io_mutex);
memcpy(private->io_region->irb_area, irb, sizeof(*irb)); memcpy(private->io_region->irb_area, irb, sizeof(*irb));
mutex_unlock(&private->io_mutex);
if (private->io_trigger) if (private->io_trigger)
eventfd_signal(private->io_trigger, 1); eventfd_signal(private->io_trigger, 1);
...@@ -108,7 +117,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) ...@@ -108,7 +117,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
{ {
struct pmcw *pmcw = &sch->schib.pmcw; struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_private *private; struct vfio_ccw_private *private;
int ret; int ret = -ENOMEM;
if (pmcw->qf) { if (pmcw->qf) {
dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n", dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
...@@ -122,13 +131,17 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) ...@@ -122,13 +131,17 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA); GFP_KERNEL | GFP_DMA);
if (!private->io_region) { if (!private->io_region)
kfree(private); goto out_free;
return -ENOMEM;
} private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
GFP_KERNEL | GFP_DMA);
if (!private->cmd_region)
goto out_free;
private->sch = sch; private->sch = sch;
dev_set_drvdata(&sch->dev, private); dev_set_drvdata(&sch->dev, private);
mutex_init(&private->io_mutex);
spin_lock_irq(sch->lock); spin_lock_irq(sch->lock);
private->state = VFIO_CCW_STATE_NOT_OPER; private->state = VFIO_CCW_STATE_NOT_OPER;
...@@ -152,6 +165,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) ...@@ -152,6 +165,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
cio_disable_subchannel(sch); cio_disable_subchannel(sch);
out_free: out_free:
dev_set_drvdata(&sch->dev, NULL); dev_set_drvdata(&sch->dev, NULL);
if (private->cmd_region)
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
if (private->io_region)
kmem_cache_free(vfio_ccw_io_region, private->io_region); kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private); kfree(private);
return ret; return ret;
...@@ -167,6 +183,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch) ...@@ -167,6 +183,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL); dev_set_drvdata(&sch->dev, NULL);
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region); kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private); kfree(private);
...@@ -241,7 +258,7 @@ static struct css_driver vfio_ccw_sch_driver = { ...@@ -241,7 +258,7 @@ static struct css_driver vfio_ccw_sch_driver = {
static int __init vfio_ccw_sch_init(void) static int __init vfio_ccw_sch_init(void)
{ {
int ret; int ret = -ENOMEM;
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw"); vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
if (!vfio_ccw_work_q) if (!vfio_ccw_work_q)
...@@ -251,20 +268,30 @@ static int __init vfio_ccw_sch_init(void) ...@@ -251,20 +268,30 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_io_region), 0, sizeof(struct ccw_io_region), 0,
SLAB_ACCOUNT, 0, SLAB_ACCOUNT, 0,
sizeof(struct ccw_io_region), NULL); sizeof(struct ccw_io_region), NULL);
if (!vfio_ccw_io_region) { if (!vfio_ccw_io_region)
destroy_workqueue(vfio_ccw_work_q); goto out_err;
return -ENOMEM;
} vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
sizeof(struct ccw_cmd_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_cmd_region), NULL);
if (!vfio_ccw_cmd_region)
goto out_err;
isc_register(VFIO_CCW_ISC); isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver); ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) { if (ret) {
isc_unregister(VFIO_CCW_ISC); isc_unregister(VFIO_CCW_ISC);
kmem_cache_destroy(vfio_ccw_io_region); goto out_err;
destroy_workqueue(vfio_ccw_work_q);
} }
return ret; return ret;
out_err:
kmem_cache_destroy(vfio_ccw_cmd_region);
kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
return ret;
} }
static void __exit vfio_ccw_sch_exit(void) static void __exit vfio_ccw_sch_exit(void)
......
...@@ -3,8 +3,10 @@ ...@@ -3,8 +3,10 @@
* Finite state machine for vfio-ccw device handling * Finite state machine for vfio-ccw device handling
* *
* Copyright IBM Corp. 2017 * Copyright IBM Corp. 2017
* Copyright Red Hat, Inc. 2019
* *
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Cornelia Huck <cohuck@redhat.com>
*/ */
#include <linux/vfio.h> #include <linux/vfio.h>
...@@ -28,9 +30,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) ...@@ -28,9 +30,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
sch = private->sch; sch = private->sch;
spin_lock_irqsave(sch->lock, flags); spin_lock_irqsave(sch->lock, flags);
private->state = VFIO_CCW_STATE_BUSY;
orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
if (!orb) {
ret = -EIO;
goto out;
}
/* Issue "Start Subchannel" */ /* Issue "Start Subchannel" */
ccode = ssch(sch->schid, orb); ccode = ssch(sch->schid, orb);
...@@ -42,6 +47,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private) ...@@ -42,6 +47,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
*/ */
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
ret = 0; ret = 0;
private->state = VFIO_CCW_STATE_CP_PENDING;
break; break;
case 1: /* Status pending */ case 1: /* Status pending */
case 2: /* Busy */ case 2: /* Busy */
...@@ -64,6 +70,76 @@ static int fsm_io_helper(struct vfio_ccw_private *private) ...@@ -64,6 +70,76 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
default: default:
ret = ccode; ret = ccode;
} }
out:
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
/*
 * Issue a "Halt Subchannel" (hsch) for this device and map the
 * condition code to an errno-style result.
 *
 * Returns 0 on success (halt now pending), -EBUSY if the subchannel is
 * status-pending or busy, -ENODEV if it is not operational, or the raw
 * condition code for anything unexpected.
 */
static int fsm_do_halt(struct vfio_ccw_private *private)
{
	struct subchannel *sch = private->sch;
	unsigned long flags;
	int cc;
	int ret;

	spin_lock_irqsave(sch->lock, flags);

	/* Issue "Halt Subchannel" */
	cc = hsch(sch->schid);
	if (cc == 0) {
		/* Record that a halt is now pending on the subchannel. */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		ret = 0;
	} else if (cc == 1 || cc == 2) {
		/* Status pending or busy. */
		ret = -EBUSY;
	} else if (cc == 3) {
		/* Device not operational. */
		ret = -ENODEV;
	} else {
		ret = cc;
	}

	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
static int fsm_do_clear(struct vfio_ccw_private *private)
{
struct subchannel *sch;
unsigned long flags;
int ccode;
int ret;
sch = private->sch;
spin_lock_irqsave(sch->lock, flags);
/* Issue "Clear Subchannel" */
ccode = csch(sch->schid);
switch (ccode) {
case 0:
/*
* Initialize device status information
*/
sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
/* TODO: check what else we might need to clear */
ret = 0;
break;
case 3: /* Device not operational */
ret = -ENODEV;
break;
default:
ret = ccode;
}
spin_unlock_irqrestore(sch->lock, flags); spin_unlock_irqrestore(sch->lock, flags);
return ret; return ret;
} }
...@@ -102,6 +178,30 @@ static void fsm_io_busy(struct vfio_ccw_private *private, ...@@ -102,6 +178,30 @@ static void fsm_io_busy(struct vfio_ccw_private *private,
private->io_region->ret_code = -EBUSY; private->io_region->ret_code = -EBUSY;
} }
/*
 * An I/O request arrived while a channel program is still being
 * translated (CP_PROCESSING): tell userspace to retry later.
 */
static void fsm_io_retry(struct vfio_ccw_private *private,
			 enum vfio_ccw_event event)
{
	private->io_region->ret_code = -EAGAIN;
}
/*
 * An async (halt/clear) request arrived in a state that cannot handle
 * it: log which request it was and fail it with -EIO.
 */
static void fsm_async_error(struct vfio_ccw_private *private,
			    enum vfio_ccw_event event)
{
	struct ccw_cmd_region *cmd_region = private->cmd_region;
	const char *req;

	if (cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH)
		req = "halt";
	else if (cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH)
		req = "clear";
	else
		req = "<unknown>";

	pr_err("vfio-ccw: FSM: %s request from state:%d\n",
	       req, private->state);
	cmd_region->ret_code = -EIO;
}
/*
 * An async (halt/clear) request arrived while a channel program is
 * still being translated (CP_PROCESSING): tell userspace to retry.
 */
static void fsm_async_retry(struct vfio_ccw_private *private,
			    enum vfio_ccw_event event)
{
	private->cmd_region->ret_code = -EAGAIN;
}
static void fsm_disabled_irq(struct vfio_ccw_private *private, static void fsm_disabled_irq(struct vfio_ccw_private *private,
enum vfio_ccw_event event) enum vfio_ccw_event event)
{ {
...@@ -130,8 +230,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, ...@@ -130,8 +230,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
struct mdev_device *mdev = private->mdev; struct mdev_device *mdev = private->mdev;
char *errstr = "request"; char *errstr = "request";
private->state = VFIO_CCW_STATE_BUSY; private->state = VFIO_CCW_STATE_CP_PROCESSING;
memcpy(scsw, io_region->scsw_area, sizeof(*scsw)); memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) { if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
...@@ -166,21 +265,41 @@ static void fsm_io_request(struct vfio_ccw_private *private, ...@@ -166,21 +265,41 @@ static void fsm_io_request(struct vfio_ccw_private *private,
} }
return; return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) { } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
/* XXX: Handle halt. */ /* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP; io_region->ret_code = -EOPNOTSUPP;
goto err_out; goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
/* XXX: Handle clear. */ /* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP; io_region->ret_code = -EOPNOTSUPP;
goto err_out; goto err_out;
} }
err_out: err_out:
private->state = VFIO_CCW_STATE_IDLE;
trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private), trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
io_region->ret_code, errstr); io_region->ret_code, errstr);
} }
/*
 * Deal with an async request from userspace: dispatch the command in
 * the async region to the matching halt/clear handler and store the
 * result in ret_code for the region write path to pick up.
 */
static void fsm_async_request(struct vfio_ccw_private *private,
			      enum vfio_ccw_event event)
{
	struct ccw_cmd_region *cmd_region = private->cmd_region;

	if (cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH) {
		cmd_region->ret_code = fsm_do_halt(private);
	} else if (cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH) {
		cmd_region->ret_code = fsm_do_clear(private);
	} else {
		/* should not happen? */
		cmd_region->ret_code = -EINVAL;
	}
}
/* /*
* Got an interrupt for a normal io (state busy). * Got an interrupt for a normal io (state busy).
*/ */
...@@ -204,21 +323,31 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = { ...@@ -204,21 +323,31 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_STATE_NOT_OPER] = { [VFIO_CCW_STATE_NOT_OPER] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_nop, [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq, [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
}, },
[VFIO_CCW_STATE_STANDBY] = { [VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
}, },
[VFIO_CCW_STATE_IDLE] = { [VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
}, },
[VFIO_CCW_STATE_BUSY] = { [VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
}, },
}; };
...@@ -3,13 +3,17 @@ ...@@ -3,13 +3,17 @@
* Physical device callbacks for vfio_ccw * Physical device callbacks for vfio_ccw
* *
* Copyright IBM Corp. 2017 * Copyright IBM Corp. 2017
* Copyright Red Hat, Inc. 2019
* *
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
* Cornelia Huck <cohuck@redhat.com>
*/ */
#include <linux/vfio.h> #include <linux/vfio.h>
#include <linux/mdev.h> #include <linux/mdev.h>
#include <linux/nospec.h>
#include <linux/slab.h>
#include "vfio_ccw_private.h" #include "vfio_ccw_private.h"
...@@ -130,11 +134,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev) ...@@ -130,11 +134,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
if ((private->state != VFIO_CCW_STATE_NOT_OPER) && if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) { (private->state != VFIO_CCW_STATE_STANDBY)) {
if (!vfio_ccw_mdev_reset(mdev)) if (!vfio_ccw_sch_quiesce(private->sch))
private->state = VFIO_CCW_STATE_STANDBY; private->state = VFIO_CCW_STATE_STANDBY;
/* The state will be NOT_OPER on error. */ /* The state will be NOT_OPER on error. */
} }
cp_free(&private->cp);
private->mdev = NULL; private->mdev = NULL;
atomic_inc(&private->avail); atomic_inc(&private->avail);
...@@ -146,20 +151,66 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev) ...@@ -146,20 +151,66 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
struct vfio_ccw_private *private = struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev)); dev_get_drvdata(mdev_parent_dev(mdev));
unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
int ret;
private->nb.notifier_call = vfio_ccw_mdev_notifier; private->nb.notifier_call = vfio_ccw_mdev_notifier;
return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&events, &private->nb); &events, &private->nb);
if (ret)
return ret;
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&private->nb);
return ret;
} }
static void vfio_ccw_mdev_release(struct mdev_device *mdev) static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{ {
struct vfio_ccw_private *private = struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev)); dev_get_drvdata(mdev_parent_dev(mdev));
int i;
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
if (!vfio_ccw_mdev_reset(mdev))
private->state = VFIO_CCW_STATE_STANDBY;
/* The state will be NOT_OPER on error. */
}
cp_free(&private->cp);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&private->nb); &private->nb);
for (i = 0; i < private->num_regions; i++)
private->region[i].ops->release(private, &private->region[i]);
private->num_regions = 0;
kfree(private->region);
private->region = NULL;
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_io_region *region;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
mutex_lock(&private->io_mutex);
region = private->io_region;
if (copy_to_user(buf, (void *)region + pos, count))
ret = -EFAULT;
else
ret = count;
mutex_unlock(&private->io_mutex);
return ret;
} }
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev, static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
...@@ -167,18 +218,54 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev, ...@@ -167,18 +218,54 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
size_t count, size_t count,
loff_t *ppos) loff_t *ppos)
{ {
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private; struct vfio_ccw_private *private;
private = dev_get_drvdata(mdev_parent_dev(mdev));
if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
return -EINVAL;
switch (index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
default:
index -= VFIO_CCW_NUM_REGIONS;
return private->region[index].ops->read(private, buf, count,
ppos);
}
return -EINVAL;
}
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
const char __user *buf,
size_t count, loff_t *ppos)
{
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_io_region *region; struct ccw_io_region *region;
int ret;
if (*ppos + count > sizeof(*region)) if (pos + count > sizeof(*region))
return -EINVAL; return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev)); if (!mutex_trylock(&private->io_mutex))
return -EAGAIN;
region = private->io_region; region = private->io_region;
if (copy_to_user(buf, (void *)region + *ppos, count)) if (copy_from_user((void *)region + pos, buf, count)) {
return -EFAULT; ret = -EFAULT;
goto out_unlock;
}
return count; vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
if (region->ret_code != 0)
private->state = VFIO_CCW_STATE_IDLE;
ret = (region->ret_code != 0) ? region->ret_code : count;
out_unlock:
mutex_unlock(&private->io_mutex);
return ret;
} }
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev, static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
...@@ -186,42 +273,47 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev, ...@@ -186,42 +273,47 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
size_t count, size_t count,
loff_t *ppos) loff_t *ppos)
{ {
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
struct vfio_ccw_private *private; struct vfio_ccw_private *private;
struct ccw_io_region *region;
if (*ppos + count > sizeof(*region))
return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev)); private = dev_get_drvdata(mdev_parent_dev(mdev));
if (private->state != VFIO_CCW_STATE_IDLE)
return -EACCES;
region = private->io_region; if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
if (copy_from_user((void *)region + *ppos, buf, count)) return -EINVAL;
return -EFAULT;
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ); switch (index) {
if (region->ret_code != 0) { case VFIO_CCW_CONFIG_REGION_INDEX:
private->state = VFIO_CCW_STATE_IDLE; return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
return region->ret_code; default:
index -= VFIO_CCW_NUM_REGIONS;
return private->region[index].ops->write(private, buf, count,
ppos);
} }
return count; return -EINVAL;
} }
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info) static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
struct mdev_device *mdev)
{ {
struct vfio_ccw_private *private;
private = dev_get_drvdata(mdev_parent_dev(mdev));
info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET; info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
info->num_regions = VFIO_CCW_NUM_REGIONS; info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
info->num_irqs = VFIO_CCW_NUM_IRQS; info->num_irqs = VFIO_CCW_NUM_IRQS;
return 0; return 0;
} }
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
u16 *cap_type_id, struct mdev_device *mdev,
void **cap_type) unsigned long arg)
{ {
struct vfio_ccw_private *private;
int i;
private = dev_get_drvdata(mdev_parent_dev(mdev));
switch (info->index) { switch (info->index) {
case VFIO_CCW_CONFIG_REGION_INDEX: case VFIO_CCW_CONFIG_REGION_INDEX:
info->offset = 0; info->offset = 0;
...@@ -229,9 +321,55 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, ...@@ -229,9 +321,55 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
info->flags = VFIO_REGION_INFO_FLAG_READ info->flags = VFIO_REGION_INFO_FLAG_READ
| VFIO_REGION_INFO_FLAG_WRITE; | VFIO_REGION_INFO_FLAG_WRITE;
return 0; return 0;
default: default: /* all other regions are handled via capability chain */
{
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
struct vfio_region_info_cap_type cap_type = {
.header.id = VFIO_REGION_INFO_CAP_TYPE,
.header.version = 1 };
int ret;
if (info->index >=
VFIO_CCW_NUM_REGIONS + private->num_regions)
return -EINVAL; return -EINVAL;
info->index = array_index_nospec(info->index,
VFIO_CCW_NUM_REGIONS +
private->num_regions);
i = info->index - VFIO_CCW_NUM_REGIONS;
info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
info->size = private->region[i].size;
info->flags = private->region[i].flags;
cap_type.type = private->region[i].type;
cap_type.subtype = private->region[i].subtype;
ret = vfio_info_add_capability(&caps, &cap_type.header,
sizeof(cap_type));
if (ret)
return ret;
info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info->argsz < sizeof(*info) + caps.size) {
info->argsz = sizeof(*info) + caps.size;
info->cap_offset = 0;
} else {
vfio_info_cap_shift(&caps, sizeof(*info));
if (copy_to_user((void __user *)arg + sizeof(*info),
caps.buf, caps.size)) {
kfree(caps.buf);
return -EFAULT;
}
info->cap_offset = sizeof(*info);
}
kfree(caps.buf);
}
} }
return 0;
} }
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
...@@ -308,6 +446,32 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev, ...@@ -308,6 +446,32 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
} }
} }
/*
 * Append a device-specific region (type VFIO_REGION_TYPE_CCW) to this
 * device's region array, growing the array by one slot.
 *
 * @subtype: region subtype reported to userspace
 * @ops:     read/write/release callbacks for the region
 * @size:    region size in bytes
 * @flags:   VFIO_REGION_INFO_FLAG_* bits
 * @data:    backing storage handed to the callbacks
 *
 * Returns 0 on success or -ENOMEM if the array could not be grown.
 */
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
				 unsigned int subtype,
				 const struct vfio_ccw_regops *ops,
				 size_t size, u32 flags, void *data)
{
	struct vfio_ccw_region *region;

	region = krealloc(private->region,
			  (private->num_regions + 1) * sizeof(*region),
			  GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	private->region = region;

	/* Fill in the freshly added slot. */
	region = &private->region[private->num_regions];
	region->type = VFIO_REGION_TYPE_CCW;
	region->subtype = subtype;
	region->ops = ops;
	region->size = size;
	region->flags = flags;
	region->data = data;

	private->num_regions++;

	return 0;
}
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
unsigned int cmd, unsigned int cmd,
unsigned long arg) unsigned long arg)
...@@ -328,7 +492,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, ...@@ -328,7 +492,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz) if (info.argsz < minsz)
return -EINVAL; return -EINVAL;
ret = vfio_ccw_mdev_get_device_info(&info); ret = vfio_ccw_mdev_get_device_info(&info, mdev);
if (ret) if (ret)
return ret; return ret;
...@@ -337,8 +501,6 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, ...@@ -337,8 +501,6 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
case VFIO_DEVICE_GET_REGION_INFO: case VFIO_DEVICE_GET_REGION_INFO:
{ {
struct vfio_region_info info; struct vfio_region_info info;
u16 cap_type_id = 0;
void *cap_type = NULL;
minsz = offsetofend(struct vfio_region_info, offset); minsz = offsetofend(struct vfio_region_info, offset);
...@@ -348,8 +510,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, ...@@ -348,8 +510,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz) if (info.argsz < minsz)
return -EINVAL; return -EINVAL;
ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id, ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
&cap_type);
if (ret) if (ret)
return ret; return ret;
......
...@@ -3,9 +3,11 @@ ...@@ -3,9 +3,11 @@
* Private stuff for vfio_ccw driver * Private stuff for vfio_ccw driver
* *
* Copyright IBM Corp. 2017 * Copyright IBM Corp. 2017
* Copyright Red Hat, Inc. 2019
* *
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
* Cornelia Huck <cohuck@redhat.com>
*/ */
#ifndef _VFIO_CCW_PRIVATE_H_ #ifndef _VFIO_CCW_PRIVATE_H_
...@@ -19,6 +21,40 @@ ...@@ -19,6 +21,40 @@
#include "css.h" #include "css.h"
#include "vfio_ccw_cp.h" #include "vfio_ccw_cp.h"
#define VFIO_CCW_OFFSET_SHIFT 10
#define VFIO_CCW_OFFSET_TO_INDEX(off) (off >> VFIO_CCW_OFFSET_SHIFT)
#define VFIO_CCW_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_CCW_OFFSET_SHIFT)
#define VFIO_CCW_OFFSET_MASK (((u64)(1) << VFIO_CCW_OFFSET_SHIFT) - 1)
/* capability chain handling similar to vfio-pci */
struct vfio_ccw_private;
struct vfio_ccw_region;
struct vfio_ccw_regops {
ssize_t (*read)(struct vfio_ccw_private *private, char __user *buf,
size_t count, loff_t *ppos);
ssize_t (*write)(struct vfio_ccw_private *private,
const char __user *buf, size_t count, loff_t *ppos);
void (*release)(struct vfio_ccw_private *private,
struct vfio_ccw_region *region);
};
struct vfio_ccw_region {
u32 type;
u32 subtype;
const struct vfio_ccw_regops *ops;
void *data;
size_t size;
u32 flags;
};
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
unsigned int subtype,
const struct vfio_ccw_regops *ops,
size_t size, u32 flags, void *data);
int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
/** /**
* struct vfio_ccw_private * struct vfio_ccw_private
* @sch: pointer to the subchannel * @sch: pointer to the subchannel
...@@ -28,6 +64,10 @@ ...@@ -28,6 +64,10 @@
* @mdev: pointer to the mediated device * @mdev: pointer to the mediated device
* @nb: notifier for vfio events * @nb: notifier for vfio events
* @io_region: MMIO region to input/output I/O arguments/results * @io_region: MMIO region to input/output I/O arguments/results
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
* @cmd_region: MMIO region for asynchronous I/O commands other than START
* @num_regions: number of additional regions
* @cp: channel program for the current I/O operation * @cp: channel program for the current I/O operation
* @irb: irb info received from interrupt * @irb: irb info received from interrupt
* @scsw: scsw info * @scsw: scsw info
...@@ -42,6 +82,10 @@ struct vfio_ccw_private { ...@@ -42,6 +82,10 @@ struct vfio_ccw_private {
struct mdev_device *mdev; struct mdev_device *mdev;
struct notifier_block nb; struct notifier_block nb;
struct ccw_io_region *io_region; struct ccw_io_region *io_region;
struct mutex io_mutex;
struct vfio_ccw_region *region;
struct ccw_cmd_region *cmd_region;
int num_regions;
struct channel_program cp; struct channel_program cp;
struct irb irb; struct irb irb;
...@@ -63,7 +107,8 @@ enum vfio_ccw_state { ...@@ -63,7 +107,8 @@ enum vfio_ccw_state {
VFIO_CCW_STATE_NOT_OPER, VFIO_CCW_STATE_NOT_OPER,
VFIO_CCW_STATE_STANDBY, VFIO_CCW_STATE_STANDBY,
VFIO_CCW_STATE_IDLE, VFIO_CCW_STATE_IDLE,
VFIO_CCW_STATE_BUSY, VFIO_CCW_STATE_CP_PROCESSING,
VFIO_CCW_STATE_CP_PENDING,
/* last element! */ /* last element! */
NR_VFIO_CCW_STATES NR_VFIO_CCW_STATES
}; };
...@@ -75,6 +120,7 @@ enum vfio_ccw_event { ...@@ -75,6 +120,7 @@ enum vfio_ccw_event {
VFIO_CCW_EVENT_NOT_OPER, VFIO_CCW_EVENT_NOT_OPER,
VFIO_CCW_EVENT_IO_REQ, VFIO_CCW_EVENT_IO_REQ,
VFIO_CCW_EVENT_INTERRUPT, VFIO_CCW_EVENT_INTERRUPT,
VFIO_CCW_EVENT_ASYNC_REQ,
/* last element! */ /* last element! */
NR_VFIO_CCW_EVENTS NR_VFIO_CCW_EVENTS
}; };
......
...@@ -353,6 +353,10 @@ struct vfio_region_gfx_edid { ...@@ -353,6 +353,10 @@ struct vfio_region_gfx_edid {
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2 #define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
}; };
#define VFIO_REGION_TYPE_CCW (2)
/* ccw sub-types */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
/* /*
* 10de vendor sub-type * 10de vendor sub-type
* *
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/types.h> #include <linux/types.h>
/* used for START SUBCHANNEL, always present */
struct ccw_io_region { struct ccw_io_region {
#define ORB_AREA_SIZE 12 #define ORB_AREA_SIZE 12
__u8 orb_area[ORB_AREA_SIZE]; __u8 orb_area[ORB_AREA_SIZE];
...@@ -22,4 +23,15 @@ struct ccw_io_region { ...@@ -22,4 +23,15 @@ struct ccw_io_region {
__u32 ret_code; __u32 ret_code;
} __packed; } __packed;
/*
* used for processing commands that trigger asynchronous actions
* Note: this is controlled by a capability
*/
#define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0)
#define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1)
struct ccw_cmd_region {
__u32 command;
__u32 ret_code;
} __packed;
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment