Commit 06caaa27 authored by Eric Farman, committed by Alex Williamson

vfio/ccw: move private initialization to callback

There's already a device initialization callback that is used to
initialize the release completion workaround that was introduced
by commit ebb72b76 ("vfio/ccw: Use the new device life cycle
helpers").

Move the other elements of the vfio_ccw_private struct that
require distinct initialization over to that routine.

With that done, the vfio_ccw_alloc_private routine only does a
kzalloc, so fold it inline.
Signed-off-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
Link: https://lore.kernel.org/r/20221104142007.1314999-4-farman@linux.ibm.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 008a011d
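
Editor's note: the "device initialization callback" referenced in the message above is the .init member of struct vfio_device_ops introduced by the device life cycle helpers; vfio_init_device() invokes it from the mdev probe path, and the paired .release callback runs when the last reference to the embedded vfio_device is dropped. The sketch below only illustrates that wiring and is not part of this commit; vfio_ccw_private_from_mdev() is a hypothetical stand-in for the driver's actual drvdata lookup, and most of the ops table is elided.

/*
 * Illustrative sketch (not from this commit): how the .init/.release
 * life cycle callbacks are wired, assuming the v6.1+ vfio helpers.
 */
static const struct vfio_device_ops vfio_ccw_dev_ops_sketch = {
        .init = vfio_ccw_mdev_init_dev,         /* setup moved by this commit */
        .release = vfio_ccw_mdev_release_dev,   /* matching teardown */
        /* open_device/close_device/read/write/ioctl hooks omitted */
};

static int vfio_ccw_mdev_probe_sketch(struct mdev_device *mdev)
{
        /* Hypothetical helper; the real driver walks the subchannel drvdata. */
        struct vfio_ccw_private *private = vfio_ccw_private_from_mdev(mdev);
        int ret;

        /* Runs vfio_ccw_mdev_init_dev(), i.e. everything this commit moves. */
        ret = vfio_init_device(&private->vdev, &mdev->dev,
                               &vfio_ccw_dev_ops_sketch);
        if (ret)
                return ret;

        ret = vfio_register_emulated_iommu_dev(&private->vdev);
        if (ret)
                vfio_put_device(&private->vdev);        /* invokes .release */
        return ret;
}

One practical effect visible in the diff: the allocations now happen when the mdev is created rather than at subchannel probe time, leaving vfio_ccw_sch_probe() with a bare kzalloc().
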
@@ -23,10 +23,10 @@
 #include "vfio_ccw_private.h"

 struct workqueue_struct *vfio_ccw_work_q;
-static struct kmem_cache *vfio_ccw_io_region;
-static struct kmem_cache *vfio_ccw_cmd_region;
-static struct kmem_cache *vfio_ccw_schib_region;
-static struct kmem_cache *vfio_ccw_crw_region;
+struct kmem_cache *vfio_ccw_io_region;
+struct kmem_cache *vfio_ccw_cmd_region;
+struct kmem_cache *vfio_ccw_schib_region;
+struct kmem_cache *vfio_ccw_crw_region;

 debug_info_t *vfio_ccw_debug_msg_id;
 debug_info_t *vfio_ccw_debug_trace_id;
@@ -79,7 +79,7 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
 	return ret;
 }

-static void vfio_ccw_sch_io_todo(struct work_struct *work)
+void vfio_ccw_sch_io_todo(struct work_struct *work)
 {
 	struct vfio_ccw_private *private;
 	struct irb *irb;
@@ -115,7 +115,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 	eventfd_signal(private->io_trigger, 1);
 }

-static void vfio_ccw_crw_todo(struct work_struct *work)
+void vfio_ccw_crw_todo(struct work_struct *work)
 {
 	struct vfio_ccw_private *private;
@@ -152,62 +152,6 @@ static void vfio_ccw_sch_irq(struct subchannel *sch)
 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
 }

-static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
-{
-	struct vfio_ccw_private *private;
-
-	private = kzalloc(sizeof(*private), GFP_KERNEL);
-	if (!private)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_init(&private->io_mutex);
-	private->state = VFIO_CCW_STATE_STANDBY;
-	INIT_LIST_HEAD(&private->crw);
-	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
-	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
-
-	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
-				       GFP_KERNEL);
-	if (!private->cp.guest_cp)
-		goto out_free_private;
-
-	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
-					       GFP_KERNEL | GFP_DMA);
-	if (!private->io_region)
-		goto out_free_cp;
-
-	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
-						GFP_KERNEL | GFP_DMA);
-	if (!private->cmd_region)
-		goto out_free_io;
-
-	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
-						  GFP_KERNEL | GFP_DMA);
-	if (!private->schib_region)
-		goto out_free_cmd;
-
-	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
-						GFP_KERNEL | GFP_DMA);
-	if (!private->crw_region)
-		goto out_free_schib;
-
-	return private;
-
-out_free_schib:
-	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
-out_free_cmd:
-	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
-out_free_io:
-	kmem_cache_free(vfio_ccw_io_region, private->io_region);
-out_free_cp:
-	kfree(private->cp.guest_cp);
-out_free_private:
-	mutex_destroy(&private->io_mutex);
-	kfree(private);
-	return ERR_PTR(-ENOMEM);
-}
-
 static void vfio_ccw_free_private(struct vfio_ccw_private *private)
 {
 	struct vfio_ccw_crw *crw, *temp;
@@ -257,10 +201,10 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 	if (ret)
 		goto out_free;

-	private = vfio_ccw_alloc_private(sch);
-	if (IS_ERR(private)) {
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private) {
 		device_unregister(&parent->dev);
-		return PTR_ERR(private);
+		return -ENOMEM;
 	}

 	dev_set_drvdata(&sch->dev, parent);
......
@@ -49,8 +49,51 @@ static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev)
 	struct vfio_ccw_private *private =
 		container_of(vdev, struct vfio_ccw_private, vdev);

+	mutex_init(&private->io_mutex);
+	private->state = VFIO_CCW_STATE_STANDBY;
+	INIT_LIST_HEAD(&private->crw);
+	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
 	init_completion(&private->release_comp);
+
+	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
+				       GFP_KERNEL);
+	if (!private->cp.guest_cp)
+		goto out_free_private;
+
+	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+					       GFP_KERNEL | GFP_DMA);
+	if (!private->io_region)
+		goto out_free_cp;
+
+	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
+						GFP_KERNEL | GFP_DMA);
+	if (!private->cmd_region)
+		goto out_free_io;
+
+	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
+						  GFP_KERNEL | GFP_DMA);
+	if (!private->schib_region)
+		goto out_free_cmd;
+
+	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
+						GFP_KERNEL | GFP_DMA);
+	if (!private->crw_region)
+		goto out_free_schib;
+
 	return 0;
+
+out_free_schib:
+	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+out_free_cmd:
+	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+out_free_io:
+	kmem_cache_free(vfio_ccw_io_region, private->io_region);
+out_free_cp:
+	kfree(private->cp.guest_cp);
+out_free_private:
+	mutex_destroy(&private->io_mutex);
+	return -ENOMEM;
 }

 static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
......
@@ -131,6 +131,8 @@ struct vfio_ccw_private {
 } __aligned(8);

 int vfio_ccw_sch_quiesce(struct subchannel *sch);
+void vfio_ccw_sch_io_todo(struct work_struct *work);
+void vfio_ccw_crw_todo(struct work_struct *work);

 extern struct mdev_driver vfio_ccw_mdev_driver;
@@ -178,7 +180,10 @@ static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private,
 }

 extern struct workqueue_struct *vfio_ccw_work_q;
+extern struct kmem_cache *vfio_ccw_io_region;
+extern struct kmem_cache *vfio_ccw_cmd_region;
+extern struct kmem_cache *vfio_ccw_schib_region;
+extern struct kmem_cache *vfio_ccw_crw_region;

 /* s390 debug feature, similar to base cio */
 extern debug_info_t *vfio_ccw_debug_msg_id;
......