Commit 866c4b8a authored by Linus Torvalds

Merge tag 's390-5.13-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Vasily Gorbik:
 "Fix races in vfio-ccw request handling"

* tag 's390-5.13-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  vfio-ccw: Serialize FSM IDLE state with I/O completion
  vfio-ccw: Reset FSM state to IDLE inside FSM
  vfio-ccw: Check initialized flag in cp_init()
parents 6799d4f2 ffa99c43
@@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
 	int ret;
 
+	/* this is an error in the caller */
+	if (cp->initialized)
+		return -EBUSY;
+
 	/*
 	 * We only support prefetching the channel program. We assume all channel
 	 * programs executed by supported guests likewise support prefetching.
@@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 	struct vfio_ccw_private *private;
 	struct irb *irb;
 	bool is_final;
+	bool cp_is_finished = false;
 
 	private = container_of(work, struct vfio_ccw_private, io_work);
 	irb = &private->irb;
@@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 		      (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
 	if (scsw_is_solicited(&irb->scsw)) {
 		cp_update_scsw(&private->cp, &irb->scsw);
-		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
+		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
 			cp_free(&private->cp);
+			cp_is_finished = true;
+		}
 	}
 	mutex_lock(&private->io_mutex);
 	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 	mutex_unlock(&private->io_mutex);
 
-	if (private->mdev && is_final)
+	/*
+	 * Reset to IDLE only if processing of a channel program
+	 * has finished. Do not overwrite a possible processing
+	 * state if the final interrupt was for HSCH or CSCH.
+	 */
+	if (private->mdev && cp_is_finished)
 		private->state = VFIO_CCW_STATE_IDLE;
 
 	if (private->io_trigger)
@@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 	}
 
 err_out:
+	private->state = VFIO_CCW_STATE_IDLE;
 	trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
 				      io_region->ret_code, errstr);
 }
@@ -279,8 +279,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
 	}
 
 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
-	if (region->ret_code != 0)
-		private->state = VFIO_CCW_STATE_IDLE;
 	ret = (region->ret_code != 0) ? region->ret_code : count;
 
 out_unlock:
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment