Commit ae76e134 authored by Michal Nazarewicz, committed by Felipe Balbi

usb: f_fs: refactor ffs_epfile_io

Eliminate one of the return paths by using a ‘goto error_mutex’ and
rearrange some if-bodies, which reduces the indentation level and thus
hopefully makes the function easier to read and reason about.
Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Felipe Balbi <balbi@kernel.org>
parent b3591f67
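
The message above describes the common kernel idiom of funnelling every exit, including the successful one, through a ladder of error labels, so each branch only records its status instead of unlocking and returning on its own. A minimal, self-contained sketch of that shape follows; the context struct, the do_submit() helper and the lock names are illustrative only and are not taken from f_fs.c.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ctx {
        struct mutex            mutex;
        spinlock_t              lock;
        struct completion       done;
        bool                    ready;
        int                     status;
};

/* Hypothetical submit helper; a real one would queue hardware work that
 * eventually calls complete(&ctx->done) from its completion handler. */
static int do_submit(struct example_ctx *ctx)
{
        complete(&ctx->done);
        return 0;
}

static int example_io(struct example_ctx *ctx)
{
        int ret;

        mutex_lock(&ctx->mutex);
        spin_lock_irq(&ctx->lock);

        if (!ctx->ready) {
                /* Only record the error; unlocking happens in one place below. */
                ret = -ESHUTDOWN;
                goto error_lock;
        }

        ret = do_submit(ctx);
        if (unlikely(ret < 0))
                goto error_lock;

        spin_unlock_irq(&ctx->lock);

        if (unlikely(wait_for_completion_interruptible(&ctx->done))) {
                ret = -EINTR;
                goto error_mutex;       /* spinlock already dropped */
        }

        ret = ctx->status;
        goto error_mutex;               /* success leaves through the same labels */

error_lock:
        spin_unlock_irq(&ctx->lock);
error_mutex:
        mutex_unlock(&ctx->mutex);
        return ret;
}

Compared with unlocking and returning inside every branch, the single unwind ladder keeps the lock/unlock pairing visible in one place, which is what the rearranged if-bodies in the diff below achieve for ffs_epfile_io().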
@@ -684,6 +684,7 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
struct usb_request *req;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len = -EINVAL;
@@ -713,7 +714,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
if (!halt) {
/*
* if we _do_ wait above, the epfile->ffs->gadget might be NULL
* before the waiting completes, so do not assign to 'gadget' earlier
* before the waiting completes, so do not assign to 'gadget'
* earlier
*/
struct usb_gadget *gadget = epfile->ffs->gadget;
size_t copied;
@@ -755,17 +757,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
if (epfile->ep != ep) {
/* In the meantime, endpoint got disabled or changed. */
ret = -ESHUTDOWN;
goto error_lock;
} else if (halt) {
/* Halt */
if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
usb_ep_set_halt(ep->ep);
spin_unlock_irq(&epfile->ffs->eps_lock);
ret = -EBADMSG;
} else {
/* Fire the request */
struct usb_request *req;
} else if (unlikely(data_len == -EINVAL)) {
/*
* Sanity Check: even though data_len can't be used
* uninitialized at the time I write this comment, some
@@ -777,82 +774,74 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* For such reason, we're adding this redundant sanity check
* here.
*/
if (unlikely(data_len == -EINVAL)) {
WARN(1, "%s: data_len == -EINVAL\n", __func__);
ret = -EINVAL;
goto error_lock;
}
if (io_data->aio) {
req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
if (unlikely(!req)) {
ret = -ENOMEM;
goto error_lock;
}
req->buf = data;
req->length = data_len;
WARN(1, "%s: data_len == -EINVAL\n", __func__);
ret = -EINVAL;
} else if (!io_data->aio) {
DECLARE_COMPLETION_ONSTACK(done);
io_data->buf = data;
io_data->ep = ep->ep;
io_data->req = req;
io_data->ffs = epfile->ffs;
req = ep->req;
req->buf = data;
req->length = data_len;
req->context = io_data;
req->complete = ffs_epfile_async_io_complete;
req->context = &done;
req->complete = ffs_epfile_io_complete;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (unlikely(ret)) {
usb_ep_free_request(ep->ep, req);
goto error_lock;
}
ret = -EIOCBQUEUED;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (unlikely(ret < 0))
goto error_lock;
spin_unlock_irq(&epfile->ffs->eps_lock);
} else {
DECLARE_COMPLETION_ONSTACK(done);
spin_unlock_irq(&epfile->ffs->eps_lock);
req = ep->req;
req->buf = data;
req->length = data_len;
if (unlikely(wait_for_completion_interruptible(&done))) {
ret = -EINTR;
usb_ep_dequeue(ep->ep, req);
goto error_mutex;
}
req->context = &done;
req->complete = ffs_epfile_io_complete;
/*
* XXX We may end up silently dropping data here. Since data_len
* (i.e. req->length) may be bigger than len (after being
* rounded up to maxpacketsize), we may end up with more data
* than user space has space for.
*/
ret = ep->status;
if (io_data->read && ret > 0) {
ret = copy_to_iter(data, ret, &io_data->data);
if (!ret)
ret = -EFAULT;
}
goto error_mutex;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
ret = -ENOMEM;
} else {
req->buf = data;
req->length = data_len;
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
io_data->buf = data;
io_data->ep = ep->ep;
io_data->req = req;
io_data->ffs = epfile->ffs;
spin_unlock_irq(&epfile->ffs->eps_lock);
req->context = io_data;
req->complete = ffs_epfile_async_io_complete;
if (unlikely(ret < 0)) {
/* nop */
} else if (unlikely(
wait_for_completion_interruptible(&done))) {
ret = -EINTR;
usb_ep_dequeue(ep->ep, req);
} else {
/*
* XXX We may end up silently dropping data
* here. Since data_len (i.e. req->length) may
* be bigger than len (after being rounded up
* to maxpacketsize), we may end up with more
* data than user space has space for.
*/
ret = ep->status;
if (io_data->read && ret > 0) {
ret = copy_to_iter(data, ret, &io_data->data);
if (!ret)
ret = -EFAULT;
}
}
kfree(data);
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (unlikely(ret)) {
usb_ep_free_request(ep->ep, req);
goto error_lock;
}
}
mutex_unlock(&epfile->mutex);
return ret;
ret = -EIOCBQUEUED;
/*
* Do not kfree the buffer in this function. It will be freed
* by ffs_user_copy_worker.
*/
data = NULL;
}
error_lock:
spin_unlock_irq(&epfile->ffs->eps_lock);
error_mutex:
mutex_unlock(&epfile->mutex);
error:
kfree(data);
......
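
One detail in the cleanup ladder above is worth calling out: the asynchronous branch sets data = NULL after handing the buffer to the completion worker, so the kfree(data) shared by every exit path turns into a harmless kfree(NULL), which the kernel defines as a no-op. A small illustrative sketch of that ownership hand-off; struct demo_ctx and demo_async_submit() are hypothetical and not part of f_fs.c.

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_ctx;                        /* opaque, illustrative only */

/* Hypothetical: on success the worker takes over 'buf' and frees it later. */
int demo_async_submit(struct demo_ctx *ctx, void *buf, size_t len);

static int demo_queue_or_free(struct demo_ctx *ctx, size_t len)
{
        char *buf = kmalloc(len, GFP_KERNEL);
        int ret = -ENOMEM;

        if (!buf)
                goto out;

        ret = demo_async_submit(ctx, buf, len);
        if (ret == 0)
                buf = NULL;     /* ownership moved; the common kfree() below becomes a no-op */
out:
        kfree(buf);             /* kfree(NULL) is safe, so one cleanup path covers all branches */
        return ret;
}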