Commit 71fcbc47 authored by Dave Stevenson, committed by Greg Kroah-Hartman

staging: bcm2835-camera: Remove bulk_mutex as it is not required

There is no requirement to serialise bulk transfers, as that is all
handled in VCHI, and if a second MMAL_MSG_TYPE_BUFFER_TO_HOST arrived
before the VCHI_CALLBACK_BULK_RECEIVED for the previous transfer, the
service_callback thread would deadlock on bulk_mutex.

Remove the bulk_mutex so that multiple receives can be scheduled at
once.
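
For illustration only (not part of the patch): a minimal userspace
sketch of the old locking scheme, with a pthread mutex standing in for
bulk_mutex and a single function standing in for the one VCHI
service_callback thread. The boolean parameter is a simplified
stand-in for the driver's real message dispatch. Run as written, it
hangs at the second call, which is the deadlock described above.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t bulk_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* All VCHI events for a service arrive on one callback thread. */
	static void service_callback(int buffer_to_host)
	{
		if (buffer_to_host) {
			/* Old scheme: lock taken here, released only when
			 * VCHI_CALLBACK_BULK_RECEIVED is later delivered
			 * on this same thread.
			 */
			pthread_mutex_lock(&bulk_mutex);
			printf("bulk receive scheduled\n");
		} else {
			/* VCHI_CALLBACK_BULK_RECEIVED for the earlier
			 * transfer: the only place the lock is dropped.
			 */
			pthread_mutex_unlock(&bulk_mutex);
			printf("bulk receive complete\n");
		}
	}

	int main(void)
	{
		service_callback(1);	/* first MMAL_MSG_TYPE_BUFFER_TO_HOST */
		service_callback(1);	/* second arrives first: blocks forever,
					 * so the event below is never delivered
					 */
		service_callback(0);	/* would have released bulk_mutex */
		return 0;
	}

With the mutex gone, each transfer is tracked solely through its own
msg_context, so multiple receives can be outstanding at once.
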
Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 96b7e81a
@@ -162,9 +162,6 @@ struct vchiq_mmal_instance {
 	/* ensure serialised access to service */
 	struct mutex vchiq_mutex;
 
-	/* ensure serialised access to bulk operations */
-	struct mutex bulk_mutex;
-
 	/* vmalloc page to receive scratch bulk xfers into */
 	void *bulk_scratch;
@@ -332,13 +329,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
 	unsigned long flags = 0;
 	int ret;
 
-	/* bulk mutex stops other bulk operations while we have a
-	 * receive in progress - released in callback
-	 */
-	ret = mutex_lock_interruptible(&instance->bulk_mutex);
-	if (ret != 0)
-		return ret;
-
 	rd_len = msg->u.buffer_from_host.buffer_header.length;
 
 	/* take buffer from queue */
@@ -357,8 +347,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
 		 * waiting bulk receive?
 		 */
 
-		mutex_unlock(&instance->bulk_mutex);
-
 		return -EINVAL;
 	}
@@ -399,11 +387,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
 	vchi_service_release(instance->handle);
 
-	if (ret != 0) {
-		/* callback will not be clearing the mutex */
-		mutex_unlock(&instance->bulk_mutex);
-	}
-
 	return ret;
 }
@@ -413,13 +396,6 @@ static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
 {
 	int ret;
 
-	/* bulk mutex stops other bulk operations while we have a
-	 * receive in progress - released in callback
-	 */
-	ret = mutex_lock_interruptible(&instance->bulk_mutex);
-	if (ret != 0)
-		return ret;
-
 	/* zero length indicates this was a dummy transfer */
 	msg_context->u.bulk.buffer_used = 0;
@@ -435,11 +411,6 @@ static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
 	vchi_service_release(instance->handle);
 
-	if (ret != 0) {
-		/* callback will not be clearing the mutex */
-		mutex_unlock(&instance->bulk_mutex);
-	}
-
 	return ret;
 }
@@ -494,18 +465,11 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
 	pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
 
-	/* bulk mutex stops other bulk operations while we
-	 * have a receive in progress
-	 */
-	if (mutex_lock_interruptible(&instance->bulk_mutex))
-		return -EINTR;
-
 	/* get context */
 	if (!buf->msg_context) {
 		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
 		       buf);
-		ret = -EINVAL;
-		goto unlock;
+		return -EINVAL;
 	}
 
 	msg_context = buf->msg_context;
@@ -559,9 +523,6 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
 	vchi_service_release(instance->handle);
 
-unlock:
-	mutex_unlock(&instance->bulk_mutex);
-
 	return ret;
 }
@@ -685,9 +646,6 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
 			    struct mmal_msg_context *msg_context)
 {
-	/* bulk receive operation complete */
-	mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
 	/* replace the buffer header */
 	port_buffer_from_host(msg_context->u.bulk.instance,
 			      msg_context->u.bulk.port);
@@ -703,9 +661,6 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
 {
 	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
 
-	/* bulk receive operation complete */
-	mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
-
 	/* replace the buffer header */
 	port_buffer_from_host(msg_context->u.bulk.instance,
 			      msg_context->u.bulk.port);
@@ -2042,7 +1997,6 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
 		return -ENOMEM;
 
 	mutex_init(&instance->vchiq_mutex);
-	mutex_init(&instance->bulk_mutex);
 
 	instance->bulk_scratch = vmalloc(PAGE_SIZE);