Commit a1120db3 authored by Dave Stevenson, committed by Greg Kroah-Hartman

staging: bcm2835-camera: Do not bulk receive from service thread

vchi_bulk_queue_receive will queue up to a default of 4
bulk receives on a connection before blocking.
If called from the VCHI service_callback thread, then
that thread is unable to service the VCHI_CALLBACK_BULK_RECEIVED
events that would enable the queue call to succeed.

Add a workqueue to schedule the call to vchi_bulk_queue_receive
in an alternate context to avoid the lockup.
Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.org>
Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
Acked-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Acked-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8dedab29
...@@ -117,8 +117,10 @@ struct mmal_msg_context { ...@@ -117,8 +117,10 @@ struct mmal_msg_context {
union { union {
struct { struct {
/* work struct for defered callback - must come first */ /* work struct for buffer_cb callback */
struct work_struct work; struct work_struct work;
/* work struct for deferred callback */
struct work_struct buffer_to_host_work;
/* mmal instance */ /* mmal instance */
struct vchiq_mmal_instance *instance; struct vchiq_mmal_instance *instance;
/* mmal port */ /* mmal port */
...@@ -167,6 +169,9 @@ struct vchiq_mmal_instance { ...@@ -167,6 +169,9 @@ struct vchiq_mmal_instance {
/* component to use next */ /* component to use next */
int component_idx; int component_idx;
struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS]; struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
/* ordered workqueue to process all bulk operations */
struct workqueue_struct *bulk_wq;
}; };
static struct mmal_msg_context * static struct mmal_msg_context *
...@@ -248,7 +253,44 @@ static void buffer_work_cb(struct work_struct *work) ...@@ -248,7 +253,44 @@ static void buffer_work_cb(struct work_struct *work)
msg_context->u.bulk.mmal_flags, msg_context->u.bulk.mmal_flags,
msg_context->u.bulk.dts, msg_context->u.bulk.dts,
msg_context->u.bulk.pts); msg_context->u.bulk.pts);
}
/* workqueue scheduled callback to handle receiving buffers
*
* VCHI will allow up to 4 bulk receives to be scheduled before blocking.
* If we block in the service_callback context then we can't process the
* VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
* vchi_bulk_queue_receive() call to complete.
*/
static void buffer_to_host_work_cb(struct work_struct *work)
{
struct mmal_msg_context *msg_context =
container_of(work, struct mmal_msg_context,
u.bulk.buffer_to_host_work);
struct vchiq_mmal_instance *instance = msg_context->instance;
unsigned long len = msg_context->u.bulk.buffer_used;
int ret;
if (!len)
/* Dummy receive to ensure the buffers remain in order */
len = 8;
/* queue the bulk submission */
vchi_service_use(instance->handle);
ret = vchi_bulk_queue_receive(instance->handle,
msg_context->u.bulk.buffer->buffer,
/* Actual receive needs to be a multiple
* of 4 bytes
*/
(len + 3) & ~3,
VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
msg_context);
vchi_service_release(instance->handle);
if (ret != 0)
pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
__func__, msg_context, ret);
} }
/* enqueue a bulk receive for a given message context */ /* enqueue a bulk receive for a given message context */
...@@ -257,7 +299,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance, ...@@ -257,7 +299,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context) struct mmal_msg_context *msg_context)
{ {
unsigned long rd_len; unsigned long rd_len;
int ret;
rd_len = msg->u.buffer_from_host.buffer_header.length; rd_len = msg->u.buffer_from_host.buffer_header.length;
...@@ -293,45 +334,10 @@ static int bulk_receive(struct vchiq_mmal_instance *instance, ...@@ -293,45 +334,10 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts; msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts; msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
/* queue the bulk submission */ queue_work(msg_context->instance->bulk_wq,
vchi_service_use(instance->handle); &msg_context->u.bulk.buffer_to_host_work);
ret = vchi_bulk_queue_receive(instance->handle,
msg_context->u.bulk.buffer->buffer,
/* Actual receive needs to be a multiple
* of 4 bytes
*/
(rd_len + 3) & ~3,
VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
msg_context);
vchi_service_release(instance->handle);
return ret;
}
/* enque a dummy bulk receive for a given message context */
static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
struct mmal_msg_context *msg_context)
{
int ret;
/* zero length indicates this was a dummy transfer */
msg_context->u.bulk.buffer_used = 0;
/* queue the bulk submission */
vchi_service_use(instance->handle);
ret = vchi_bulk_queue_receive(instance->handle,
instance->bulk_scratch,
8,
VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
msg_context);
vchi_service_release(instance->handle); return 0;
return ret;
} }
/* data in message, memcpy from packet into output buffer */ /* data in message, memcpy from packet into output buffer */
...@@ -379,6 +385,8 @@ buffer_from_host(struct vchiq_mmal_instance *instance, ...@@ -379,6 +385,8 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
/* initialise work structure ready to schedule callback */ /* initialise work structure ready to schedule callback */
INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb); INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
buffer_to_host_work_cb);
/* prep the buffer from host message */ /* prep the buffer from host message */
memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */ memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
...@@ -459,7 +467,7 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance, ...@@ -459,7 +467,7 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
if (msg->u.buffer_from_host.buffer_header.flags & if (msg->u.buffer_from_host.buffer_header.flags &
MMAL_BUFFER_HEADER_FLAG_EOS) { MMAL_BUFFER_HEADER_FLAG_EOS) {
msg_context->u.bulk.status = msg_context->u.bulk.status =
dummy_bulk_receive(instance, msg_context); bulk_receive(instance, msg, msg_context);
if (msg_context->u.bulk.status == 0) if (msg_context->u.bulk.status == 0)
return; /* successful bulk submission, bulk return; /* successful bulk submission, bulk
* completion will trigger callback * completion will trigger callback
...@@ -1793,6 +1801,9 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance) ...@@ -1793,6 +1801,9 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
mutex_unlock(&instance->vchiq_mutex); mutex_unlock(&instance->vchiq_mutex);
flush_workqueue(instance->bulk_wq);
destroy_workqueue(instance->bulk_wq);
vfree(instance->bulk_scratch); vfree(instance->bulk_scratch);
idr_destroy(&instance->context_map); idr_destroy(&instance->context_map);
...@@ -1855,6 +1866,11 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance) ...@@ -1855,6 +1866,11 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
params.callback_param = instance; params.callback_param = instance;
instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
WQ_MEM_RECLAIM);
if (!instance->bulk_wq)
goto err_free;
status = vchi_service_open(vchi_instance, &params, &instance->handle); status = vchi_service_open(vchi_instance, &params, &instance->handle);
if (status) { if (status) {
pr_err("Failed to open VCHI service connection (status=%d)\n", pr_err("Failed to open VCHI service connection (status=%d)\n",
...@@ -1869,8 +1885,9 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance) ...@@ -1869,8 +1885,9 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
return 0; return 0;
err_close_services: err_close_services:
vchi_service_close(instance->handle); vchi_service_close(instance->handle);
destroy_workqueue(instance->bulk_wq);
err_free:
vfree(instance->bulk_scratch); vfree(instance->bulk_scratch);
kfree(instance); kfree(instance);
return -ENODEV; return -ENODEV;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment