Commit 4e6bafdf authored by Michael Zoran's avatar Michael Zoran Committed by Greg Kroah-Hartman

staging: bcm2835_camera: Use a mapping table for context field of mmal_msg_header

The camera driver passes messages back and forth between the firmware with
requests and replies.  One of the fields of the message header called
context is a pointer so the size changes between 32 bit and 64 bit.

The context field is used to pair reply messages from the firmware with
request messages from the kernel.  The simple solution would be
to use the padding field for the upper 32 bits of pointers, but this
would rely on the firmware always copying the pad field.

So instead handles are generated that are 32 bit numbers and a mapping
stored in a btree as implemented by the btree library in the kernel lib
directory.  The mapping pairs the handle with the pointer to the actual
data. The btree library was chosen since it's very easy to use and
red-black trees would be overkill.

The camera driver also now forces in the btree library if the camera is
included in the build.  The btree library is a hidden configuration
option.
Signed-off-by: Michael Zoran <mzoran@crowfest.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 81b2cbdb
...@@ -5,6 +5,7 @@ config VIDEO_BCM2835 ...@@ -5,6 +5,7 @@ config VIDEO_BCM2835
depends on ARM depends on ARM
select BCM2835_VCHIQ select BCM2835_VCHIQ
select VIDEOBUF2_VMALLOC select VIDEOBUF2_VMALLOC
select BTREE
help help
Say Y here to enable camera host interface devices for Say Y here to enable camera host interface devices for
Broadcom BCM2835 SoC. This operates over the VCHIQ interface Broadcom BCM2835 SoC. This operates over the VCHIQ interface
......
...@@ -86,7 +86,7 @@ struct mmal_msg_header { ...@@ -86,7 +86,7 @@ struct mmal_msg_header {
/* Opaque handle to the control service */ /* Opaque handle to the control service */
u32 control_service; u32 control_service;
struct mmal_msg_context *context; /** a u32 per message context */ u32 context; /** a u32 per message context */
u32 status; /** The status of the vchiq operation */ u32 status; /** The status of the vchiq operation */
u32 padding; u32 padding;
}; };
......
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/btree.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <media/videobuf2-vmalloc.h> #include <media/videobuf2-vmalloc.h>
...@@ -108,8 +110,13 @@ static const char *const port_action_type_names[] = { ...@@ -108,8 +110,13 @@ static const char *const port_action_type_names[] = {
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif #endif
struct vchiq_mmal_instance;
/* normal message context */ /* normal message context */
struct mmal_msg_context { struct mmal_msg_context {
struct vchiq_mmal_instance *instance;
u32 handle;
union { union {
struct { struct {
/* work struct for defered callback - must come first */ /* work struct for defered callback - must come first */
...@@ -146,6 +153,13 @@ struct mmal_msg_context { ...@@ -146,6 +153,13 @@ struct mmal_msg_context {
}; };
struct vchiq_mmal_context_map {
/* ensure serialized access to the btree(contention should be low) */
spinlock_t spinlock;
struct btree_head32 btree_head;
u32 last_handle;
};
struct vchiq_mmal_instance { struct vchiq_mmal_instance {
VCHI_SERVICE_HANDLE_T handle; VCHI_SERVICE_HANDLE_T handle;
...@@ -158,13 +172,90 @@ struct vchiq_mmal_instance { ...@@ -158,13 +172,90 @@ struct vchiq_mmal_instance {
/* vmalloc page to receive scratch bulk xfers into */ /* vmalloc page to receive scratch bulk xfers into */
void *bulk_scratch; void *bulk_scratch;
/* mapping table between context handles and mmal_msg_contexts */
struct vchiq_mmal_context_map context_map;
/* component to use next */ /* component to use next */
int component_idx; int component_idx;
struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS]; struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
}; };
static struct mmal_msg_context *get_msg_context(struct vchiq_mmal_instance static int __must_check
*instance) mmal_context_map_init(struct vchiq_mmal_context_map *context_map)
{
spin_lock_init(&context_map->spinlock);
context_map->last_handle = 0;
return btree_init32(&context_map->btree_head);
}
/*
 * Tear down the handle -> msg_context btree created by
 * mmal_context_map_init().  Called from vchiq_mmal_finalise() (see the
 * hunk at vchiq_mmal_finalise in this commit); any handles still mapped
 * at this point are simply discarded by btree_destroy32().
 */
static void mmal_context_map_destroy(struct vchiq_mmal_context_map *context_map)
{
/* Exclude concurrent create/lookup/destroy_handle on the same map. */
spin_lock(&context_map->spinlock);
btree_destroy32(&context_map->btree_head);
spin_unlock(&context_map->spinlock);
}
/*
 * Allocate a new 32-bit handle for @msg_context and record the
 * handle -> context mapping in the btree.
 *
 * Handles are generated from a simple wrapping counter; 0 is reserved
 * as "no handle" so callers can use it as a failure sentinel (and the
 * service_callback path treats a 0 context as an error).
 *
 * Returns the new non-zero handle, or 0 on insertion failure
 * (most likely allocation failure inside btree_insert32()).
 *
 * NOTE(review): btree_insert32() may allocate with @gfp while
 * context_map->spinlock is held; the only caller in this commit
 * (get_msg_context()) passes GFP_KERNEL, which can sleep — sleeping
 * under a spinlock is not allowed. Should pass GFP_ATOMIC here or
 * restructure so the insert happens outside the lock — TODO confirm.
 */
static u32
mmal_context_map_create_handle(struct vchiq_mmal_context_map *context_map,
struct mmal_msg_context *msg_context,
gfp_t gfp)
{
u32 handle;
spin_lock(&context_map->spinlock);
/*
 * Search for the next free handle.  NOTE(review): this loop never
 * terminates if all 2^32 - 1 handles are in use — in practice the
 * number of in-flight messages is tiny, but it is unbounded in theory.
 */
while (1) {
/* just use a simple count for handles, but do not use 0 */
context_map->last_handle++;
if (!context_map->last_handle)
context_map->last_handle++;
handle = context_map->last_handle;
/* check if the handle is already in use */
if (!btree_lookup32(&context_map->btree_head, handle))
break;
}
if (btree_insert32(&context_map->btree_head, handle,
msg_context, gfp)) {
/* probably out of memory */
spin_unlock(&context_map->spinlock);
return 0;
}
spin_unlock(&context_map->spinlock);
return handle;
}
/*
 * Translate a 32-bit handle received from the firmware back into the
 * kernel-side struct mmal_msg_context pointer it was created for.
 *
 * Returns NULL for handle 0 (the reserved "no handle" value) or for any
 * handle not present in the map — callers use this to reject stale or
 * corrupt context values coming back from the VideoCore.
 */
static struct mmal_msg_context *
mmal_context_map_lookup_handle(struct vchiq_mmal_context_map *context_map,
u32 handle)
{
struct mmal_msg_context *msg_context;
if (!handle)
return NULL;
/* Lookup under the lock so a concurrent remove cannot race us. */
spin_lock(&context_map->spinlock);
msg_context = btree_lookup32(&context_map->btree_head, handle);
spin_unlock(&context_map->spinlock);
return msg_context;
}
/*
 * Remove a handle -> context mapping, making the handle invalid for
 * future lookups.  Called from release_msg_context() just before the
 * context itself is freed.  Removing a handle that is not in the map is
 * harmless (btree_remove32() simply finds nothing).
 */
static void
mmal_context_map_destroy_handle(struct vchiq_mmal_context_map *context_map,
u32 handle)
{
spin_lock(&context_map->spinlock);
btree_remove32(&context_map->btree_head, handle);
spin_unlock(&context_map->spinlock);
}
static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance *instance)
{ {
struct mmal_msg_context *msg_context; struct mmal_msg_context *msg_context;
...@@ -172,11 +263,32 @@ static struct mmal_msg_context *get_msg_context(struct vchiq_mmal_instance ...@@ -172,11 +263,32 @@ static struct mmal_msg_context *get_msg_context(struct vchiq_mmal_instance
msg_context = kmalloc(sizeof(*msg_context), GFP_KERNEL); msg_context = kmalloc(sizeof(*msg_context), GFP_KERNEL);
memset(msg_context, 0, sizeof(*msg_context)); memset(msg_context, 0, sizeof(*msg_context));
msg_context->instance = instance;
msg_context->handle =
mmal_context_map_create_handle(&instance->context_map,
msg_context,
GFP_KERNEL);
if (!msg_context->handle) {
kfree(msg_context);
return NULL;
}
return msg_context; return msg_context;
} }
static void release_msg_context(struct mmal_msg_context *msg_context) static struct mmal_msg_context *
lookup_msg_context(struct vchiq_mmal_instance *instance, u32 handle)
{
return mmal_context_map_lookup_handle(&instance->context_map,
handle);
}
static void
release_msg_context(struct mmal_msg_context *msg_context)
{ {
mmal_context_map_destroy_handle(&msg_context->instance->context_map,
msg_context->handle);
kfree(msg_context); kfree(msg_context);
} }
...@@ -199,7 +311,8 @@ static void event_to_host_cb(struct vchiq_mmal_instance *instance, ...@@ -199,7 +311,8 @@ static void event_to_host_cb(struct vchiq_mmal_instance *instance,
*/ */
static void buffer_work_cb(struct work_struct *work) static void buffer_work_cb(struct work_struct *work)
{ {
struct mmal_msg_context *msg_context = (struct mmal_msg_context *)work; struct mmal_msg_context *msg_context =
container_of(work, struct mmal_msg_context, u.bulk.work);
msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance, msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
msg_context->u.bulk.port, msg_context->u.bulk.port,
...@@ -412,7 +525,7 @@ buffer_from_host(struct vchiq_mmal_instance *instance, ...@@ -412,7 +525,7 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST; m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
m.h.magic = MMAL_MAGIC; m.h.magic = MMAL_MAGIC;
m.h.context = msg_context; m.h.context = msg_context->handle;
m.h.status = 0; m.h.status = 0;
/* drvbuf is our private data passed back */ /* drvbuf is our private data passed back */
...@@ -610,6 +723,7 @@ static void service_callback(void *param, ...@@ -610,6 +723,7 @@ static void service_callback(void *param,
u32 msg_len; u32 msg_len;
struct mmal_msg *msg; struct mmal_msg *msg;
VCHI_HELD_MSG_T msg_handle; VCHI_HELD_MSG_T msg_handle;
struct mmal_msg_context *msg_context;
if (!instance) { if (!instance) {
pr_err("Message callback passed NULL instance\n"); pr_err("Message callback passed NULL instance\n");
...@@ -646,23 +760,25 @@ static void service_callback(void *param, ...@@ -646,23 +760,25 @@ static void service_callback(void *param,
default: default:
/* messages dependent on header context to complete */ /* messages dependent on header context to complete */
/* todo: the msg.context really ought to be sanity
* checked before we just use it, afaict it comes back
* and is used raw from the videocore. Perhaps it
* should be verified the address lies in the kernel
* address space.
*/
if (!msg->h.context) { if (!msg->h.context) {
pr_err("received message context was null!\n"); pr_err("received message context was null!\n");
vchi_held_msg_release(&msg_handle); vchi_held_msg_release(&msg_handle);
break; break;
} }
msg_context = lookup_msg_context(instance,
msg->h.context);
if (!msg_context) {
pr_err("received invalid message context %u!\n",
msg->h.context);
vchi_held_msg_release(&msg_handle);
break;
}
/* fill in context values */ /* fill in context values */
msg->h.context->u.sync.msg_handle = msg_handle; msg_context->u.sync.msg_handle = msg_handle;
msg->h.context->u.sync.msg = msg; msg_context->u.sync.msg = msg;
msg->h.context->u.sync.msg_len = msg_len; msg_context->u.sync.msg_len = msg_len;
/* todo: should this check (completion_done() /* todo: should this check (completion_done()
* == 1) for no one waiting? or do we need a * == 1) for no one waiting? or do we need a
...@@ -674,7 +790,7 @@ static void service_callback(void *param, ...@@ -674,7 +790,7 @@ static void service_callback(void *param,
*/ */
/* complete message so caller knows it happened */ /* complete message so caller knows it happened */
complete(&msg->h.context->u.sync.cmplt); complete(&msg_context->u.sync.cmplt);
break; break;
} }
...@@ -706,7 +822,7 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance, ...@@ -706,7 +822,7 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
struct mmal_msg **msg_out, struct mmal_msg **msg_out,
VCHI_HELD_MSG_T *msg_handle_out) VCHI_HELD_MSG_T *msg_handle_out)
{ {
struct mmal_msg_context msg_context; struct mmal_msg_context *msg_context;
int ret; int ret;
/* payload size must not cause message to exceed max size */ /* payload size must not cause message to exceed max size */
...@@ -717,10 +833,14 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance, ...@@ -717,10 +833,14 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
return -EINVAL; return -EINVAL;
} }
init_completion(&msg_context.u.sync.cmplt); msg_context = get_msg_context(instance);
if (!msg_context)
return -ENOMEM;
init_completion(&msg_context->u.sync.cmplt);
msg->h.magic = MMAL_MAGIC; msg->h.magic = MMAL_MAGIC;
msg->h.context = &msg_context; msg->h.context = msg_context->handle;
msg->h.status = 0; msg->h.status = 0;
DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len), DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
...@@ -737,20 +857,23 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance, ...@@ -737,20 +857,23 @@ static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
if (ret) { if (ret) {
pr_err("error %d queuing message\n", ret); pr_err("error %d queuing message\n", ret);
release_msg_context(msg_context);
return ret; return ret;
} }
ret = wait_for_completion_timeout(&msg_context.u.sync.cmplt, 3 * HZ); ret = wait_for_completion_timeout(&msg_context->u.sync.cmplt, 3 * HZ);
if (ret <= 0) { if (ret <= 0) {
pr_err("error %d waiting for sync completion\n", ret); pr_err("error %d waiting for sync completion\n", ret);
if (ret == 0) if (ret == 0)
ret = -ETIME; ret = -ETIME;
/* todo: what happens if the message arrives after aborting */ /* todo: what happens if the message arrives after aborting */
release_msg_context(msg_context);
return ret; return ret;
} }
*msg_out = msg_context.u.sync.msg; *msg_out = msg_context->u.sync.msg;
*msg_handle_out = msg_context.u.sync.msg_handle; *msg_handle_out = msg_context->u.sync.msg_handle;
release_msg_context(msg_context);
return 0; return 0;
} }
...@@ -1829,6 +1952,8 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance) ...@@ -1829,6 +1952,8 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
vfree(instance->bulk_scratch); vfree(instance->bulk_scratch);
mmal_context_map_destroy(&instance->context_map);
kfree(instance); kfree(instance);
return status; return status;
...@@ -1888,6 +2013,13 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance) ...@@ -1888,6 +2013,13 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
instance->bulk_scratch = vmalloc(PAGE_SIZE); instance->bulk_scratch = vmalloc(PAGE_SIZE);
status = mmal_context_map_init(&instance->context_map);
if (status) {
pr_err("Failed to init context map (status=%d)\n", status);
kfree(instance);
return status;
}
params.callback_param = instance; params.callback_param = instance;
status = vchi_service_open(vchi_instance, &params, &instance->handle); status = vchi_service_open(vchi_instance, &params, &instance->handle);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment