Commit 52fcb3ec authored by Andy Walls, committed by Mauro Carvalho Chehab

V4L/DVB (13429): cx18: Add Memory Descriptor List (MDL) layer to buffer handling

Add a Memory Descriptor List (MDL) layer to buffer handling to implement
scatter-gather I/O.  Currently there is still only 1 buffer per MDL.
Signed-off-by: Andy Walls <awalls@radix.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
parent fa655dda
...@@ -669,6 +669,12 @@ static int __devinit cx18_init_struct1(struct cx18 *cx) ...@@ -669,6 +669,12 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
cx->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE; cx->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE;
cx->vbi.sliced_in = &cx->vbi.in.fmt.sliced; cx->vbi.sliced_in = &cx->vbi.in.fmt.sliced;
/* IVTV style VBI insertion into MPEG streams */
INIT_LIST_HEAD(&cx->vbi.sliced_mpeg_buf.list);
INIT_LIST_HEAD(&cx->vbi.sliced_mpeg_mdl.list);
INIT_LIST_HEAD(&cx->vbi.sliced_mpeg_mdl.buf_list);
list_add(&cx->vbi.sliced_mpeg_buf.list,
&cx->vbi.sliced_mpeg_mdl.buf_list);
return 0; return 0;
} }
......
...@@ -246,8 +246,8 @@ struct cx18_options { ...@@ -246,8 +246,8 @@ struct cx18_options {
int radio; /* enable/disable radio */ int radio; /* enable/disable radio */
}; };
/* per-buffer bit flags */ /* per-mdl bit flags */
#define CX18_F_B_NEED_BUF_SWAP 0 /* this buffer should be byte swapped */ #define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */
/* per-stream, s_flags */ /* per-stream, s_flags */
#define CX18_F_S_CLAIMED 3 /* this stream is claimed */ #define CX18_F_S_CLAIMED 3 /* this stream is claimed */
...@@ -274,15 +274,26 @@ struct cx18_options { ...@@ -274,15 +274,26 @@ struct cx18_options {
struct cx18_buffer { struct cx18_buffer {
struct list_head list; struct list_head list;
dma_addr_t dma_handle; dma_addr_t dma_handle;
u32 id;
unsigned long b_flags;
unsigned skipped;
char *buf; char *buf;
u32 bytesused; u32 bytesused;
u32 readpos; u32 readpos;
}; };
struct cx18_mdl {
struct list_head list;
u32 id; /* index into cx->scb->cpu_mdl[] of 1st cx18_mdl_ent */
unsigned int skipped;
unsigned long m_flags;
struct list_head buf_list;
struct cx18_buffer *curr_buf; /* current buffer in list for reading */
u32 bytesused;
u32 readpos;
};
struct cx18_queue { struct cx18_queue {
struct list_head list; struct list_head list;
atomic_t depth; atomic_t depth;
...@@ -346,14 +357,20 @@ struct cx18_stream { ...@@ -346,14 +357,20 @@ struct cx18_stream {
PCI_DMA_NONE */ PCI_DMA_NONE */
wait_queue_head_t waitq; wait_queue_head_t waitq;
/* Buffer Stats */ /* Buffers */
u32 buffers; struct list_head buf_pool; /* buffers not attached to an MDL */
u32 buf_size; u32 buffers; /* total buffers owned by this stream */
u32 buf_size; /* size in bytes of a single buffer */
/* MDL sizes - all stream MDLs are the same size */
u32 bufs_per_mdl;
u32 mdl_size; /* total bytes in all buffers in a mdl */
/* Buffer Queues */ /* MDL Queues */
struct cx18_queue q_free; /* free buffers */ struct cx18_queue q_free; /* free - in rotation, not committed */
struct cx18_queue q_busy; /* busy buffers - in use by firmware */ struct cx18_queue q_busy; /* busy - in use by firmware */
struct cx18_queue q_full; /* full buffers - data for user apps */ struct cx18_queue q_full; /* full - data for user apps */
struct cx18_queue q_idle; /* idle - not in rotation */
struct work_struct out_work_order; struct work_struct out_work_order;
...@@ -481,10 +498,11 @@ struct vbi_info { ...@@ -481,10 +498,11 @@ struct vbi_info {
u32 inserted_frame; u32 inserted_frame;
/* /*
* A dummy driver stream transfer buffer with a copy of the next * A dummy driver stream transfer mdl & buffer with a copy of the next
* sliced_mpeg_data[] buffer for output to userland apps. * sliced_mpeg_data[] buffer for output to userland apps.
* Only used in cx18-fileops.c, but its state needs to persist at times. * Only used in cx18-fileops.c, but its state needs to persist at times.
*/ */
struct cx18_mdl sliced_mpeg_mdl;
struct cx18_buffer sliced_mpeg_buf; struct cx18_buffer sliced_mpeg_buf;
}; };
...@@ -511,7 +529,6 @@ struct cx18 { ...@@ -511,7 +529,6 @@ struct cx18 {
u8 is_60hz; u8 is_60hz;
u8 nof_inputs; /* number of video inputs */ u8 nof_inputs; /* number of video inputs */
u8 nof_audio_inputs; /* number of audio inputs */ u8 nof_audio_inputs; /* number of audio inputs */
u16 buffer_id; /* buffer ID counter */
u32 v4l2_cap; /* V4L2 capabilities of card */ u32 v4l2_cap; /* V4L2 capabilities of card */
u32 hw_flags; /* Hardware description of the board */ u32 hw_flags; /* Hardware description of the board */
unsigned int free_mdl_idx; unsigned int free_mdl_idx;
......
...@@ -166,11 +166,12 @@ static void cx18_dualwatch(struct cx18 *cx) ...@@ -166,11 +166,12 @@ static void cx18_dualwatch(struct cx18 *cx)
} }
static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block, int *err) static struct cx18_mdl *cx18_get_mdl(struct cx18_stream *s, int non_block,
int *err)
{ {
struct cx18 *cx = s->cx; struct cx18 *cx = s->cx;
struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI]; struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
struct cx18_buffer *buf; struct cx18_mdl *mdl;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
*err = 0; *err = 0;
...@@ -185,32 +186,33 @@ static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block, ...@@ -185,32 +186,33 @@ static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block,
} }
if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) && if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
!test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) { !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
while ((buf = cx18_dequeue(s_vbi, &s_vbi->q_full))) { while ((mdl = cx18_dequeue(s_vbi,
&s_vbi->q_full))) {
/* byteswap and process VBI data */ /* byteswap and process VBI data */
cx18_process_vbi_data(cx, buf, cx18_process_vbi_data(cx, mdl,
s_vbi->type); s_vbi->type);
cx18_stream_put_buf_fw(s_vbi, buf); cx18_stream_put_mdl_fw(s_vbi, mdl);
} }
} }
buf = &cx->vbi.sliced_mpeg_buf; mdl = &cx->vbi.sliced_mpeg_mdl;
if (buf->readpos != buf->bytesused) if (mdl->readpos != mdl->bytesused)
return buf; return mdl;
} }
/* do we have new data? */ /* do we have new data? */
buf = cx18_dequeue(s, &s->q_full); mdl = cx18_dequeue(s, &s->q_full);
if (buf) { if (mdl) {
if (!test_and_clear_bit(CX18_F_B_NEED_BUF_SWAP, if (!test_and_clear_bit(CX18_F_M_NEED_SWAP,
&buf->b_flags)) &mdl->m_flags))
return buf; return mdl;
if (s->type == CX18_ENC_STREAM_TYPE_MPG) if (s->type == CX18_ENC_STREAM_TYPE_MPG)
/* byteswap MPG data */ /* byteswap MPG data */
cx18_buf_swap(buf); cx18_mdl_swap(mdl);
else { else {
/* byteswap and process VBI data */ /* byteswap and process VBI data */
cx18_process_vbi_data(cx, buf, s->type); cx18_process_vbi_data(cx, mdl, s->type);
} }
return buf; return mdl;
} }
/* return if end of stream */ /* return if end of stream */
...@@ -241,21 +243,28 @@ static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block, ...@@ -241,21 +243,28 @@ static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block,
} }
} }
static void cx18_setup_sliced_vbi_buf(struct cx18 *cx) static void cx18_setup_sliced_vbi_mdl(struct cx18 *cx)
{ {
struct cx18_mdl *mdl = &cx->vbi.sliced_mpeg_mdl;
struct cx18_buffer *buf = &cx->vbi.sliced_mpeg_buf;
int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES; int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
cx->vbi.sliced_mpeg_buf.buf = cx->vbi.sliced_mpeg_data[idx]; buf->buf = cx->vbi.sliced_mpeg_data[idx];
cx->vbi.sliced_mpeg_buf.bytesused = cx->vbi.sliced_mpeg_size[idx]; buf->bytesused = cx->vbi.sliced_mpeg_size[idx];
cx->vbi.sliced_mpeg_buf.readpos = 0; buf->readpos = 0;
mdl->curr_buf = NULL;
mdl->bytesused = cx->vbi.sliced_mpeg_size[idx];
mdl->readpos = 0;
} }
static size_t cx18_copy_buf_to_user(struct cx18_stream *s, static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
struct cx18_buffer *buf, char __user *ubuf, size_t ucount) struct cx18_buffer *buf, char __user *ubuf, size_t ucount, bool *stop)
{ {
struct cx18 *cx = s->cx; struct cx18 *cx = s->cx;
size_t len = buf->bytesused - buf->readpos; size_t len = buf->bytesused - buf->readpos;
*stop = false;
if (len > ucount) if (len > ucount)
len = ucount; len = ucount;
if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG && if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG &&
...@@ -335,7 +344,8 @@ static size_t cx18_copy_buf_to_user(struct cx18_stream *s, ...@@ -335,7 +344,8 @@ static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
/* We declare we actually found a Program Pack*/ /* We declare we actually found a Program Pack*/
cx->search_pack_header = 0; /* expect vid PES */ cx->search_pack_header = 0; /* expect vid PES */
len = (char *)q - start; len = (char *)q - start;
cx18_setup_sliced_vbi_buf(cx); cx18_setup_sliced_vbi_mdl(cx);
*stop = true;
break; break;
} }
} }
...@@ -352,6 +362,60 @@ static size_t cx18_copy_buf_to_user(struct cx18_stream *s, ...@@ -352,6 +362,60 @@ static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
return len; return len;
} }
/**
 * list_entry_is_past_end - check if a previous loop cursor is off list end
 * @pos: the type * previously used as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Check if the entry's list_head is the head of the list, thus it's not a
 * real entry but was the loop cursor that walked past the end.
 * Companion to list_for_each_entry_from() when the cursor persists between
 * calls (as mdl->curr_buf does).
 */
#define list_entry_is_past_end(pos, head, member) \
(&pos->member == (head))
/*
 * cx18_copy_mdl_to_user() - copy captured data from an MDL's buffers to
 * the user's read() buffer.
 *
 * Resumes at mdl->curr_buf (the persistent per-MDL read cursor, reset to
 * NULL whenever the MDL leaves q_full) and walks the remaining buffers,
 * copying the unread bytes of each via cx18_copy_buf_to_user().
 *
 * Returns the total number of bytes copied to @ubuf, or a negative value
 * propagated from cx18_copy_buf_to_user() on a copy error.
 */
static size_t cx18_copy_mdl_to_user(struct cx18_stream *s,
struct cx18_mdl *mdl, char __user *ubuf, size_t ucount)
{
size_t tot_written = 0;
int rc;
bool stop = false;
/* First read of this MDL: start the cursor at the first buffer */
if (mdl->curr_buf == NULL)
mdl->curr_buf = list_first_entry(&mdl->buf_list,
struct cx18_buffer, list);
if (list_entry_is_past_end(mdl->curr_buf, &mdl->buf_list, list)) {
/*
 * For some reason we've exhausted the buffers, but the MDL
 * object still said some data was unread.
 * Fix that and bail out.
 */
mdl->readpos = mdl->bytesused;
return 0;
}
list_for_each_entry_from(mdl->curr_buf, &mdl->buf_list, list) {
/* Skip buffers the cursor already fully consumed */
if (mdl->curr_buf->readpos >= mdl->curr_buf->bytesused)
continue;
rc = cx18_copy_buf_to_user(s, mdl->curr_buf, ubuf + tot_written,
ucount - tot_written, &stop);
if (rc < 0)
return rc;
mdl->readpos += rc;
tot_written += rc;
/*
 * A partial buffer copy (readpos < bytesused) means the user's
 * buffer filled mid-buffer, so that case also ends this pass.
 */
if (stop || /* Forced stopping point for VBI insertion */
tot_written >= ucount || /* Reader request satisfied */
mdl->curr_buf->readpos < mdl->curr_buf->bytesused ||
mdl->readpos >= mdl->bytesused) /* MDL buffers drained */
break;
}
return tot_written;
}
static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf, static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
size_t tot_count, int non_block) size_t tot_count, int non_block)
{ {
...@@ -373,12 +437,12 @@ static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf, ...@@ -373,12 +437,12 @@ static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
single_frame = 1; single_frame = 1;
for (;;) { for (;;) {
struct cx18_buffer *buf; struct cx18_mdl *mdl;
int rc; int rc;
buf = cx18_get_buffer(s, non_block, &rc); mdl = cx18_get_mdl(s, non_block, &rc);
/* if there is no data available... */ /* if there is no data available... */
if (buf == NULL) { if (mdl == NULL) {
/* if we got data, then return that regardless */ /* if we got data, then return that regardless */
if (tot_written) if (tot_written)
break; break;
...@@ -392,20 +456,20 @@ static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf, ...@@ -392,20 +456,20 @@ static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
return rc; return rc;
} }
rc = cx18_copy_buf_to_user(s, buf, ubuf + tot_written, rc = cx18_copy_mdl_to_user(s, mdl, ubuf + tot_written,
tot_count - tot_written); tot_count - tot_written);
if (buf != &cx->vbi.sliced_mpeg_buf) { if (mdl != &cx->vbi.sliced_mpeg_mdl) {
if (buf->readpos == buf->bytesused) if (mdl->readpos == mdl->bytesused)
cx18_stream_put_buf_fw(s, buf); cx18_stream_put_mdl_fw(s, mdl);
else else
cx18_push(s, buf, &s->q_full); cx18_push(s, mdl, &s->q_full);
} else if (buf->readpos == buf->bytesused) { } else if (mdl->readpos == mdl->bytesused) {
int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES; int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
cx->vbi.sliced_mpeg_size[idx] = 0; cx->vbi.sliced_mpeg_size[idx] = 0;
cx->vbi.inserted_frame++; cx->vbi.inserted_frame++;
cx->vbi_data_inserted += buf->bytesused; cx->vbi_data_inserted += mdl->bytesused;
} }
if (rc < 0) if (rc < 0)
return rc; return rc;
......
...@@ -910,7 +910,8 @@ static int cx18_log_status(struct file *file, void *fh) ...@@ -910,7 +910,8 @@ static int cx18_log_status(struct file *file, void *fh)
continue; continue;
CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n",
s->name, s->s_flags, s->name, s->s_flags,
atomic_read(&s->q_full.depth) * 100 / s->buffers, atomic_read(&s->q_full.depth) * s->bufs_per_mdl * 100
/ s->buffers,
(s->buffers * s->buf_size) / 1024, s->buffers); (s->buffers * s->buf_size) / 1024, s->buffers);
} }
CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n", CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n",
......
...@@ -131,13 +131,39 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name) ...@@ -131,13 +131,39 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
* Functions that run in a work_queue work handling context * Functions that run in a work_queue work handling context
*/ */
/*
 * cx18_mdl_send_to_dvb() - feed the data in an MDL's buffers to the DVB
 * software demux.
 *
 * Runs in the work handler context.  Does nothing if the DVB side is not
 * enabled or the MDL holds no data.
 */
static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
struct cx18_buffer *buf;
if (!s->dvb.enabled || mdl->bytesused == 0)
return;
/* We ignore mdl and buf readpos accounting here - it doesn't matter */
/* The likely case: fast path for a single-buffer MDL */
if (list_is_singular(&mdl->buf_list)) {
buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
list);
if (buf->bytesused)
dvb_dmx_swfilter(&s->dvb.demux,
buf->buf, buf->bytesused);
return;
}
list_for_each_entry(buf, &mdl->buf_list, list) {
/* bytesused is set sequentially, so an empty buffer ends the data */
if (buf->bytesused == 0)
break;
dvb_dmx_swfilter(&s->dvb.demux, buf->buf, buf->bytesused);
}
}
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{ {
u32 handle, mdl_ack_count, id; u32 handle, mdl_ack_count, id;
struct cx18_mailbox *mb; struct cx18_mailbox *mb;
struct cx18_mdl_ack *mdl_ack; struct cx18_mdl_ack *mdl_ack;
struct cx18_stream *s; struct cx18_stream *s;
struct cx18_buffer *buf; struct cx18_mdl *mdl;
int i; int i;
mb = &order->mb; mb = &order->mb;
...@@ -158,7 +184,7 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) ...@@ -158,7 +184,7 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
id = mdl_ack->id; id = mdl_ack->id;
/* /*
* Simple integrity check for processing a stale (and possibly * Simple integrity check for processing a stale (and possibly
* inconsistent mailbox): make sure the buffer id is in the * inconsistent mailbox): make sure the MDL id is in the
* valid range for the stream. * valid range for the stream.
* *
* We go through the trouble of dealing with stale mailboxes * We go through the trouble of dealing with stale mailboxes
...@@ -169,44 +195,42 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) ...@@ -169,44 +195,42 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
* There are occasions when we get a half changed mailbox, * There are occasions when we get a half changed mailbox,
* which this check catches for a handle & id mismatch. If the * which this check catches for a handle & id mismatch. If the
* handle and id do correspond, the worst case is that we * handle and id do correspond, the worst case is that we
* completely lost the old buffer, but pick up the new buffer * completely lost the old MDL, but pick up the new MDL
* early (but the new mdl_ack is guaranteed to be good in this * early (but the new mdl_ack is guaranteed to be good in this
* case as the firmware wouldn't point us to a new mdl_ack until * case as the firmware wouldn't point us to a new mdl_ack until
* it's filled in). * it's filled in).
* *
* cx18_queue_get buf() will detect the lost buffers * cx18_queue_get_mdl() will detect the lost MDLs
* and send them back to q_free for fw rotation eventually. * and send them back to q_free for fw rotation eventually.
*/ */
if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
!(id >= s->mdl_base_idx && !(id >= s->mdl_base_idx &&
id < (s->mdl_base_idx + s->buffers))) { id < (s->mdl_base_idx + s->buffers))) {
CX18_WARN("Fell behind! Ignoring stale mailbox with " CX18_WARN("Fell behind! Ignoring stale mailbox with "
" inconsistent data. Lost buffer for mailbox " " inconsistent data. Lost MDL for mailbox "
"seq no %d\n", mb->request); "seq no %d\n", mb->request);
break; break;
} }
buf = cx18_queue_get_buf(s, id, mdl_ack->data_used); mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);
CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id); CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
if (buf == NULL) { if (mdl == NULL) {
CX18_WARN("Could not find buf %d for stream %s\n", CX18_WARN("Could not find MDL %d for stream %s\n",
id, s->name); id, s->name);
continue; continue;
} }
CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n", CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
s->name, buf->bytesused); s->name, mdl->bytesused);
if (s->type != CX18_ENC_STREAM_TYPE_TS) if (s->type != CX18_ENC_STREAM_TYPE_TS)
cx18_enqueue(s, buf, &s->q_full); cx18_enqueue(s, mdl, &s->q_full);
else { else {
if (s->dvb.enabled) cx18_mdl_send_to_dvb(s, mdl);
dvb_dmx_swfilter(&s->dvb.demux, buf->buf, cx18_enqueue(s, mdl, &s->q_free);
buf->bytesused);
cx18_enqueue(s, buf, &s->q_free);
} }
} }
/* Put as many buffers as possible back into fw use */ /* Put as many MDLs as possible back into fw use */
cx18_stream_load_fw_queue(s); cx18_stream_load_fw_queue(s);
wake_up(&cx->dma_waitq); wake_up(&cx->dma_waitq);
...@@ -616,7 +640,7 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[]) ...@@ -616,7 +640,7 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
/* /*
* Wait for XPU to perform extra actions for the caller in some cases. * Wait for XPU to perform extra actions for the caller in some cases.
* e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all buffers * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
* back in a burst shortly thereafter * back in a burst shortly thereafter
*/ */
if (info->flags & API_SLOW) if (info->flags & API_SLOW)
......
...@@ -39,14 +39,14 @@ ...@@ -39,14 +39,14 @@
struct cx18; struct cx18;
/* /*
* This structure is used by CPU to provide completed buffers information * This structure is used by CPU to provide completed MDL & buffers information.
* Its structure is dictrated by the layout of the SCB, required by the * Its structure is dictated by the layout of the SCB, required by the
* firmware, but its definition needs to be here, instead of in cx18-scb.h, * firmware, but its definition needs to be here, instead of in cx18-scb.h,
* for mailbox work order scheduling * for mailbox work order scheduling
*/ */
struct cx18_mdl_ack { struct cx18_mdl_ack {
u32 id; /* ID of a completed MDL */ u32 id; /* ID of a completed MDL */
u32 data_used; /* Total data filled in the MDL for buffer 'id' */ u32 data_used; /* Total data filled in the MDL with 'id' */
}; };
/* The cx18_mailbox struct is the mailbox structure which is used for passing /* The cx18_mailbox struct is the mailbox structure which is used for passing
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "cx18-queue.h" #include "cx18-queue.h"
#include "cx18-streams.h" #include "cx18-streams.h"
#include "cx18-scb.h" #include "cx18-scb.h"
#include "cx18-io.h"
void cx18_buf_swap(struct cx18_buffer *buf) void cx18_buf_swap(struct cx18_buffer *buf)
{ {
...@@ -35,6 +36,17 @@ void cx18_buf_swap(struct cx18_buffer *buf) ...@@ -35,6 +36,17 @@ void cx18_buf_swap(struct cx18_buffer *buf)
swab32s((u32 *)(buf->buf + i)); swab32s((u32 *)(buf->buf + i));
} }
/*
 * _cx18_mdl_swap() - byteswap the data in every filled buffer of an MDL.
 *
 * Since buffer bytesused values are assigned sequentially from the MDL's
 * total (see the *_set_buf_bytesused helpers), the first empty buffer
 * marks the end of the data and terminates the walk.
 */
void _cx18_mdl_swap(struct cx18_mdl *mdl)
{
struct cx18_buffer *buf;
list_for_each_entry(buf, &mdl->buf_list, list) {
if (buf->bytesused == 0)
break;
cx18_buf_swap(buf);
}
}
void cx18_queue_init(struct cx18_queue *q) void cx18_queue_init(struct cx18_queue *q)
{ {
INIT_LIST_HEAD(&q->list); INIT_LIST_HEAD(&q->list);
...@@ -42,15 +54,16 @@ void cx18_queue_init(struct cx18_queue *q) ...@@ -42,15 +54,16 @@ void cx18_queue_init(struct cx18_queue *q)
q->bytesused = 0; q->bytesused = 0;
} }
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf, struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
struct cx18_queue *q, int to_front) struct cx18_queue *q, int to_front)
{ {
/* clear the buffer if it is not to be enqueued to the full queue */ /* clear the mdl if it is not to be enqueued to the full queue */
if (q != &s->q_full) { if (q != &s->q_full) {
buf->bytesused = 0; mdl->bytesused = 0;
buf->readpos = 0; mdl->readpos = 0;
buf->b_flags = 0; mdl->m_flags = 0;
buf->skipped = 0; mdl->skipped = 0;
mdl->curr_buf = NULL;
} }
/* q_busy is restricted to a max buffer count imposed by firmware */ /* q_busy is restricted to a max buffer count imposed by firmware */
...@@ -61,125 +74,270 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf, ...@@ -61,125 +74,270 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
spin_lock(&q->lock); spin_lock(&q->lock);
if (to_front) if (to_front)
list_add(&buf->list, &q->list); /* LIFO */ list_add(&mdl->list, &q->list); /* LIFO */
else else
list_add_tail(&buf->list, &q->list); /* FIFO */ list_add_tail(&mdl->list, &q->list); /* FIFO */
q->bytesused += buf->bytesused - buf->readpos; q->bytesused += mdl->bytesused - mdl->readpos;
atomic_inc(&q->depth); atomic_inc(&q->depth);
spin_unlock(&q->lock); spin_unlock(&q->lock);
return q; return q;
} }
struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q) struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{ {
struct cx18_buffer *buf = NULL; struct cx18_mdl *mdl = NULL;
spin_lock(&q->lock); spin_lock(&q->lock);
if (!list_empty(&q->list)) { if (!list_empty(&q->list)) {
buf = list_first_entry(&q->list, struct cx18_buffer, list); mdl = list_first_entry(&q->list, struct cx18_mdl, list);
list_del_init(&buf->list); list_del_init(&mdl->list);
q->bytesused -= buf->bytesused - buf->readpos; q->bytesused -= mdl->bytesused - mdl->readpos;
buf->skipped = 0; mdl->skipped = 0;
atomic_dec(&q->depth); atomic_dec(&q->depth);
} }
spin_unlock(&q->lock); spin_unlock(&q->lock);
return buf; return mdl;
}
static void _cx18_mdl_set_buf_bytesused(struct cx18_stream *s,
struct cx18_mdl *mdl)
{
struct cx18_buffer *buf;
u32 buf_size = s->buf_size;
u32 bytesused = mdl->bytesused;
list_for_each_entry(buf, &mdl->buf_list, list) {
buf->readpos = 0;
if (bytesused >= buf_size) {
buf->bytesused = buf_size;
bytesused -= buf_size;
} else {
buf->bytesused = bytesused;
bytesused = 0;
}
}
} }
struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id, static inline void cx18_mdl_set_buf_bytesused(struct cx18_stream *s,
struct cx18_mdl *mdl)
{
struct cx18_buffer *buf;
if (list_is_singular(&mdl->buf_list)) {
buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
list);
buf->bytesused = mdl->bytesused;
buf->readpos = 0;
} else {
_cx18_mdl_set_buf_bytesused(s, mdl);
}
}
struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
u32 bytesused) u32 bytesused)
{ {
struct cx18 *cx = s->cx; struct cx18 *cx = s->cx;
struct cx18_buffer *buf; struct cx18_mdl *mdl;
struct cx18_buffer *tmp; struct cx18_mdl *tmp;
struct cx18_buffer *ret = NULL; struct cx18_mdl *ret = NULL;
LIST_HEAD(sweep_up); LIST_HEAD(sweep_up);
/* /*
* We don't have to acquire multiple q locks here, because we are * We don't have to acquire multiple q locks here, because we are
* serialized by the single threaded work handler. * serialized by the single threaded work handler.
* Buffers from the firmware will thus remain in order as * MDLs from the firmware will thus remain in order as
* they are moved from q_busy to q_full or to the dvb ring buffer. * they are moved from q_busy to q_full or to the dvb ring buffer.
*/ */
spin_lock(&s->q_busy.lock); spin_lock(&s->q_busy.lock);
list_for_each_entry_safe(buf, tmp, &s->q_busy.list, list) { list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) {
/* /*
* We should find what the firmware told us is done, * We should find what the firmware told us is done,
* right at the front of the queue. If we don't, we likely have * right at the front of the queue. If we don't, we likely have
* missed a buffer done message from the firmware. * missed an mdl done message from the firmware.
* Once we skip a buffer repeatedly, relative to the size of * Once we skip an mdl repeatedly, relative to the size of
* q_busy, we have high confidence we've missed it. * q_busy, we have high confidence we've missed it.
*/ */
if (buf->id != id) { if (mdl->id != id) {
buf->skipped++; mdl->skipped++;
if (buf->skipped >= atomic_read(&s->q_busy.depth)-1) { if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
/* buffer must have fallen out of rotation */ /* mdl must have fallen out of rotation */
CX18_WARN("Skipped %s, buffer %d, %d " CX18_WARN("Skipped %s, MDL %d, %d "
"times - it must have dropped out of " "times - it must have dropped out of "
"rotation\n", s->name, buf->id, "rotation\n", s->name, mdl->id,
buf->skipped); mdl->skipped);
/* Sweep it up to put it back into rotation */ /* Sweep it up to put it back into rotation */
list_move_tail(&buf->list, &sweep_up); list_move_tail(&mdl->list, &sweep_up);
atomic_dec(&s->q_busy.depth); atomic_dec(&s->q_busy.depth);
} }
continue; continue;
} }
/* /*
* We pull the desired buffer off of the queue here. Something * We pull the desired mdl off of the queue here. Something
* will have to put it back on a queue later. * will have to put it back on a queue later.
*/ */
list_del_init(&buf->list); list_del_init(&mdl->list);
atomic_dec(&s->q_busy.depth); atomic_dec(&s->q_busy.depth);
ret = buf; ret = mdl;
break; break;
} }
spin_unlock(&s->q_busy.lock); spin_unlock(&s->q_busy.lock);
/* /*
* We found the buffer for which we were looking. Get it ready for * We found the mdl for which we were looking. Get it ready for
* the caller to put on q_full or in the dvb ring buffer. * the caller to put on q_full or in the dvb ring buffer.
*/ */
if (ret != NULL) { if (ret != NULL) {
ret->bytesused = bytesused; ret->bytesused = bytesused;
ret->skipped = 0; ret->skipped = 0;
/* readpos and b_flags were 0'ed when the buf went on q_busy */ /* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
cx18_buf_sync_for_cpu(s, ret); cx18_mdl_set_buf_bytesused(s, ret);
cx18_mdl_sync_for_cpu(s, ret);
if (s->type != CX18_ENC_STREAM_TYPE_TS) if (s->type != CX18_ENC_STREAM_TYPE_TS)
set_bit(CX18_F_B_NEED_BUF_SWAP, &ret->b_flags); set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
} }
/* Put any buffers the firmware is ignoring back into normal rotation */ /* Put any mdls the firmware is ignoring back into normal rotation */
list_for_each_entry_safe(buf, tmp, &sweep_up, list) { list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
list_del_init(&buf->list); list_del_init(&mdl->list);
cx18_enqueue(s, buf, &s->q_free); cx18_enqueue(s, mdl, &s->q_free);
} }
return ret; return ret;
} }
/* Move all buffers of a queue to q_free, while flushing the buffers */ /* Move all mdls of a queue, while flushing the mdl */
static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q) static void cx18_queue_flush(struct cx18_stream *s,
struct cx18_queue *q_src, struct cx18_queue *q_dst)
{ {
struct cx18_buffer *buf; struct cx18_mdl *mdl;
if (q == &s->q_free) /* It only makes sense to flush to q_free or q_idle */
if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)
return; return;
spin_lock(&q->lock); spin_lock(&q_src->lock);
while (!list_empty(&q->list)) { spin_lock(&q_dst->lock);
buf = list_first_entry(&q->list, struct cx18_buffer, list); while (!list_empty(&q_src->list)) {
list_move_tail(&buf->list, &s->q_free.list); mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);
buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0; list_move_tail(&mdl->list, &q_dst->list);
atomic_inc(&s->q_free.depth); mdl->bytesused = 0;
mdl->readpos = 0;
mdl->m_flags = 0;
mdl->skipped = 0;
mdl->curr_buf = NULL;
atomic_inc(&q_dst->depth);
} }
cx18_queue_init(q); cx18_queue_init(q_src);
spin_unlock(&q->lock); spin_unlock(&q_src->lock);
spin_unlock(&q_dst->lock);
} }
void cx18_flush_queues(struct cx18_stream *s) void cx18_flush_queues(struct cx18_stream *s)
{ {
cx18_queue_flush(s, &s->q_busy); cx18_queue_flush(s, &s->q_busy, &s->q_free);
cx18_queue_flush(s, &s->q_full); cx18_queue_flush(s, &s->q_full, &s->q_free);
}
/*
 * cx18_unload_queues() - take the stream's MDLs out of service: park every
 * MDL on q_idle and detach all their buffers back into s->buf_pool.
 *
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_unload_queues(struct cx18_stream *s)
{
struct cx18_queue *q_idle = &s->q_idle;
struct cx18_mdl *mdl;
struct cx18_buffer *buf;
/* Move all MDLS to q_idle */
cx18_queue_flush(s, &s->q_busy, q_idle);
cx18_queue_flush(s, &s->q_full, q_idle);
cx18_queue_flush(s, &s->q_free, q_idle);
/* Reset MDL id's and move all buffers back to the stream's buf_pool */
spin_lock(&q_idle->lock);
list_for_each_entry(mdl, &q_idle->list, list) {
while (!list_empty(&mdl->buf_list)) {
buf = list_first_entry(&mdl->buf_list,
struct cx18_buffer, list);
list_move_tail(&buf->list, &s->buf_pool);
/* buffers re-enter the pool with no stale data accounting */
buf->bytesused = 0;
buf->readpos = 0;
}
mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
/* all other mdl fields were cleared by cx18_queue_flush() */
}
spin_unlock(&q_idle->lock);
}
/*
 * cx18_load_queues() - rebuild the stream's MDLs from s->buf_pool: attach
 * s->bufs_per_mdl buffers to each idle MDL, assign it an id, publish the
 * buffer addresses to the firmware's MDL array, and put it on q_free.
 *
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_load_queues(struct cx18_stream *s)
{
struct cx18 *cx = s->cx;
struct cx18_mdl *mdl;
struct cx18_buffer *buf;
int mdl_id;
int i;
/*
 * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free
 * Excess MDLs are left on q_idle
 * Excess buffers are left in buf_pool and/or on an MDL in q_idle
 */
mdl_id = s->mdl_base_idx;
/*
 * Loop ends when q_idle is empty or the previous MDL came up short of
 * buffers (i != s->bufs_per_mdl), i.e. buf_pool ran dry.
 */
for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
mdl != NULL && i == s->bufs_per_mdl;
mdl = cx18_dequeue(s, &s->q_idle)) {
mdl->id = mdl_id;
for (i = 0; i < s->bufs_per_mdl; i++) {
if (list_empty(&s->buf_pool))
break;
buf = list_first_entry(&s->buf_pool, struct cx18_buffer,
list);
list_move_tail(&buf->list, &mdl->buf_list);
/* update the firmware's MDL array with this buffer */
cx18_writel(cx, buf->dma_handle,
&cx->scb->cpu_mdl[mdl_id + i].paddr);
cx18_writel(cx, s->buf_size,
&cx->scb->cpu_mdl[mdl_id + i].length);
}
if (i == s->bufs_per_mdl)
cx18_enqueue(s, mdl, &s->q_free);
else
cx18_push(s, mdl, &s->q_idle); /* not enough buffers */
mdl_id += i;
}
}
/*
 * _cx18_mdl_sync_for_cpu() - DMA-sync every buffer of an MDL for CPU
 * access, so the CPU sees the data the device wrote.
 * Each buffer is synced for the full s->buf_size, not just bytesused.
 */
void _cx18_mdl_sync_for_cpu(struct cx18_stream *s, struct cx18_mdl *mdl)
{
int dma = s->dma;
u32 buf_size = s->buf_size;
struct pci_dev *pci_dev = s->cx->pci_dev;
struct cx18_buffer *buf;
list_for_each_entry(buf, &mdl->buf_list, list)
pci_dma_sync_single_for_cpu(pci_dev, buf->dma_handle,
buf_size, dma);
}
/*
 * _cx18_mdl_sync_for_device() - DMA-sync every buffer of an MDL back to
 * the device, before handing the MDL to the firmware.
 * Counterpart of _cx18_mdl_sync_for_cpu().
 */
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
{
int dma = s->dma;
u32 buf_size = s->buf_size;
struct pci_dev *pci_dev = s->cx->pci_dev;
struct cx18_buffer *buf;
list_for_each_entry(buf, &mdl->buf_list, list)
pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,
buf_size, dma);
} }
int cx18_stream_alloc(struct cx18_stream *s) int cx18_stream_alloc(struct cx18_stream *s)
...@@ -207,24 +365,40 @@ int cx18_stream_alloc(struct cx18_stream *s) ...@@ -207,24 +365,40 @@ int cx18_stream_alloc(struct cx18_stream *s)
s->mdl_base_idx = cx->free_mdl_idx; s->mdl_base_idx = cx->free_mdl_idx;
/* allocate stream buffers. Initially all buffers are in q_free. */ /* allocate stream buffers and MDLs */
for (i = 0; i < s->buffers; i++) { for (i = 0; i < s->buffers; i++) {
struct cx18_buffer *buf = kzalloc(sizeof(struct cx18_buffer), struct cx18_mdl *mdl;
GFP_KERNEL|__GFP_NOWARN); struct cx18_buffer *buf;
if (buf == NULL) /* 1 MDL per buffer to handle the worst & also default case */
mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);
if (mdl == NULL)
break; break;
buf = kzalloc(sizeof(struct cx18_buffer),
GFP_KERNEL|__GFP_NOWARN);
if (buf == NULL) {
kfree(mdl);
break;
}
buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN); buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
if (buf->buf == NULL) { if (buf->buf == NULL) {
kfree(mdl);
kfree(buf); kfree(buf);
break; break;
} }
buf->id = cx->buffer_id++;
INIT_LIST_HEAD(&mdl->list);
INIT_LIST_HEAD(&mdl->buf_list);
mdl->id = s->mdl_base_idx; /* a somewhat safe value */
cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list); INIT_LIST_HEAD(&buf->list);
buf->dma_handle = pci_map_single(s->cx->pci_dev, buf->dma_handle = pci_map_single(s->cx->pci_dev,
buf->buf, s->buf_size, s->dma); buf->buf, s->buf_size, s->dma);
cx18_buf_sync_for_cpu(s, buf); cx18_buf_sync_for_cpu(s, buf);
cx18_enqueue(s, buf, &s->q_free); list_add_tail(&buf->list, &s->buf_pool);
} }
if (i == s->buffers) { if (i == s->buffers) {
cx->free_mdl_idx += s->buffers; cx->free_mdl_idx += s->buffers;
...@@ -237,13 +411,21 @@ int cx18_stream_alloc(struct cx18_stream *s) ...@@ -237,13 +411,21 @@ int cx18_stream_alloc(struct cx18_stream *s)
void cx18_stream_free(struct cx18_stream *s) void cx18_stream_free(struct cx18_stream *s)
{ {
struct cx18_mdl *mdl;
struct cx18_buffer *buf; struct cx18_buffer *buf;
/* move all buffers to q_free */ /* move all buffers to buf_pool and all MDLs to q_idle */
cx18_flush_queues(s); cx18_unload_queues(s);
/* empty q_idle */
while ((mdl = cx18_dequeue(s, &s->q_idle)))
kfree(mdl);
/* empty buf_pool */
while (!list_empty(&s->buf_pool)) {
buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
list_del_init(&buf->list);
/* empty q_free */
while ((buf = cx18_dequeue(s, &s->q_free))) {
pci_unmap_single(s->cx->pci_dev, buf->dma_handle, pci_unmap_single(s->cx->pci_dev, buf->dma_handle,
s->buf_size, s->dma); s->buf_size, s->dma);
kfree(buf->buf); kfree(buf->buf);
......
...@@ -33,6 +33,19 @@ static inline void cx18_buf_sync_for_cpu(struct cx18_stream *s, ...@@ -33,6 +33,19 @@ static inline void cx18_buf_sync_for_cpu(struct cx18_stream *s,
s->buf_size, s->dma); s->buf_size, s->dma);
} }
void _cx18_mdl_sync_for_cpu(struct cx18_stream *s, struct cx18_mdl *mdl);
static inline void cx18_mdl_sync_for_cpu(struct cx18_stream *s,
struct cx18_mdl *mdl)
{
if (list_is_singular(&mdl->buf_list))
cx18_buf_sync_for_cpu(s, list_first_entry(&mdl->buf_list,
struct cx18_buffer,
list));
else
_cx18_mdl_sync_for_cpu(s, mdl);
}
static inline void cx18_buf_sync_for_device(struct cx18_stream *s, static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
struct cx18_buffer *buf) struct cx18_buffer *buf)
{ {
...@@ -40,32 +53,59 @@ static inline void cx18_buf_sync_for_device(struct cx18_stream *s, ...@@ -40,32 +53,59 @@ static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
s->buf_size, s->dma); s->buf_size, s->dma);
} }
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl);
static inline void cx18_mdl_sync_for_device(struct cx18_stream *s,
struct cx18_mdl *mdl)
{
if (list_is_singular(&mdl->buf_list))
cx18_buf_sync_for_device(s, list_first_entry(&mdl->buf_list,
struct cx18_buffer,
list));
else
_cx18_mdl_sync_for_device(s, mdl);
}
void cx18_buf_swap(struct cx18_buffer *buf); void cx18_buf_swap(struct cx18_buffer *buf);
void _cx18_mdl_swap(struct cx18_mdl *mdl);
static inline void cx18_mdl_swap(struct cx18_mdl *mdl)
{
if (list_is_singular(&mdl->buf_list))
cx18_buf_swap(list_first_entry(&mdl->buf_list,
struct cx18_buffer, list));
else
_cx18_mdl_swap(mdl);
}
/* cx18_queue utility functions */ /* cx18_queue utility functions */
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf, struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
struct cx18_queue *q, int to_front); struct cx18_queue *q, int to_front);
static inline static inline
struct cx18_queue *cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf, struct cx18_queue *cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
struct cx18_queue *q) struct cx18_queue *q)
{ {
return _cx18_enqueue(s, buf, q, 0); /* FIFO */ return _cx18_enqueue(s, mdl, q, 0); /* FIFO */
} }
static inline static inline
struct cx18_queue *cx18_push(struct cx18_stream *s, struct cx18_buffer *buf, struct cx18_queue *cx18_push(struct cx18_stream *s, struct cx18_mdl *mdl,
struct cx18_queue *q) struct cx18_queue *q)
{ {
return _cx18_enqueue(s, buf, q, 1); /* LIFO */ return _cx18_enqueue(s, mdl, q, 1); /* LIFO */
} }
void cx18_queue_init(struct cx18_queue *q); void cx18_queue_init(struct cx18_queue *q);
struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q); struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id, struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
u32 bytesused); u32 bytesused);
void cx18_flush_queues(struct cx18_stream *s); void cx18_flush_queues(struct cx18_stream *s);
/* queue MDL reconfiguration helpers */
void cx18_unload_queues(struct cx18_stream *s);
void cx18_load_queues(struct cx18_stream *s);
/* cx18_stream utility functions */ /* cx18_stream utility functions */
int cx18_stream_alloc(struct cx18_stream *s); int cx18_stream_alloc(struct cx18_stream *s);
void cx18_stream_free(struct cx18_stream *s); void cx18_stream_free(struct cx18_stream *s);
...@@ -115,6 +115,9 @@ static void cx18_stream_init(struct cx18 *cx, int type) ...@@ -115,6 +115,9 @@ static void cx18_stream_init(struct cx18 *cx, int type)
s->dma = cx18_stream_info[type].dma; s->dma = cx18_stream_info[type].dma;
s->buffers = cx->stream_buffers[type]; s->buffers = cx->stream_buffers[type];
s->buf_size = cx->stream_buf_size[type]; s->buf_size = cx->stream_buf_size[type];
INIT_LIST_HEAD(&s->buf_pool);
s->bufs_per_mdl = 1;
s->mdl_size = s->buf_size * s->bufs_per_mdl;
init_waitqueue_head(&s->waitq); init_waitqueue_head(&s->waitq);
s->id = -1; s->id = -1;
...@@ -124,6 +127,8 @@ static void cx18_stream_init(struct cx18 *cx, int type) ...@@ -124,6 +127,8 @@ static void cx18_stream_init(struct cx18 *cx, int type)
cx18_queue_init(&s->q_busy); cx18_queue_init(&s->q_busy);
spin_lock_init(&s->q_full.lock); spin_lock_init(&s->q_full.lock);
cx18_queue_init(&s->q_full); cx18_queue_init(&s->q_full);
spin_lock_init(&s->q_idle.lock);
cx18_queue_init(&s->q_idle);
INIT_WORK(&s->out_work_order, cx18_out_work_handler); INIT_WORK(&s->out_work_order, cx18_out_work_handler);
} }
...@@ -441,8 +446,8 @@ static void cx18_vbi_setup(struct cx18_stream *s) ...@@ -441,8 +446,8 @@ static void cx18_vbi_setup(struct cx18_stream *s)
} }
static static
struct cx18_queue *_cx18_stream_put_buf_fw(struct cx18_stream *s, struct cx18_queue *_cx18_stream_put_mdl_fw(struct cx18_stream *s,
struct cx18_buffer *buf) struct cx18_mdl *mdl)
{ {
struct cx18 *cx = s->cx; struct cx18 *cx = s->cx;
struct cx18_queue *q; struct cx18_queue *q;
...@@ -451,16 +456,16 @@ struct cx18_queue *_cx18_stream_put_buf_fw(struct cx18_stream *s, ...@@ -451,16 +456,16 @@ struct cx18_queue *_cx18_stream_put_buf_fw(struct cx18_stream *s,
if (s->handle == CX18_INVALID_TASK_HANDLE || if (s->handle == CX18_INVALID_TASK_HANDLE ||
test_bit(CX18_F_S_STOPPING, &s->s_flags) || test_bit(CX18_F_S_STOPPING, &s->s_flags) ||
!test_bit(CX18_F_S_STREAMING, &s->s_flags)) !test_bit(CX18_F_S_STREAMING, &s->s_flags))
return cx18_enqueue(s, buf, &s->q_free); return cx18_enqueue(s, mdl, &s->q_free);
q = cx18_enqueue(s, buf, &s->q_busy); q = cx18_enqueue(s, mdl, &s->q_busy);
if (q != &s->q_busy) if (q != &s->q_busy)
return q; /* The firmware has the max buffers it can handle */ return q; /* The firmware has the max MDLs it can handle */
cx18_buf_sync_for_device(s, buf); cx18_mdl_sync_for_device(s, mdl);
cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle, cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
(void __iomem *) &cx->scb->cpu_mdl[buf->id] - cx->enc_mem, (void __iomem *) &cx->scb->cpu_mdl[mdl->id] - cx->enc_mem,
1, buf->id, s->buf_size); s->bufs_per_mdl, mdl->id, s->mdl_size);
return q; return q;
} }
...@@ -468,7 +473,7 @@ static ...@@ -468,7 +473,7 @@ static
void _cx18_stream_load_fw_queue(struct cx18_stream *s) void _cx18_stream_load_fw_queue(struct cx18_stream *s)
{ {
struct cx18_queue *q; struct cx18_queue *q;
struct cx18_buffer *buf; struct cx18_mdl *mdl;
if (atomic_read(&s->q_free.depth) == 0 || if (atomic_read(&s->q_free.depth) == 0 ||
atomic_read(&s->q_busy.depth) >= CX18_MAX_FW_MDLS_PER_STREAM) atomic_read(&s->q_busy.depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
...@@ -476,10 +481,10 @@ void _cx18_stream_load_fw_queue(struct cx18_stream *s) ...@@ -476,10 +481,10 @@ void _cx18_stream_load_fw_queue(struct cx18_stream *s)
/* Move from q_free to q_busy notifying the firmware, until the limit */ /* Move from q_free to q_busy notifying the firmware, until the limit */
do { do {
buf = cx18_dequeue(s, &s->q_free); mdl = cx18_dequeue(s, &s->q_free);
if (buf == NULL) if (mdl == NULL)
break; break;
q = _cx18_stream_put_buf_fw(s, buf); q = _cx18_stream_put_mdl_fw(s, mdl);
} while (atomic_read(&s->q_busy.depth) < CX18_MAX_FW_MDLS_PER_STREAM } while (atomic_read(&s->q_busy.depth) < CX18_MAX_FW_MDLS_PER_STREAM
&& q == &s->q_busy); && q == &s->q_busy);
} }
...@@ -492,11 +497,21 @@ void cx18_out_work_handler(struct work_struct *work) ...@@ -492,11 +497,21 @@ void cx18_out_work_handler(struct work_struct *work)
_cx18_stream_load_fw_queue(s); _cx18_stream_load_fw_queue(s);
} }
static void cx18_stream_configure_mdls(struct cx18_stream *s)
{
cx18_unload_queues(s);
/* For now */
s->bufs_per_mdl = 1;
s->mdl_size = s->buf_size * s->bufs_per_mdl;
cx18_load_queues(s);
}
int cx18_start_v4l2_encode_stream(struct cx18_stream *s) int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
{ {
u32 data[MAX_MB_ARGUMENTS]; u32 data[MAX_MB_ARGUMENTS];
struct cx18 *cx = s->cx; struct cx18 *cx = s->cx;
struct cx18_buffer *buf;
int captype = 0; int captype = 0;
struct cx18_api_func_private priv; struct cx18_api_func_private priv;
...@@ -619,14 +634,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s) ...@@ -619,14 +634,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
(void __iomem *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem); (void __iomem *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem);
/* Init all the cpu_mdls for this stream */ /* Init all the cpu_mdls for this stream */
cx18_flush_queues(s); cx18_stream_configure_mdls(s);
spin_lock(&s->q_free.lock);
list_for_each_entry(buf, &s->q_free.list, list) {
cx18_writel(cx, buf->dma_handle,
&cx->scb->cpu_mdl[buf->id].paddr);
cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
}
spin_unlock(&s->q_free.lock);
_cx18_stream_load_fw_queue(s); _cx18_stream_load_fw_queue(s);
/* begin_capture */ /* begin_capture */
......
...@@ -28,18 +28,18 @@ int cx18_streams_setup(struct cx18 *cx); ...@@ -28,18 +28,18 @@ int cx18_streams_setup(struct cx18 *cx);
int cx18_streams_register(struct cx18 *cx); int cx18_streams_register(struct cx18 *cx);
void cx18_streams_cleanup(struct cx18 *cx, int unregister); void cx18_streams_cleanup(struct cx18 *cx, int unregister);
/* Related to submission of buffers to firmware */ /* Related to submission of mdls to firmware */
static inline void cx18_stream_load_fw_queue(struct cx18_stream *s) static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
{ {
struct cx18 *cx = s->cx; struct cx18 *cx = s->cx;
queue_work(cx->out_work_queue, &s->out_work_order); queue_work(cx->out_work_queue, &s->out_work_order);
} }
static inline void cx18_stream_put_buf_fw(struct cx18_stream *s, static inline void cx18_stream_put_mdl_fw(struct cx18_stream *s,
struct cx18_buffer *buf) struct cx18_mdl *mdl)
{ {
/* Put buf on q_free; the out work handler will move buf(s) to q_busy */ /* Put mdl on q_free; the out work handler will move mdl(s) to q_busy */
cx18_enqueue(s, buf, &s->q_free); cx18_enqueue(s, mdl, &s->q_free);
cx18_stream_load_fw_queue(s); cx18_stream_load_fw_queue(s);
} }
......
...@@ -105,6 +105,7 @@ static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp) ...@@ -105,6 +105,7 @@ static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp)
/* Compress raw VBI format, removes leading SAV codes and surplus space /* Compress raw VBI format, removes leading SAV codes and surplus space
after the frame. Returns new compressed size. */ after the frame. Returns new compressed size. */
/* FIXME - this function ignores the input size. */
static u32 compress_raw_buf(struct cx18 *cx, u8 *buf, u32 size, u32 hdr_size) static u32 compress_raw_buf(struct cx18 *cx, u8 *buf, u32 size, u32 hdr_size)
{ {
u32 line_size = vbi_active_samples; u32 line_size = vbi_active_samples;
...@@ -185,8 +186,7 @@ static u32 compress_sliced_buf(struct cx18 *cx, u8 *buf, u32 size, ...@@ -185,8 +186,7 @@ static u32 compress_sliced_buf(struct cx18 *cx, u8 *buf, u32 size,
return line; return line;
} }
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf, static void _cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf)
int streamtype)
{ {
/* /*
* The CX23418 provides a 12 byte header in its raw VBI buffers to us: * The CX23418 provides a 12 byte header in its raw VBI buffers to us:
...@@ -203,9 +203,6 @@ void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf, ...@@ -203,9 +203,6 @@ void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf,
u32 pts; u32 pts;
int lines; int lines;
if (streamtype != CX18_ENC_STREAM_TYPE_VBI)
return;
/* /*
* The CX23418 sends us data that is 32 bit little-endian swapped, * The CX23418 sends us data that is 32 bit little-endian swapped,
* but we want the raw VBI bytes in the order they were in the raster * but we want the raw VBI bytes in the order they were in the raster
...@@ -250,3 +247,31 @@ void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf, ...@@ -250,3 +247,31 @@ void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf,
copy_vbi_data(cx, lines, pts); copy_vbi_data(cx, lines, pts);
cx->vbi.frame++; cx->vbi.frame++;
} }
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_mdl *mdl,
int streamtype)
{
struct cx18_buffer *buf;
u32 orig_used;
if (streamtype != CX18_ENC_STREAM_TYPE_VBI)
return;
/*
* Big assumption here:
* Every buffer hooked to the MDL's buf_list is a complete VBI frame
* that ends at the end of the buffer.
*
* To assume anything else would make the code in this file
* more complex, or require extra memcpy()'s to make the
* buffers satisfy the above assumption. It's just simpler to set
* up the encoder buffer transfers to make the assumption true.
*/
list_for_each_entry(buf, &mdl->buf_list, list) {
orig_used = buf->bytesused;
if (orig_used == 0)
break;
_cx18_process_vbi_data(cx, buf);
mdl->bytesused -= (orig_used - buf->bytesused);
}
}
...@@ -21,6 +21,6 @@ ...@@ -21,6 +21,6 @@
* 02111-1307 USA * 02111-1307 USA
*/ */
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf, void cx18_process_vbi_data(struct cx18 *cx, struct cx18_mdl *mdl,
int streamtype); int streamtype);
int cx18_used_line(struct cx18 *cx, int line, int field); int cx18_used_line(struct cx18 *cx, int line, int field);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment