Commit ec13c78d authored by Mathieu Poirier, committed by Greg Kroah-Hartman

coresight: tmc-etr: Add barrier packets when moving offset forward

This patch adds barrier packets in the trace stream when the offset in the
data buffer needs to be moved forward.  Otherwise the decoder isn't aware
of the break in the stream and can't synchronise itself with the trace
data.
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Tested-by: Yabin Cui <yabinc@google.com>
Reviewed-by: Leo Yan <leo.yan@linaro.org>
Link: https://lore.kernel.org/r/20190829202842.580-18-mathieu.poirier@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3507d231
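
The change is easiest to see in isolation. Below is a minimal, self-contained sketch of the wrap-around arithmetic the patch introduces in tmc_update_etr_buffer(); the struct and function names here are illustrative only, not the kernel's definitions.

/*
 * Standalone sketch of the offset-advance arithmetic this patch adds.
 * The structure below only mirrors the three etr_buf fields the
 * computation needs; it is illustrative, not the kernel definition.
 */
#include <stdio.h>

struct fake_etr_buf {
	unsigned long offset;	/* where valid trace data starts */
	unsigned long len;	/* amount of valid trace data */
	unsigned long size;	/* total size of the circular buffer */
};

/* Advance the start offset so that only the newest 'want' bytes are kept. */
static unsigned long advance_offset(const struct fake_etr_buf *buf,
				    unsigned long want)
{
	unsigned long offset = buf->offset + buf->len - want;

	/* The buffer is circular: wrap if we ran past the end. */
	if (offset >= buf->size)
		offset -= buf->size;
	return offset;
}

int main(void)
{
	/* 4 KB circular buffer, data starts at 3 KB and wraps around. */
	struct fake_etr_buf buf = { .offset = 3072, .len = 2048, .size = 4096 };

	/* Keep only the newest 512 bytes: 3072 + 2048 - 512 = 4608, wraps to 512. */
	printf("new offset = %lu\n", advance_offset(&buf, 512));
	return 0;
}

Compiled and run, this prints "new offset = 512": the oldest 1536 bytes are skipped and the offset wraps past the end of the 4 KB buffer. The barrier packet the patch inserts at that new offset is what tells the decoder a discontinuity occurred there.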
@@ -1418,10 +1418,11 @@ static void tmc_free_etr_buffer(void *config)
  * buffer to the perf ring buffer.
  */
 static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
+				     unsigned long src_offset,
 				     unsigned long to_copy)
 {
 	long bytes;
-	long pg_idx, pg_offset, src_offset;
+	long pg_idx, pg_offset;
 	unsigned long head = etr_perf->head;
 	char **dst_pages, *src_buf;
 	struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1430,7 +1431,6 @@ static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf,
 	pg_idx = head >> PAGE_SHIFT;
 	pg_offset = head & (PAGE_SIZE - 1);
 	dst_pages = (char **)etr_perf->pages;
-	src_offset = etr_buf->offset + etr_buf->len - to_copy;

 	while (to_copy > 0) {
 		/*
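
The two hunks above move the computation of src_offset out of tmc_etr_sync_perf_buffer() and into its caller, so the copy loop simply consumes a ready-made source offset. The destination side of that loop addresses the perf ring buffer page by page; a small sketch of that index arithmetic, assuming 4 KB pages (the kernel takes PAGE_SHIFT/PAGE_SIZE from its architecture headers):

/*
 * Sketch of the destination page math used by tmc_etr_sync_perf_buffer().
 * The 4 KB page size is an assumption for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long head = 9000;	/* linear offset into the perf ring buffer */
	long pg_idx = head >> PAGE_SHIFT;		/* which page: 9000 / 4096 = 2 */
	long pg_offset = head & (PAGE_SIZE - 1);	/* where in it: 9000 % 4096 = 808 */

	printf("page %ld, offset %ld\n", pg_idx, pg_offset);
	return 0;
}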
@@ -1478,7 +1478,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
 		      void *config)
 {
 	bool lost = false;
-	unsigned long flags, size = 0;
+	unsigned long flags, offset, size = 0;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 	struct etr_perf_buffer *etr_perf = config;
 	struct etr_buf *etr_buf = etr_perf->etr_buf;
@@ -1506,16 +1506,35 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);

 	lost = etr_buf->full;
+	offset = etr_buf->offset;
 	size = etr_buf->len;
+
+	/*
+	 * The ETR buffer may be bigger than the space available in the
+	 * perf ring buffer (handle->size).  If so advance the offset so that we
+	 * get the latest trace data.  In snapshot mode none of that matters
+	 * since we are expected to clobber stale data in favour of the latest
+	 * traces.
+	 */
 	if (!etr_perf->snapshot && size > handle->size) {
-		size = handle->size;
+		u32 mask = tmc_get_memwidth_mask(drvdata);
+
+		/*
+		 * Make sure the new size is aligned in accordance with the
+		 * requirement explained in function tmc_get_memwidth_mask().
+		 */
+		size = handle->size & mask;
+		offset = etr_buf->offset + etr_buf->len - size;
+
+		if (offset >= etr_buf->size)
+			offset -= etr_buf->size;
 		lost = true;
 	}

 	/* Insert barrier packets at the beginning, if there was an overflow */
 	if (lost)
-		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
-	tmc_etr_sync_perf_buffer(etr_perf, size);
+		tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
+	tmc_etr_sync_perf_buffer(etr_perf, offset, size);

 	/*
 	 * In snapshot mode we simply increment the head by the number of byte
...
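
The key hunk above does two things when the ETR buffer holds more data than the perf handle can take: it aligns the copy size down with the memwidth mask, then advances offset past the stale data, wrapping if needed (the same arithmetic as in the sketch near the top). A worked example of the alignment step; the mask value here is an assumption (16-byte frame alignment, low four bits cleared), the real value comes from tmc_get_memwidth_mask() and depends on the trace memory bus width:

/*
 * Worked example of the size alignment in the hunk above.  The mask
 * value is assumed for illustration; see tmc_get_memwidth_mask().
 */
#include <stdio.h>

int main(void)
{
	unsigned long handle_size = 4100;	/* space left in the perf ring buffer */
	unsigned int mask = ~0xfU;		/* assumed: align down to 16 bytes */
	unsigned long size = handle_size & mask;

	/* 4100 & ~0xf = 4096: copy slightly less rather than break alignment. */
	printf("aligned size = %lu\n", size);
	return 0;
}

Aligning size down rather than up means a few bytes of the newest trace may be dropped, but the offset handed to the decoder always lands on a boundary the TMC hardware and the barrier packet mechanism can work with.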