Commit 42cb6e07 authored by Vinod Koul

Merge branch 'topic/tegra' into for-linus

parents a74e7952 863326a6

drivers/dma/tegra20-apb-dma.c
@@ -38,6 +38,9 @@
#include "dmaengine.h"
#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>
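/*
 * CREATE_TRACE_POINTS is defined right before the include so that the
 * TRACE_EVENT() macros in tegra_apb_dma.h expand to the actual tracepoint
 * definitions in this one translation unit; any other file would include
 * the header alone and get only the trace_*() declarations.
 */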
#define TEGRA_APBDMA_GENERAL 0x0
#define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)
@@ -146,7 +149,7 @@ struct tegra_dma_channel_regs {
};
/*
* tegra_dma_sg_req: Dma request details to configure hardware. This
* tegra_dma_sg_req: DMA request details to configure hardware. This
* contains the details for one transfer to configure DMA hw.
* The client's request for data transfer can be broken into multiple
sub-transfers as per requester details and hw support.
@@ -155,7 +158,7 @@ struct tegra_dma_channel_regs {
*/
struct tegra_dma_sg_req {
struct tegra_dma_channel_regs ch_regs;
int req_len;
unsigned int req_len;
bool configured;
bool last_sg;
struct list_head node;
@@ -169,8 +172,8 @@ struct tegra_dma_sg_req {
*/
struct tegra_dma_desc {
struct dma_async_tx_descriptor txd;
int bytes_requested;
int bytes_transferred;
unsigned int bytes_requested;
unsigned int bytes_transferred;
enum dma_status dma_status;
struct list_head node;
struct list_head tx_list;
@@ -186,7 +189,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
struct dma_chan dma_chan;
char name[30];
char name[12];
bool config_init;
int id;
int irq;
@@ -574,7 +577,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
struct tegra_dma_sg_req *hsgreq = NULL;
if (list_empty(&tdc->pending_sg_req)) {
dev_err(tdc2dev(tdc), "Dma is running without req\n");
dev_err(tdc2dev(tdc), "DMA is running without req\n");
tegra_dma_stop(tdc);
return false;
}
@@ -587,7 +590,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!hsgreq->configured) {
tegra_dma_stop(tdc);
dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
tegra_dma_abort_all(tdc);
return false;
}
@@ -636,7 +639,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
dma_desc = sgreq->dma_desc;
dma_desc->bytes_transferred += sgreq->req_len;
/* if we dma for long enough the transfer count will wrap */
dma_desc->bytes_transferred =
(dma_desc->bytes_transferred + sgreq->req_len) %
dma_desc->bytes_requested;
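/*
 * In this continuous (cyclic) mode the same descriptor is reused
 * indefinitely, so the running count is kept modulo bytes_requested: it
 * stays a position within the current period, which is all that residue
 * reporting needs, instead of growing until the unsigned counter wraps.
 */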
/* Callback needs to be called */
if (!dma_desc->cb_count)
@@ -669,6 +675,8 @@ static void tegra_dma_tasklet(unsigned long data)
dmaengine_desc_get_callback(&dma_desc->txd, &cb);
cb_count = dma_desc->cb_count;
dma_desc->cb_count = 0;
trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
cb.callback);
spin_unlock_irqrestore(&tdc->lock, flags);
while (cb_count--)
dmaengine_desc_callback_invoke(&cb, NULL);
@@ -685,6 +693,7 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
spin_lock_irqsave(&tdc->lock, flags);
trace_tegra_dma_isr(&tdc->dma_chan, irq);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
@@ -843,6 +852,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
dma_set_residue(txstate, residual);
}
trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
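The residue recorded by the new trace_tegra_dma_tx_status() call above is the same value a client reads back through the dmaengine API. A rough client-side sketch for context (the device pointer, channel and cookie handling are hypothetical, not part of this commit):

#include <linux/device.h>
#include <linux/dmaengine.h>

/* Hypothetical slave driver polling how much of its transfer is left. */
static void sketch_check_residue(struct device *dev, struct dma_chan *chan,
				 dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		dev_dbg(dev, "%u bytes still pending\n", state.residue);
}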
@@ -919,7 +929,7 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
return 0;
default:
dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
return -EINVAL;
}
return -EINVAL;
@@ -952,7 +962,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
enum dma_slave_buswidth slave_bw;
if (!tdc->config_init) {
dev_err(tdc2dev(tdc), "dma channel is not configured\n");
dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
return NULL;
}
if (sg_len < 1) {
@@ -985,7 +995,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
dma_desc = tegra_dma_desc_get(tdc);
if (!dma_desc) {
dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
return NULL;
}
INIT_LIST_HEAD(&dma_desc->tx_list);
@@ -1005,14 +1015,14 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
if ((len & 3) || (mem & 3) ||
(len > tdc->tdma->chip_data->max_dma_count)) {
dev_err(tdc2dev(tdc),
"Dma length/memory address is not supported\n");
"DMA length/memory address is not supported\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
sg_req = tegra_dma_sg_req_get(tdc);
if (!sg_req) {
dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
@@ -1087,7 +1097,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
* terminating the DMA.
*/
if (tdc->busy) {
dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
return NULL;
}
@@ -1144,7 +1154,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
while (remain_len) {
sg_req = tegra_dma_sg_req_get(tdc);
if (!sg_req) {
dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
tegra_dma_desc_put(tdc, dma_desc);
return NULL;
}
@@ -1319,8 +1329,9 @@ static int tegra_dma_probe(struct platform_device *pdev)
return -ENODEV;
}
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
tdma = devm_kzalloc(&pdev->dev,
struct_size(tdma, channels, cdata->nr_channels),
GFP_KERNEL);
if (!tdma)
return -ENOMEM;

drivers/dma/tegra210-adma.c
@@ -678,8 +678,9 @@ static int tegra_adma_probe(struct platform_device *pdev)
return -ENODEV;
}
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_adma_chan), GFP_KERNEL);
tdma = devm_kzalloc(&pdev->dev,
struct_size(tdma, channels, cdata->nr_channels),
GFP_KERNEL);
if (!tdma)
return -ENOMEM;
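Both probe hunks above replace the open-coded sizeof(*tdma) + nr_channels * sizeof(...) arithmetic with struct_size() from <linux/overflow.h>. A minimal sketch of what the helper provides, assuming (as the calls imply) that the channel array is the trailing flexible-array member of each driver struct; the struct and function below are illustrative only:

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct sketch_dma {
	unsigned int nr_channels;
	struct sketch_chan {
		int id;
	} channels[];			/* flexible array member */
};

static struct sketch_dma *sketch_alloc(struct device *dev, unsigned int n)
{
	struct sketch_dma *tdma;

	/*
	 * struct_size(tdma, channels, n) evaluates to
	 * sizeof(*tdma) + n * sizeof(tdma->channels[0]) and saturates to
	 * SIZE_MAX on overflow, so the allocation fails cleanly instead of
	 * returning an undersized buffer.
	 */
	tdma = devm_kzalloc(dev, struct_size(tdma, channels, n), GFP_KERNEL);
	if (tdma)
		tdma->nr_channels = n;
	return tdma;
}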

include/trace/events/tegra_apb_dma.h
#if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_TEGRA_APB_DMA_H
#include <linux/tracepoint.h>
#include <linux/dmaengine.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM tegra_apb_dma
TRACE_EVENT(tegra_dma_tx_status,
TP_PROTO(struct dma_chan *dc, dma_cookie_t cookie, struct dma_tx_state *state),
TP_ARGS(dc, cookie, state),
TP_STRUCT__entry(
__string(chan, dev_name(&dc->dev->device))
__field(dma_cookie_t, cookie)
__field(__u32, residue)
),
TP_fast_assign(
__assign_str(chan, dev_name(&dc->dev->device));
__entry->cookie = cookie;
__entry->residue = state ? state->residue : (u32)-1;
),
TP_printk("channel %s: dma cookie %d, residue %u",
__get_str(chan), __entry->cookie, __entry->residue)
);
TRACE_EVENT(tegra_dma_complete_cb,
TP_PROTO(struct dma_chan *dc, int count, void *ptr),
TP_ARGS(dc, count, ptr),
TP_STRUCT__entry(
__string(chan, dev_name(&dc->dev->device))
__field(int, count)
__field(void *, ptr)
),
TP_fast_assign(
__assign_str(chan, dev_name(&dc->dev->device));
__entry->count = count;
__entry->ptr = ptr;
),
TP_printk("channel %s: done %d, ptr %p",
__get_str(chan), __entry->count, __entry->ptr)
);
TRACE_EVENT(tegra_dma_isr,
TP_PROTO(struct dma_chan *dc, int irq),
TP_ARGS(dc, irq),
TP_STRUCT__entry(
__string(chan, dev_name(&dc->dev->device))
__field(int, irq)
),
TP_fast_assign(
__assign_str(chan, dev_name(&dc->dev->device));
__entry->irq = irq;
),
TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
);
#endif /* _TRACE_TEGRA_APB_DMA_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
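For context on how the new header is consumed: each TRACE_EVENT() above generates a static inline trace_<event>() helper matching its TP_PROTO (these are the calls added in the driver hunks earlier), together with a trace_<event>_enabled() predicate. A hypothetical additional user of the header could look like the sketch below; the function name is illustrative only:

#include <linux/dmaengine.h>
#include <trace/events/tegra_apb_dma.h>	/* no CREATE_TRACE_POINTS here */

/* Hypothetical wrapper: skip the call entirely when nobody is tracing. */
static void sketch_trace_status(struct dma_chan *chan, dma_cookie_t cookie,
				struct dma_tx_state *state)
{
	if (trace_tegra_dma_tx_status_enabled())
		trace_tegra_dma_tx_status(chan, cookie, state);
}

At runtime the three events are grouped under the tegra_apb_dma system in tracefs (typically /sys/kernel/tracing/events/tegra_apb_dma/), and each hit is rendered with the TP_printk() format given in its event definition.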