Commit d9650682 authored by Ilya Novikov, committed by Vinod Koul

dmaengine: PTDMA: support polled mode

If the DMA_PREP_INTERRUPT flag is not provided, run the transfer in polled
mode, which significantly improves IOPS: more than a twofold gain on chunks
smaller than 4K.
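
For reference, a minimal sketch of how a dmaengine client could exercise the
polled path (the helper name do_polled_copy() and its error handling are
illustrative only, not part of this patch):

  #include <linux/dmaengine.h>

  /*
   * Illustrative client-side polled transfer; assumes dst/src are DMA
   * addresses already mapped for the channel's device.
   */
  static int do_polled_copy(struct dma_chan *chan, dma_addr_t dst,
                            dma_addr_t src, size_t len)
  {
          struct dma_async_tx_descriptor *tx;
          dma_cookie_t cookie;

          /* No DMA_PREP_INTERRUPT, so the queue runs without interrupts */
          tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
          if (!tx)
                  return -EIO;

          cookie = dmaengine_submit(tx);
          if (dma_submit_error(cookie))
                  return -EIO;

          dma_async_issue_pending(chan);

          /* Completion is observed by polling device_tx_status() */
          while (dmaengine_tx_status(chan, cookie, NULL) == DMA_IN_PROGRESS)
                  cpu_relax();

          return 0;
  }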
Signed-off-by: Ilya Novikov <i.m.novikov@yadro.com>
Link: https://lore.kernel.org/r/20220413113733.59041-1-i.m.novikov@yadro.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 57824530
@@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
                              struct pt_passthru_engine *pt_engine)
 {
         struct ptdma_desc desc;
+        struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);
 
         cmd_q->cmd_error = 0;
         cmd_q->total_pt_ops++;
@@ -111,17 +112,12 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
         desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
         desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
 
-        return pt_core_execute_cmd(&desc, cmd_q);
-}
-
-static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
-{
-        iowrite32(0, pt->cmd_q.reg_control + 0x000C);
-}
+        if (cmd_q->int_en)
+                pt_core_enable_queue_interrupts(pt);
+        else
+                pt_core_disable_queue_interrupts(pt);
 
-static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
-{
-        iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+        return pt_core_execute_cmd(&desc, cmd_q);
 }
 
 static void pt_do_cmd_complete(unsigned long data)
@@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data)
         cmd->pt_cmd_callback(cmd->data, cmd->ret);
 }
 
-static irqreturn_t pt_core_irq_handler(int irq, void *data)
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
 {
-        struct pt_device *pt = data;
-        struct pt_cmd_queue *cmd_q = &pt->cmd_q;
         u32 status;
 
-        pt_core_disable_queue_interrupts(pt);
-        pt->total_interrupts++;
         status = ioread32(cmd_q->reg_control + 0x0010);
         if (status) {
                 cmd_q->int_status = status;
@@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data)
                 if ((status & INT_ERROR) && !cmd_q->cmd_error)
                         cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
 
-                /* Acknowledge the interrupt */
+                /* Acknowledge the completion */
                 iowrite32(status, cmd_q->reg_control + 0x0010);
-                pt_core_enable_queue_interrupts(pt);
                 pt_do_cmd_complete((ulong)&pt->tdata);
         }
+}
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+        struct pt_device *pt = data;
+        struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+        pt_core_disable_queue_interrupts(pt);
+        pt->total_interrupts++;
+        pt_check_status_trans(pt, cmd_q);
+        pt_core_enable_queue_interrupts(pt);
         return IRQ_HANDLED;
 }
@@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
         vchan_tx_prep(&chan->vc, &desc->vd, flags);
 
         desc->pt = chan->pt;
+        desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
         desc->issued_to_hw = 0;
         desc->status = DMA_IN_PROGRESS;
 
@@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
                 pt_cmd_callback(desc, 0);
 }
 
+enum dma_status
+pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+                struct dma_tx_state *txstate)
+{
+        struct pt_device *pt = to_pt_chan(c)->pt;
+        struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+        pt_check_status_trans(pt, cmd_q);
+        return dma_cookie_status(c, cookie, txstate);
+}
+
 static int pt_pause(struct dma_chan *dma_chan)
 {
         struct pt_dma_chan *chan = to_pt_chan(dma_chan);
@@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
 {
         struct pt_dma_chan *chan = to_pt_chan(dma_chan);
         unsigned long flags;
+        struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
         LIST_HEAD(head);
 
+        iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
         spin_lock_irqsave(&chan->vc.lock, flags);
         vchan_get_all_descriptors(&chan->vc, &head);
         spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt)
         dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
         dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
         dma_dev->device_issue_pending = pt_issue_pending;
-        dma_dev->device_tx_status = dma_cookie_status;
+        dma_dev->device_tx_status = pt_tx_status;
         dma_dev->device_pause = pt_pause;
         dma_dev->device_resume = pt_resume;
         dma_dev->device_terminate_all = pt_terminate_all;
@@ -206,6 +206,9 @@ struct pt_cmd_queue {
         unsigned int active;
         unsigned int suspended;
 
+        /* Interrupt flag */
+        bool int_en;
+
         /* Register addresses for queue */
         void __iomem *reg_control;
         u32 qcontrol; /* Cached control register */
@@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt);
 int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
                              struct pt_passthru_engine *pt_engine);
 
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q);
+
 void pt_start_queue(struct pt_cmd_queue *cmd_q);
 void pt_stop_queue(struct pt_cmd_queue *cmd_q);
 
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+        iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+        iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
+
 #endif