Commit b0b4a6b1 authored by Sanjay R Mehta, committed by Vinod Koul

dmaengine: ptdma: register PTDMA controller as a DMA resource

Register the ptdma queue with the Linux dmaengine framework as a
general-purpose DMA channel.
Signed-off-by: Sanjay R Mehta <sanju.mehta@amd.com>
Link: https://lore.kernel.org/r/1629208559-51964-3-git-send-email-Sanju.Mehta@amd.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent fa5d823b
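For context, and not part of the patch itself: once the controller is registered, a consumer drives it through the generic dmaengine API. Below is a minimal, hypothetical client sketch; the function name and error handling are illustrative assumptions, not code from this series.

/* Hypothetical consumer sketch (not part of this commit). */
#include <linux/dmaengine.h>

static int pt_example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* The channel is DMA_PRIVATE, so it is handed out per requester */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EBUSY;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait helper; a real client would typically use a callback */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}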
@@ -2,6 +2,8 @@
 config AMD_PTDMA
 	tristate "AMD PassThru DMA Engine"
 	depends on X86_64 && PCI
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Enable support for the AMD PTDMA controller. This controller
 	  provides DMA capabilities to perform high bandwidth memory to
...
@@ -5,6 +5,6 @@
 obj-$(CONFIG_AMD_PTDMA) += ptdma.o
-ptdma-objs := ptdma-dev.o
+ptdma-objs := ptdma-dev.o ptdma-dmaengine.o
 ptdma-$(CONFIG_PCI) += ptdma-pci.o
@@ -123,6 +123,26 @@ static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
 	iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
 }
 
+static void pt_do_cmd_complete(unsigned long data)
+{
+	struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
+	struct pt_cmd *cmd = tdata->cmd;
+	struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
+	u32 tail;
+
+	if (cmd_q->cmd_error) {
+		/*
+		 * Log the error and flush the queue by
+		 * moving the head pointer
+		 */
+		tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
+		pt_log_error(cmd_q->pt, cmd_q->cmd_error);
+		iowrite32(tail, cmd_q->reg_control + 0x0008);
+	}
+
+	cmd->pt_cmd_callback(cmd->data, cmd->ret);
+}
+
 static irqreturn_t pt_core_irq_handler(int irq, void *data)
 {
 	struct pt_device *pt = data;
@@ -143,6 +163,7 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data)
 		/* Acknowledge the interrupt */
 		iowrite32(status, cmd_q->reg_control + 0x0010);
 		pt_core_enable_queue_interrupts(pt);
+		pt_do_cmd_complete((ulong)&pt->tdata);
 	}
 
 	return IRQ_HANDLED;
 }
@@ -224,8 +245,16 @@ int pt_core_init(struct pt_device *pt)
 	pt_core_enable_queue_interrupts(pt);
 
+	/* Register the DMA engine support */
+	ret = pt_dmaengine_register(pt);
+	if (ret)
+		goto e_dmaengine;
+
 	return 0;
 
+e_dmaengine:
+	free_irq(pt->pt_irq, pt);
+
 e_dma_alloc:
 	dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
@@ -242,6 +271,9 @@ void pt_core_destroy(struct pt_device *pt)
 	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
 	struct pt_cmd *cmd;
 
+	/* Unregister the DMA engine */
+	pt_dmaengine_unregister(pt);
+
 	/* Disable and clear interrupts */
 	pt_core_disable_queue_interrupts(pt);
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Passthrough DMA device driver
* -- Based on the CCP driver
*
* Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
*
* Author: Sanjay R Mehta <sanju.mehta@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*/
#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}
static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct pt_dma_desc, vd);
}
static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
vchan_free_chan_resources(&chan->vc);
}
static void pt_synchronize(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
vchan_synchronize(&chan->vc);
}
static void pt_do_cleanup(struct virt_dma_desc *vd)
{
struct pt_dma_desc *desc = to_pt_desc(vd);
struct pt_device *pt = desc->pt;
kmem_cache_free(pt->dma_desc_cache, desc);
}
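
/* Hand one prepared descriptor to the hardware queue via the passthru engine. */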
static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
struct pt_passthru_engine *pt_engine;
struct pt_device *pt;
struct pt_cmd *pt_cmd;
struct pt_cmd_queue *cmd_q;
desc->issued_to_hw = 1;
pt_cmd = &desc->pt_cmd;
pt = pt_cmd->pt;
cmd_q = &pt->cmd_q;
pt_engine = &pt_cmd->passthru;
pt->tdata.cmd = pt_cmd;
/* Execute the command */
pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
return 0;
}
static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
/* Get the next DMA descriptor on the active list */
struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
return vd ? to_pt_desc(vd) : NULL;
}
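
/*
 * Walk the channel's active list: finish the current descriptor (unless
 * it still has work outstanding), pop the next one, and invoke callbacks
 * and dependencies outside the vc.lock.
 */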
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
struct pt_dma_desc *desc)
{
struct dma_async_tx_descriptor *tx_desc;
struct virt_dma_desc *vd;
unsigned long flags;
/* Loop over descriptors until one is found with commands */
do {
if (desc) {
if (!desc->issued_to_hw) {
/* No errors, keep going */
if (desc->status != DMA_ERROR)
return desc;
}
tx_desc = &desc->vd.tx;
vd = &desc->vd;
} else {
tx_desc = NULL;
}
spin_lock_irqsave(&chan->vc.lock, flags);
if (desc) {
if (desc->status != DMA_ERROR)
desc->status = DMA_COMPLETE;
dma_cookie_complete(tx_desc);
dma_descriptor_unmap(tx_desc);
list_del(&desc->vd.node);
}
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
if (tx_desc) {
dmaengine_desc_get_callback_invoke(tx_desc, NULL);
dma_run_dependencies(tx_desc);
vchan_vdesc_fini(vd);
}
} while (desc);
return NULL;
}
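
/*
 * Completion callback wired into every pt_cmd; reached from the IRQ path
 * via pt_do_cmd_complete(), and also used by pt_issue_pending() and
 * pt_resume() to kick off the first descriptor.
 */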
static void pt_cmd_callback(void *data, int err)
{
struct pt_dma_desc *desc = data;
struct dma_chan *dma_chan;
struct pt_dma_chan *chan;
int ret;
if (err == -EINPROGRESS)
return;
dma_chan = desc->vd.tx.chan;
chan = to_pt_chan(dma_chan);
if (err)
desc->status = DMA_ERROR;
while (true) {
/* Check for DMA descriptor completion */
desc = pt_handle_active_desc(chan, desc);
/* Don't submit cmd if no descriptor or DMA is paused */
if (!desc)
break;
ret = pt_dma_start_desc(desc);
if (!ret)
break;
desc->status = DMA_ERROR;
}
}
static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
unsigned long flags)
{
struct pt_dma_desc *desc;
desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
if (!desc)
return NULL;
vchan_tx_prep(&chan->vc, &desc->vd, flags);
desc->pt = chan->pt;
desc->issued_to_hw = 0;
desc->status = DMA_IN_PROGRESS;
return desc;
}
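
/* Build a passthru command describing a single dst/src/len copy. */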
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
dma_addr_t dst,
dma_addr_t src,
unsigned int len,
unsigned long flags)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_passthru_engine *pt_engine;
struct pt_dma_desc *desc;
struct pt_cmd *pt_cmd;
desc = pt_alloc_dma_desc(chan, flags);
if (!desc)
return NULL;
pt_cmd = &desc->pt_cmd;
pt_cmd->pt = chan->pt;
pt_engine = &pt_cmd->passthru;
pt_cmd->engine = PT_ENGINE_PASSTHRU;
pt_engine->src_dma = src;
pt_engine->dst_dma = dst;
pt_engine->src_len = len;
pt_cmd->pt_cmd_callback = pt_cmd_callback;
pt_cmd->data = desc;
desc->len = len;
return desc;
}
static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
dma_addr_t src, size_t len, unsigned long flags)
{
struct pt_dma_desc *desc;
desc = pt_create_desc(dma_chan, dst, src, len, flags);
if (!desc)
return NULL;
return &desc->vd.tx;
}
static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc;
desc = pt_alloc_dma_desc(chan, flags);
if (!desc)
return NULL;
return &desc->vd.tx;
}
static void pt_issue_pending(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_issue_pending(&chan->vc);
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was nothing active, start processing */
if (desc)
pt_cmd_callback(desc, 0);
}
static int pt_pause(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
pt_stop_queue(&chan->pt->cmd_q);
spin_unlock_irqrestore(&chan->vc.lock, flags);
return 0;
}
static int pt_resume(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc = NULL;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
pt_start_queue(&chan->pt->cmd_q);
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was something active, re-start */
if (desc)
pt_cmd_callback(desc, 0);
return 0;
}
static int pt_terminate_all(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags);
vchan_dma_desc_free_list(&chan->vc, &head);
vchan_free_chan_resources(&chan->vc);
return 0;
}
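
/*
 * Allocate the single channel, create the descriptor cache, advertise
 * MEMCPY/INTERRUPT capabilities (kept DMA_PRIVATE for NTB use), wire up
 * the callbacks and register with the dmaengine core.
 */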
int pt_dmaengine_register(struct pt_device *pt)
{
struct pt_dma_chan *chan;
struct dma_device *dma_dev = &pt->dma_dev;
char *cmd_cache_name;
char *desc_cache_name;
int ret;
pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
GFP_KERNEL);
if (!pt->pt_dma_chan)
return -ENOMEM;
cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-cmd-cache",
dev_name(pt->dev));
if (!cmd_cache_name)
return -ENOMEM;
desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
dev_name(pt->dev));
if (!desc_cache_name) {
ret = -ENOMEM;
goto err_cache;
}
pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
sizeof(struct pt_dma_desc), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!pt->dma_desc_cache) {
ret = -ENOMEM;
goto err_cache;
}
dma_dev->dev = pt->dev;
dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
dma_dev->directions = DMA_MEM_TO_MEM;
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
/*
* PTDMA is intended to be used with the AMD NTB devices, hence
* marking it as DMA_PRIVATE.
*/
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
INIT_LIST_HEAD(&dma_dev->channels);
chan = pt->pt_dma_chan;
chan->pt = pt;
/* Set base and prep routines */
dma_dev->device_free_chan_resources = pt_free_chan_resources;
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
dma_dev->device_issue_pending = pt_issue_pending;
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_pause = pt_pause;
dma_dev->device_resume = pt_resume;
dma_dev->device_terminate_all = pt_terminate_all;
dma_dev->device_synchronize = pt_synchronize;
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
return 0;
err_reg:
kmem_cache_destroy(pt->dma_desc_cache);
err_cache:
kmem_cache_destroy(pt->dma_cmd_cache);
return ret;
}
void pt_dmaengine_unregister(struct pt_device *pt)
{
struct dma_device *dma_dev = &pt->dma_dev;
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(pt->dma_desc_cache);
kmem_cache_destroy(pt->dma_cmd_cache);
}
@@ -14,6 +14,7 @@
 #define __PT_DEV_H__
 
 #include <linux/device.h>
+#include <linux/dmaengine.h>
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
@@ -21,6 +22,8 @@
 #include <linux/wait.h>
 #include <linux/dmapool.h>
 
+#include "../virt-dma.h"
+
 #define MAX_PT_NAME_LEN 16
 #define MAX_DMAPOOL_NAME_LEN 32
@@ -170,6 +173,20 @@ struct pt_cmd {
 	void *data;
 };
 
+struct pt_dma_desc {
+	struct virt_dma_desc vd;
+	struct pt_device *pt;
+	enum dma_status status;
+	size_t len;
+	bool issued_to_hw;
+	struct pt_cmd pt_cmd;
+};
+
+struct pt_dma_chan {
+	struct virt_dma_chan vc;
+	struct pt_device *pt;
+};
+
 struct pt_cmd_queue {
 	struct pt_device *pt;
@@ -229,6 +246,12 @@ struct pt_device {
 	 */
 	struct pt_cmd_queue cmd_q;
 
+	/* Support for the DMA Engine capabilities */
+	struct dma_device dma_dev;
+	struct pt_dma_chan *pt_dma_chan;
+	struct kmem_cache *dma_cmd_cache;
+	struct kmem_cache *dma_desc_cache;
+
 	wait_queue_head_t lsb_queue;
 
 	struct pt_tasklet_data tdata;
@@ -281,6 +304,9 @@ struct pt_dev_vdata {
 	const unsigned int bar;
 };
 
+int pt_dmaengine_register(struct pt_device *pt);
+void pt_dmaengine_unregister(struct pt_device *pt);
+
 int pt_core_init(struct pt_device *pt);
 void pt_core_destroy(struct pt_device *pt);
...