Commit b7e97d22 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine updates from Vinod Koul:
 "This time we have Andy updates on dw_dmac which is attempting to make
  this IP block available as PCI and platform device though not fully
  complete this time.

  We also have TI EDMA moving the dma driver to use dmaengine APIs, also
  have a new driver for mmp-tdma, along with bunch of small updates.

  Now for your excitement the merge is little unusual here, while
  merging the auto merge on linux-next picks wrong choice for pl330
  (drivers/dma/pl330.c) and this causes build failure.  The correct
  resolution is in linux-next.  (DMA: PL330: Fix build error) I didn't
  back merge your tree this time as you are better than me so no point
  in doing that for me :)"

Fixed the pl330 conflict as in linux-next, along with trivial header
file conflicts due to changed includes.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (29 commits)
  dma: tegra: fix interrupt name issue with apb dma.
  dw_dmac: fix a regression in dwc_prep_dma_memcpy
  dw_dmac: introduce software emulation of LLP transfers
  dw_dmac: autoconfigure data_width or get it via platform data
  dw_dmac: autoconfigure block_size or use platform data
  dw_dmac: get number of channels from hardware if possible
  dw_dmac: fill optional encoded parameters in register structure
  dw_dmac: mark dwc_dump_chan_regs as inline
  DMA: PL330: return ENOMEM instead of 0 from pl330_alloc_chan_resources
  DMA: PL330: Remove redundant runtime_suspend/resume functions
  DMA: PL330: Remove controller clock enable/disable
  dmaengine: use kmem_cache_zalloc instead of kmem_cache_alloc/memset
  DMA: PL330: Set the capability of pdm0 and pdm1 as DMA_PRIVATE
  ARM: EXYNOS: Set the capability of pdm0 and pdm1 as DMA_PRIVATE
  dma: tegra: use list_move_tail instead of list_del/list_add_tail
  mxs/dma: Enlarge the CCW descriptor area to 4 pages
  dw_dmac: utilize slave_id to pass request line
  dmaengine: mmp_tdma: add dt support
  dmaengine: mmp-pdma support
  spi: davici - make davinci select edma
  ...
parents 943c2ace d0fc9054
...@@ -303,10 +303,12 @@ static int __init exynos_dma_init(void) ...@@ -303,10 +303,12 @@ static int __init exynos_dma_init(void)
dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask); dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask); dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
dma_cap_set(DMA_PRIVATE, exynos_pdma0_pdata.cap_mask);
amba_device_register(&exynos_pdma0_device, &iomem_resource); amba_device_register(&exynos_pdma0_device, &iomem_resource);
dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask); dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask); dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
dma_cap_set(DMA_PRIVATE, exynos_pdma1_pdata.cap_mask);
amba_device_register(&exynos_pdma1_device, &iomem_resource); amba_device_register(&exynos_pdma1_device, &iomem_resource);
dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask); dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
......
...@@ -78,6 +78,9 @@ struct dw_dma_platform_data dmac_plat_data = { ...@@ -78,6 +78,9 @@ struct dw_dma_platform_data dmac_plat_data = {
.nr_channels = 8, .nr_channels = 8,
.chan_allocation_order = CHAN_ALLOCATION_DESCENDING, .chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
.chan_priority = CHAN_PRIORITY_DESCENDING, .chan_priority = CHAN_PRIORITY_DESCENDING,
.block_size = 4095U,
.nr_masters = 2,
.data_width = { 3, 3, 0, 0 },
}; };
void __init spear13xx_l2x0_init(void) void __init spear13xx_l2x0_init(void)
......
...@@ -605,6 +605,9 @@ static void __init genclk_init_parent(struct clk *clk) ...@@ -605,6 +605,9 @@ static void __init genclk_init_parent(struct clk *clk)
static struct dw_dma_platform_data dw_dmac0_data = { static struct dw_dma_platform_data dw_dmac0_data = {
.nr_channels = 3, .nr_channels = 3,
.block_size = 4095U,
.nr_masters = 2,
.data_width = { 2, 2, 0, 0 },
}; };
static struct resource dw_dmac0_resource[] = { static struct resource dw_dmac0_resource[] = {
......
...@@ -208,6 +208,16 @@ config SIRF_DMA ...@@ -208,6 +208,16 @@ config SIRF_DMA
help help
Enable support for the CSR SiRFprimaII DMA engine. Enable support for the CSR SiRFprimaII DMA engine.
config TI_EDMA
tristate "TI EDMA support"
depends on ARCH_DAVINCI
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
default n
help
Enable support for the TI EDMA controller. This DMA
engine is found on TI DaVinci and AM33xx parts.
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool bool
...@@ -292,6 +302,13 @@ config DMA_OMAP ...@@ -292,6 +302,13 @@ config DMA_OMAP
select DMA_ENGINE select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS select DMA_VIRTUAL_CHANNELS
config MMP_PDMA
bool "MMP PDMA support"
depends on (ARCH_MMP || ARCH_PXA)
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platfrom.
config DMA_ENGINE config DMA_ENGINE
bool bool
......
...@@ -23,6 +23,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o ...@@ -23,6 +23,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o obj-$(CONFIG_PL330_DMA) += pl330.o
...@@ -32,3 +33,4 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o ...@@ -32,3 +33,4 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
...@@ -1892,6 +1892,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -1892,6 +1892,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->pd = dev_get_platdata(&adev->dev); pl08x->pd = dev_get_platdata(&adev->dev);
if (!pl08x->pd) { if (!pl08x->pd) {
dev_err(&adev->dev, "no platform data supplied\n"); dev_err(&adev->dev, "no platform data supplied\n");
ret = -EINVAL;
goto out_no_platdata; goto out_no_platdata;
} }
...@@ -1943,6 +1944,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -1943,6 +1944,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
dev_err(&adev->dev, "%s failed to allocate " dev_err(&adev->dev, "%s failed to allocate "
"physical channel holders\n", "physical channel holders\n",
__func__); __func__);
ret = -ENOMEM;
goto out_no_phychans; goto out_no_phychans;
} }
......
This diff is collapsed.
...@@ -82,9 +82,39 @@ struct dw_dma_regs { ...@@ -82,9 +82,39 @@ struct dw_dma_regs {
DW_REG(ID); DW_REG(ID);
DW_REG(TEST); DW_REG(TEST);
/* reserved */
DW_REG(__reserved0);
DW_REG(__reserved1);
/* optional encoded params, 0x3c8..0x3f7 */ /* optional encoded params, 0x3c8..0x3f7 */
u32 __reserved;
/* per-channel configuration registers */
u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
u32 MULTI_BLK_TYPE;
u32 MAX_BLK_SIZE;
/* top-level parameters */
u32 DW_PARAMS;
}; };
/* To access the registers in early stage of probe */
#define dma_read_byaddr(addr, name) \
readl((addr) + offsetof(struct dw_dma_regs, name))
/* Bitfields in DW_PARAMS */
#define DW_PARAMS_NR_CHAN 8 /* number of channels */
#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
#define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n))
#define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */
#define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */
#define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */
#define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */
#define DW_PARAMS_EN 28 /* encoded parameters */
/* Bitfields in DWC_PARAMS */
#define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */
/* Bitfields in CTL_LO */ /* Bitfields in CTL_LO */
#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */ #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */ #define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
...@@ -140,10 +170,9 @@ struct dw_dma_regs { ...@@ -140,10 +170,9 @@ struct dw_dma_regs {
/* Bitfields in CFG */ /* Bitfields in CFG */
#define DW_CFG_DMA_EN (1 << 0) #define DW_CFG_DMA_EN (1 << 0)
#define DW_REGLEN 0x400
enum dw_dmac_flags { enum dw_dmac_flags {
DW_DMA_IS_CYCLIC = 0, DW_DMA_IS_CYCLIC = 0,
DW_DMA_IS_SOFT_LLP = 1,
}; };
struct dw_dma_chan { struct dw_dma_chan {
...@@ -154,6 +183,10 @@ struct dw_dma_chan { ...@@ -154,6 +183,10 @@ struct dw_dma_chan {
bool paused; bool paused;
bool initialized; bool initialized;
/* software emulation of the LLP transfers */
struct list_head *tx_list;
struct list_head *tx_node_active;
spinlock_t lock; spinlock_t lock;
/* these other elements are all protected by lock */ /* these other elements are all protected by lock */
...@@ -165,8 +198,15 @@ struct dw_dma_chan { ...@@ -165,8 +198,15 @@ struct dw_dma_chan {
unsigned int descs_allocated; unsigned int descs_allocated;
/* hardware configuration */
unsigned int block_size;
bool nollp;
/* configuration passed via DMA_SLAVE_CONFIG */ /* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig; struct dma_slave_config dma_sconfig;
/* backlink to dw_dma */
struct dw_dma *dw;
}; };
static inline struct dw_dma_chan_regs __iomem * static inline struct dw_dma_chan_regs __iomem *
...@@ -193,6 +233,10 @@ struct dw_dma { ...@@ -193,6 +233,10 @@ struct dw_dma {
u8 all_chan_mask; u8 all_chan_mask;
/* hardware configuration */
unsigned char nr_masters;
unsigned char data_width[4];
struct dw_dma_chan chan[0]; struct dw_dma_chan chan[0];
}; };
......
This diff is collapsed.
...@@ -434,12 +434,11 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f ...@@ -434,12 +434,11 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f
return NULL; return NULL;
memset(hw, 0, sizeof(*hw)); memset(hw, 0, sizeof(*hw));
desc = kmem_cache_alloc(ioat2_cache, flags); desc = kmem_cache_zalloc(ioat2_cache, flags);
if (!desc) { if (!desc) {
pci_pool_free(dma->dma_pool, hw, phys); pci_pool_free(dma->dma_pool, hw, phys);
return NULL; return NULL;
} }
memset(desc, 0, sizeof(*desc));
dma_async_tx_descriptor_init(&desc->txd, chan); dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = ioat2_tx_submit_unlock; desc->txd.tx_submit = ioat2_tx_submit_unlock;
......
...@@ -40,6 +40,17 @@ MODULE_VERSION(IOAT_DMA_VERSION); ...@@ -40,6 +40,17 @@ MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation"); MODULE_AUTHOR("Intel Corporation");
#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
#define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23
#define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24
#define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25
#define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26
#define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27
#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
static struct pci_device_id ioat_pci_tbl[] = { static struct pci_device_id ioat_pci_tbl[] = {
/* I/OAT v1 platforms */ /* I/OAT v1 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
...@@ -83,6 +94,17 @@ static struct pci_device_id ioat_pci_tbl[] = { ...@@ -83,6 +94,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
{ 0, } { 0, }
}; };
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
......
This diff is collapsed.
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <mach/regs-icu.h> #include <mach/regs-icu.h>
#include <linux/platform_data/dma-mmp_tdma.h> #include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/of_device.h>
#include "dmaengine.h" #include "dmaengine.h"
...@@ -127,7 +128,6 @@ struct mmp_tdma_device { ...@@ -127,7 +128,6 @@ struct mmp_tdma_device {
void __iomem *base; void __iomem *base;
struct dma_device device; struct dma_device device;
struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
int irq;
}; };
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
...@@ -492,7 +492,7 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, ...@@ -492,7 +492,7 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
return -ENOMEM; return -ENOMEM;
} }
if (irq) if (irq)
tdmac->irq = irq + idx; tdmac->irq = irq;
tdmac->dev = tdev->dev; tdmac->dev = tdev->dev;
tdmac->chan.device = &tdev->device; tdmac->chan.device = &tdev->device;
tdmac->idx = idx; tdmac->idx = idx;
...@@ -505,34 +505,43 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, ...@@ -505,34 +505,43 @@ static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
/* add the channel to tdma_chan list */ /* add the channel to tdma_chan list */
list_add_tail(&tdmac->chan.device_node, list_add_tail(&tdmac->chan.device_node,
&tdev->device.channels); &tdev->device.channels);
return 0; return 0;
} }
static struct of_device_id mmp_tdma_dt_ids[] = {
{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
{}
};
MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids);
static int __devinit mmp_tdma_probe(struct platform_device *pdev) static int __devinit mmp_tdma_probe(struct platform_device *pdev)
{ {
const struct platform_device_id *id = platform_get_device_id(pdev); enum mmp_tdma_type type;
enum mmp_tdma_type type = id->driver_data; const struct of_device_id *of_id;
struct mmp_tdma_device *tdev; struct mmp_tdma_device *tdev;
struct resource *iores; struct resource *iores;
int i, ret; int i, ret;
int irq = 0; int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM; int chan_num = TDMA_CHANNEL_NUM;
of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
if (of_id)
type = (enum mmp_tdma_type) of_id->data;
else
type = platform_get_device_id(pdev)->driver_data;
/* always have couple channels */ /* always have couple channels */
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
if (!tdev) if (!tdev)
return -ENOMEM; return -ENOMEM;
tdev->dev = &pdev->dev; tdev->dev = &pdev->dev;
iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!iores)
return -EINVAL;
if (resource_size(iores) != chan_num) for (i = 0; i < chan_num; i++) {
tdev->irq = iores->start; if (platform_get_irq(pdev, i) > 0)
else irq_num++;
irq = iores->start; }
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) if (!iores)
...@@ -542,25 +551,26 @@ static int __devinit mmp_tdma_probe(struct platform_device *pdev) ...@@ -542,25 +551,26 @@ static int __devinit mmp_tdma_probe(struct platform_device *pdev)
if (!tdev->base) if (!tdev->base)
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
if (tdev->irq) { INIT_LIST_HEAD(&tdev->device.channels);
ret = devm_request_irq(&pdev->dev, tdev->irq,
if (irq_num != chan_num) {
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
if (ret) if (ret)
return ret; return ret;
} }
dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
INIT_LIST_HEAD(&tdev->device.channels);
/* initialize channel parameters */ /* initialize channel parameters */
for (i = 0; i < chan_num; i++) { for (i = 0; i < chan_num; i++) {
irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
ret = mmp_tdma_chan_init(tdev, i, irq, type); ret = mmp_tdma_chan_init(tdev, i, irq, type);
if (ret) if (ret)
return ret; return ret;
} }
dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
tdev->device.dev = &pdev->dev; tdev->device.dev = &pdev->dev;
tdev->device.device_alloc_chan_resources = tdev->device.device_alloc_chan_resources =
mmp_tdma_alloc_chan_resources; mmp_tdma_alloc_chan_resources;
...@@ -595,6 +605,7 @@ static struct platform_driver mmp_tdma_driver = { ...@@ -595,6 +605,7 @@ static struct platform_driver mmp_tdma_driver = {
.driver = { .driver = {
.name = "mmp-tdma", .name = "mmp-tdma",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.of_match_table = mmp_tdma_dt_ids,
}, },
.id_table = mmp_tdma_id_table, .id_table = mmp_tdma_id_table,
.probe = mmp_tdma_probe, .probe = mmp_tdma_probe,
......
...@@ -101,7 +101,8 @@ struct mxs_dma_ccw { ...@@ -101,7 +101,8 @@ struct mxs_dma_ccw {
u32 pio_words[MXS_PIO_WORDS]; u32 pio_words[MXS_PIO_WORDS];
}; };
#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) #define CCW_BLOCK_SIZE (4 * PAGE_SIZE)
#define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))
struct mxs_dma_chan { struct mxs_dma_chan {
struct mxs_dma_engine *mxs_dma; struct mxs_dma_engine *mxs_dma;
...@@ -354,14 +355,15 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) ...@@ -354,14 +355,15 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
mxs_chan->chan_irq = data->chan_irq; mxs_chan->chan_irq = data->chan_irq;
mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
&mxs_chan->ccw_phys, GFP_KERNEL); CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
GFP_KERNEL);
if (!mxs_chan->ccw) { if (!mxs_chan->ccw) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_alloc; goto err_alloc;
} }
memset(mxs_chan->ccw, 0, PAGE_SIZE); memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);
if (mxs_chan->chan_irq != NO_IRQ) { if (mxs_chan->chan_irq != NO_IRQ) {
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
...@@ -387,7 +389,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) ...@@ -387,7 +389,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
err_clk: err_clk:
free_irq(mxs_chan->chan_irq, mxs_dma); free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq: err_irq:
dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys); mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc: err_alloc:
return ret; return ret;
...@@ -402,7 +404,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) ...@@ -402,7 +404,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
free_irq(mxs_chan->chan_irq, mxs_dma); free_irq(mxs_chan->chan_irq, mxs_dma);
dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys); mxs_chan->ccw, mxs_chan->ccw_phys);
clk_disable_unprepare(mxs_dma->clk); clk_disable_unprepare(mxs_dma->clk);
......
...@@ -23,7 +23,6 @@ ...@@ -23,7 +23,6 @@
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/amba/bus.h> #include <linux/amba/bus.h>
#include <linux/amba/pl330.h> #include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/of.h> #include <linux/of.h>
...@@ -586,8 +585,6 @@ struct dma_pl330_dmac { ...@@ -586,8 +585,6 @@ struct dma_pl330_dmac {
/* Peripheral channels connected to this DMAC */ /* Peripheral channels connected to this DMAC */
struct dma_pl330_chan *peripherals; /* keep at end */ struct dma_pl330_chan *peripherals; /* keep at end */
struct clk *clk;
}; };
struct dma_pl330_desc { struct dma_pl330_desc {
...@@ -2395,7 +2392,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) ...@@ -2395,7 +2392,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
pch->pl330_chid = pl330_request_channel(&pdmac->pif); pch->pl330_chid = pl330_request_channel(&pdmac->pif);
if (!pch->pl330_chid) { if (!pch->pl330_chid) {
spin_unlock_irqrestore(&pch->lock, flags); spin_unlock_irqrestore(&pch->lock, flags);
return 0; return -ENOMEM;
} }
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
...@@ -2889,29 +2886,17 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2889,29 +2886,17 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
goto probe_err1; goto probe_err1;
} }
pdmac->clk = clk_get(&adev->dev, "dma");
if (IS_ERR(pdmac->clk)) {
dev_err(&adev->dev, "Cannot get operation clock.\n");
ret = -EINVAL;
goto probe_err2;
}
amba_set_drvdata(adev, pdmac); amba_set_drvdata(adev, pdmac);
#ifndef CONFIG_PM_RUNTIME
/* enable dma clk */
clk_enable(pdmac->clk);
#endif
irq = adev->irq[0]; irq = adev->irq[0];
ret = request_irq(irq, pl330_irq_handler, 0, ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi); dev_name(&adev->dev), pi);
if (ret) if (ret)
goto probe_err3; goto probe_err2;
ret = pl330_add(pi); ret = pl330_add(pi);
if (ret) if (ret)
goto probe_err4; goto probe_err3;
INIT_LIST_HEAD(&pdmac->desc_pool); INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock); spin_lock_init(&pdmac->pool_lock);
...@@ -2933,7 +2918,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2933,7 +2918,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (!pdmac->peripherals) { if (!pdmac->peripherals) {
ret = -ENOMEM; ret = -ENOMEM;
dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
goto probe_err5; goto probe_err4;
} }
for (i = 0; i < num_chan; i++) { for (i = 0; i < num_chan; i++) {
...@@ -2961,6 +2946,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2961,6 +2946,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (pi->pcfg.num_peri) { if (pi->pcfg.num_peri) {
dma_cap_set(DMA_SLAVE, pd->cap_mask); dma_cap_set(DMA_SLAVE, pd->cap_mask);
dma_cap_set(DMA_CYCLIC, pd->cap_mask); dma_cap_set(DMA_CYCLIC, pd->cap_mask);
dma_cap_set(DMA_PRIVATE, pd->cap_mask);
} }
} }
...@@ -2976,7 +2962,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2976,7 +2962,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd); ret = dma_async_device_register(pd);
if (ret) { if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n"); dev_err(&adev->dev, "unable to register DMAC\n");
goto probe_err5; goto probe_err4;
} }
dev_info(&adev->dev, dev_info(&adev->dev,
...@@ -2989,15 +2975,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2989,15 +2975,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return 0; return 0;
probe_err5:
pl330_del(pi);
probe_err4: probe_err4:
free_irq(irq, pi); pl330_del(pi);
probe_err3: probe_err3:
#ifndef CONFIG_PM_RUNTIME free_irq(irq, pi);
clk_disable(pdmac->clk);
#endif
clk_put(pdmac->clk);
probe_err2: probe_err2:
iounmap(pi->base); iounmap(pi->base);
probe_err1: probe_err1:
...@@ -3044,10 +3025,6 @@ static int __devexit pl330_remove(struct amba_device *adev) ...@@ -3044,10 +3025,6 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res; res = &adev->res;
release_mem_region(res->start, resource_size(res)); release_mem_region(res->start, resource_size(res));
#ifndef CONFIG_PM_RUNTIME
clk_disable(pdmac->clk);
#endif
kfree(pdmac); kfree(pdmac);
return 0; return 0;
...@@ -3063,49 +3040,10 @@ static struct amba_id pl330_ids[] = { ...@@ -3063,49 +3040,10 @@ static struct amba_id pl330_ids[] = {
MODULE_DEVICE_TABLE(amba, pl330_ids); MODULE_DEVICE_TABLE(amba, pl330_ids);
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
if (!pdmac) {
dev_err(dev, "failed to get dmac\n");
return -ENODEV;
}
clk_disable(pdmac->clk);
return 0;
}
static int pl330_runtime_resume(struct device *dev)
{
struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
if (!pdmac) {
dev_err(dev, "failed to get dmac\n");
return -ENODEV;
}
clk_enable(pdmac->clk);
return 0;
}
#else
#define pl330_runtime_suspend NULL
#define pl330_runtime_resume NULL
#endif /* CONFIG_PM_RUNTIME */
static const struct dev_pm_ops pl330_pm_ops = {
.runtime_suspend = pl330_runtime_suspend,
.runtime_resume = pl330_runtime_resume,
};
static struct amba_driver pl330_driver = { static struct amba_driver pl330_driver = {
.drv = { .drv = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.name = "dma-pl330", .name = "dma-pl330",
.pm = &pl330_pm_ops,
}, },
.id_table = pl330_ids, .id_table = pl330_ids,
.probe = pl330_probe, .probe = pl330_probe,
......
...@@ -570,21 +570,19 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) ...@@ -570,21 +570,19 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
if (of_property_read_u32(dn, "cell-index", &id)) { if (of_property_read_u32(dn, "cell-index", &id)) {
dev_err(dev, "Fail to get DMAC index\n"); dev_err(dev, "Fail to get DMAC index\n");
ret = -ENODEV; return -ENODEV;
goto free_mem;
} }
sdma->irq = irq_of_parse_and_map(dn, 0); sdma->irq = irq_of_parse_and_map(dn, 0);
if (sdma->irq == NO_IRQ) { if (sdma->irq == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n"); dev_err(dev, "Error mapping IRQ!\n");
ret = -EINVAL; return -EINVAL;
goto free_mem;
} }
ret = of_address_to_resource(dn, 0, &res); ret = of_address_to_resource(dn, 0, &res);
if (ret) { if (ret) {
dev_err(dev, "Error parsing memory region!\n"); dev_err(dev, "Error parsing memory region!\n");
goto free_mem; goto irq_dispose;
} }
regs_start = res.start; regs_start = res.start;
...@@ -597,12 +595,11 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) ...@@ -597,12 +595,11 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
goto irq_dispose; goto irq_dispose;
} }
ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
sdma);
if (ret) { if (ret) {
dev_err(dev, "Error requesting IRQ!\n"); dev_err(dev, "Error requesting IRQ!\n");
ret = -EINVAL; ret = -EINVAL;
goto unmap_mem; goto irq_dispose;
} }
dma = &sdma->dma; dma = &sdma->dma;
...@@ -652,13 +649,9 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op) ...@@ -652,13 +649,9 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
return 0; return 0;
free_irq: free_irq:
devm_free_irq(dev, sdma->irq, sdma); free_irq(sdma->irq, sdma);
irq_dispose: irq_dispose:
irq_dispose_mapping(sdma->irq); irq_dispose_mapping(sdma->irq);
unmap_mem:
iounmap(sdma->base);
free_mem:
devm_kfree(dev, sdma);
return ret; return ret;
} }
...@@ -668,10 +661,8 @@ static int __devexit sirfsoc_dma_remove(struct platform_device *op) ...@@ -668,10 +661,8 @@ static int __devexit sirfsoc_dma_remove(struct platform_device *op)
struct sirfsoc_dma *sdma = dev_get_drvdata(dev); struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
dma_async_device_unregister(&sdma->dma); dma_async_device_unregister(&sdma->dma);
devm_free_irq(dev, sdma->irq, sdma); free_irq(sdma->irq, sdma);
irq_dispose_mapping(sdma->irq); irq_dispose_mapping(sdma->irq);
iounmap(sdma->base);
devm_kfree(dev, sdma);
return 0; return 0;
} }
......
...@@ -2921,19 +2921,23 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) ...@@ -2921,19 +2921,23 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
struct d40_base *base = NULL; struct d40_base *base = NULL;
int num_log_chans = 0; int num_log_chans = 0;
int num_phy_chans; int num_phy_chans;
int clk_ret = -EINVAL;
int i; int i;
u32 pid; u32 pid;
u32 cid; u32 cid;
u8 rev; u8 rev;
clk = clk_get(&pdev->dev, NULL); clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) { if (IS_ERR(clk)) {
d40_err(&pdev->dev, "No matching clock found\n"); d40_err(&pdev->dev, "No matching clock found\n");
goto failure; goto failure;
} }
clk_enable(clk); clk_ret = clk_prepare_enable(clk);
if (clk_ret) {
d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
goto failure;
}
/* Get IO for DMAC base address */ /* Get IO for DMAC base address */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
...@@ -3063,10 +3067,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) ...@@ -3063,10 +3067,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
return base; return base;
failure: failure:
if (!IS_ERR(clk)) { if (!clk_ret)
clk_disable(clk); clk_disable_unprepare(clk);
if (!IS_ERR(clk))
clk_put(clk); clk_put(clk);
}
if (virtbase) if (virtbase)
iounmap(virtbase); iounmap(virtbase);
if (res) if (res)
......
...@@ -169,6 +169,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, ...@@ -169,6 +169,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
/* tegra_dma_channel: Channel specific information */ /* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel { struct tegra_dma_channel {
struct dma_chan dma_chan; struct dma_chan dma_chan;
char name[30];
bool config_init; bool config_init;
int id; int id;
int irq; int irq;
...@@ -475,8 +476,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) ...@@ -475,8 +476,7 @@ static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
while (!list_empty(&tdc->pending_sg_req)) { while (!list_empty(&tdc->pending_sg_req)) {
sgreq = list_first_entry(&tdc->pending_sg_req, sgreq = list_first_entry(&tdc->pending_sg_req,
typeof(*sgreq), node); typeof(*sgreq), node);
list_del(&sgreq->node); list_move_tail(&sgreq->node, &tdc->free_sg_req);
list_add_tail(&sgreq->node, &tdc->free_sg_req);
if (sgreq->last_sg) { if (sgreq->last_sg) {
dma_desc = sgreq->dma_desc; dma_desc = sgreq->dma_desc;
dma_desc->dma_status = DMA_ERROR; dma_desc->dma_status = DMA_ERROR;
...@@ -570,8 +570,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, ...@@ -570,8 +570,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
/* If not last req then put at end of pending list */ /* If not last req then put at end of pending list */
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
list_del(&sgreq->node); list_move_tail(&sgreq->node, &tdc->pending_sg_req);
list_add_tail(&sgreq->node, &tdc->pending_sg_req);
sgreq->configured = false; sgreq->configured = false;
st = handle_continuous_head_request(tdc, sgreq, to_terminate); st = handle_continuous_head_request(tdc, sgreq, to_terminate);
if (!st) if (!st)
...@@ -1284,7 +1283,6 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) ...@@ -1284,7 +1283,6 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&tdma->dma_dev.channels); INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) { for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i]; struct tegra_dma_channel *tdc = &tdma->channels[i];
char irq_name[30];
tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
...@@ -1296,9 +1294,9 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) ...@@ -1296,9 +1294,9 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
goto err_irq; goto err_irq;
} }
tdc->irq = res->start; tdc->irq = res->start;
snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
ret = devm_request_irq(&pdev->dev, tdc->irq, ret = devm_request_irq(&pdev->dev, tdc->irq,
tegra_dma_isr, 0, irq_name, tdc); tegra_dma_isr, 0, tdc->name, tdc);
if (ret) { if (ret) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"request_irq failed with err %d channel %d\n", "request_irq failed with err %d channel %d\n",
......
...@@ -134,6 +134,7 @@ config SPI_DAVINCI ...@@ -134,6 +134,7 @@ config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
depends on ARCH_DAVINCI depends on ARCH_DAVINCI
select SPI_BITBANG select SPI_BITBANG
select TI_EDMA
help help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
......
This diff is collapsed.
...@@ -19,6 +19,10 @@ ...@@ -19,6 +19,10 @@
* @nr_channels: Number of channels supported by hardware (max 8) * @nr_channels: Number of channels supported by hardware (max 8)
* @is_private: The device channels should be marked as private and not for * @is_private: The device channels should be marked as private and not for
* by the general purpose DMA channel allocator. * by the general purpose DMA channel allocator.
* @block_size: Maximum block size supported by the controller
* @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
*/ */
struct dw_dma_platform_data { struct dw_dma_platform_data {
unsigned int nr_channels; unsigned int nr_channels;
...@@ -29,6 +33,9 @@ struct dw_dma_platform_data { ...@@ -29,6 +33,9 @@ struct dw_dma_platform_data {
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
unsigned char chan_priority; unsigned char chan_priority;
unsigned short block_size;
unsigned char nr_masters;
unsigned char data_width[4];
}; };
/* bursts size */ /* bursts size */
......
/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifndef __LINUX_EDMA_H
#define __LINUX_EDMA_H
/* Forward declaration only — keeps this header free of <linux/dmaengine.h>. */
struct dma_chan;
#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE)
/*
 * Channel filter for the TI EDMA dmaengine driver.
 * NOTE(review): presumably passed to dma_request_channel() by client
 * drivers, with an EDMA-specific cookie as the second argument — confirm
 * against the driver implementation and its callers.
 */
bool edma_filter_fn(struct dma_chan *, void *);
#else
/*
 * Stub used when the EDMA driver is not built (neither built-in nor
 * module): unconditionally rejects every channel, so clients fall back
 * gracefully without needing their own #ifdefs.
 */
static inline bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	return false;
}
#endif
#endif
/*
 * MMP Platform DMA Management
 *
 * Copyright (c) 2011 Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#ifndef MMP_DMA_H
#define MMP_DMA_H
/*
 * Platform data for the Marvell MMP DMA controller driver.
 * NOTE(review): presumably supplied by board/platform code via
 * dev.platform_data — confirm against the mmp dma driver's probe().
 */
struct mmp_dma_platdata {
	int dma_channels;	/* number of DMA channels the controller provides */
};
#endif /* MMP_DMA_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment