Commit 112db20e authored by Vinod Koul's avatar Vinod Koul

Merge branch 'topic/mv_xor' into for-linus

parents ee5644ce c39290a1
* Marvell XOR engines * Marvell XOR engines
Required properties: Required properties:
- compatible: Should be "marvell,orion-xor" or "marvell,armada-380-xor" - compatible: Should be one of the following:
- "marvell,orion-xor"
- "marvell,armada-380-xor"
- "marvell,armada-3700-xor"
- reg: Should contain registers location and length (two sets) - reg: Should contain registers location and length (two sets)
the first set is the low registers, the second set the high the first set is the low registers, the second set the high
registers for the XOR engine. registers for the XOR engine.
......
...@@ -332,7 +332,7 @@ config MPC512X_DMA ...@@ -332,7 +332,7 @@ config MPC512X_DMA
config MV_XOR config MV_XOR
bool "Marvell XOR engine support" bool "Marvell XOR engine support"
depends on PLAT_ORION depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
select DMA_ENGINE select DMA_ENGINE
select DMA_ENGINE_RAID select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH select ASYNC_TX_ENABLE_CHANNEL_SWITCH
......
...@@ -31,6 +31,12 @@ ...@@ -31,6 +31,12 @@
#include "dmaengine.h" #include "dmaengine.h"
#include "mv_xor.h" #include "mv_xor.h"
/*
 * SoC variants handled by this driver. The value is taken from the
 * matched compatible string (see mv_xor_dt_ids) at probe time and
 * stored in mv_xor_device->xor_type; it selects the operation mode
 * and, for Armada 3700, the Mbus window setup.
 */
enum mv_xor_type {
	XOR_ORION,		/* legacy Orion: operation mode programmed in registers */
	XOR_ARMADA_38X,		/* Armada 38x: operation mode carried in each descriptor */
	XOR_ARMADA_37XX,	/* Armada 3700: per-descriptor mode + fixed 4GB Mbus window */
};
enum mv_xor_mode { enum mv_xor_mode {
XOR_MODE_IN_REG, XOR_MODE_IN_REG,
XOR_MODE_IN_DESC, XOR_MODE_IN_DESC,
...@@ -477,7 +483,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, ...@@ -477,7 +483,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan), dev_dbg(mv_chan_to_devp(mv_chan),
"%s src_cnt: %d len: %u dest %pad flags: %ld\n", "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
__func__, src_cnt, len, &dest, flags); __func__, src_cnt, len, &dest, flags);
sw_desc = mv_chan_alloc_slot(mv_chan); sw_desc = mv_chan_alloc_slot(mv_chan);
...@@ -933,7 +939,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) ...@@ -933,7 +939,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
static struct mv_xor_chan * static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev, mv_xor_channel_add(struct mv_xor_device *xordev,
struct platform_device *pdev, struct platform_device *pdev,
int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc) int idx, dma_cap_mask_t cap_mask, int irq)
{ {
int ret = 0; int ret = 0;
struct mv_xor_chan *mv_chan; struct mv_xor_chan *mv_chan;
...@@ -945,7 +951,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, ...@@ -945,7 +951,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->idx = idx; mv_chan->idx = idx;
mv_chan->irq = irq; mv_chan->irq = irq;
mv_chan->op_in_desc = op_in_desc; if (xordev->xor_type == XOR_ORION)
mv_chan->op_in_desc = XOR_MODE_IN_REG;
else
mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev; dma_dev = &mv_chan->dmadev;
...@@ -1085,6 +1094,33 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, ...@@ -1085,6 +1094,33 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
writel(0, base + WINDOW_OVERRIDE_CTRL(1)); writel(0, base + WINDOW_OVERRIDE_CTRL(1));
} }
/*
 * mv_xor_conf_mbus_windows_a3700 - set up the XOR engine's Mbus
 * address-decoding windows on Armada 3700.
 *
 * Clears all eight window base/size registers (plus the four
 * remap-high registers), then opens a single window spanning the full
 * 4GB address space. Unlike mv_xor_conf_mbus_windows(), no
 * mv_mbus_dram_info() ranges are consulted: on this SoC the
 * DRAM-related decoding is handled outside the XOR engine.
 */
static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	/* Disable and clear every window before reprogramming. */
	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}
	/*
	 * For Armada 3700, open a default 4GB Mbus window; the
	 * DRAM-related configuration is done at the AXIS level.
	 */
	writel(0xffff0000, base + WINDOW_SIZE(0));
	win_enable |= 1;	/* enable window 0 */
	/*
	 * NOTE(review): presumably the access-protection bits for
	 * window 0 — confirm against the SoC datasheet.
	 */
	win_enable |= 3 << 16;
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
/* /*
* Since this XOR driver is basically used only for RAID5, we don't * Since this XOR driver is basically used only for RAID5, we don't
* need to care about synchronizing ->suspend with DMA activity, * need to care about synchronizing ->suspend with DMA activity,
...@@ -1129,6 +1165,11 @@ static int mv_xor_resume(struct platform_device *dev) ...@@ -1129,6 +1165,11 @@ static int mv_xor_resume(struct platform_device *dev)
XOR_INTR_MASK(mv_chan)); XOR_INTR_MASK(mv_chan));
} }
if (xordev->xor_type == XOR_ARMADA_37XX) {
mv_xor_conf_mbus_windows_a3700(xordev);
return 0;
}
dram = mv_mbus_dram_info(); dram = mv_mbus_dram_info();
if (dram) if (dram)
mv_xor_conf_mbus_windows(xordev, dram); mv_xor_conf_mbus_windows(xordev, dram);
...@@ -1137,8 +1178,9 @@ static int mv_xor_resume(struct platform_device *dev) ...@@ -1137,8 +1178,9 @@ static int mv_xor_resume(struct platform_device *dev)
} }
static const struct of_device_id mv_xor_dt_ids[] = { static const struct of_device_id mv_xor_dt_ids[] = {
{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG }, { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
{}, {},
}; };
...@@ -1152,7 +1194,6 @@ static int mv_xor_probe(struct platform_device *pdev) ...@@ -1152,7 +1194,6 @@ static int mv_xor_probe(struct platform_device *pdev)
struct resource *res; struct resource *res;
unsigned int max_engines, max_channels; unsigned int max_engines, max_channels;
int i, ret; int i, ret;
int op_in_desc;
dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
...@@ -1180,12 +1221,30 @@ static int mv_xor_probe(struct platform_device *pdev) ...@@ -1180,12 +1221,30 @@ static int mv_xor_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xordev); platform_set_drvdata(pdev, xordev);
/*
* We need to know which type of XOR device we use before
* setting up. In non-dt case it can only be the legacy one.
*/
xordev->xor_type = XOR_ORION;
if (pdev->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(mv_xor_dt_ids,
&pdev->dev);
xordev->xor_type = (uintptr_t)of_id->data;
}
/* /*
* (Re-)program MBUS remapping windows if we are asked to. * (Re-)program MBUS remapping windows if we are asked to.
*/ */
dram = mv_mbus_dram_info(); if (xordev->xor_type == XOR_ARMADA_37XX) {
if (dram) mv_xor_conf_mbus_windows_a3700(xordev);
mv_xor_conf_mbus_windows(xordev, dram); } else {
dram = mv_mbus_dram_info();
if (dram)
mv_xor_conf_mbus_windows(xordev, dram);
}
/* Not all platforms can gate the clock, so it is not /* Not all platforms can gate the clock, so it is not
* an error if the clock does not exists. * an error if the clock does not exists.
...@@ -1199,12 +1258,16 @@ static int mv_xor_probe(struct platform_device *pdev) ...@@ -1199,12 +1258,16 @@ static int mv_xor_probe(struct platform_device *pdev)
* order for async_tx to perform well. So we limit the number * order for async_tx to perform well. So we limit the number
* of engines and channels so that we take into account this * of engines and channels so that we take into account this
* constraint. Note that we also want to use channels from * constraint. Note that we also want to use channels from
* separate engines when possible. * separate engines when possible. For dual-CPU Armada 3700
* SoC with single XOR engine allow using its both channels.
*/ */
max_engines = num_present_cpus(); max_engines = num_present_cpus();
max_channels = min_t(unsigned int, if (xordev->xor_type == XOR_ARMADA_37XX)
MV_XOR_MAX_CHANNELS, max_channels = num_present_cpus();
DIV_ROUND_UP(num_present_cpus(), 2)); else
max_channels = min_t(unsigned int,
MV_XOR_MAX_CHANNELS,
DIV_ROUND_UP(num_present_cpus(), 2));
if (mv_xor_engine_count >= max_engines) if (mv_xor_engine_count >= max_engines)
return 0; return 0;
...@@ -1212,15 +1275,11 @@ static int mv_xor_probe(struct platform_device *pdev) ...@@ -1212,15 +1275,11 @@ static int mv_xor_probe(struct platform_device *pdev)
if (pdev->dev.of_node) { if (pdev->dev.of_node) {
struct device_node *np; struct device_node *np;
int i = 0; int i = 0;
const struct of_device_id *of_id =
of_match_device(mv_xor_dt_ids,
&pdev->dev);
for_each_child_of_node(pdev->dev.of_node, np) { for_each_child_of_node(pdev->dev.of_node, np) {
struct mv_xor_chan *chan; struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask; dma_cap_mask_t cap_mask;
int irq; int irq;
op_in_desc = (int)of_id->data;
if (i >= max_channels) if (i >= max_channels)
continue; continue;
...@@ -1237,7 +1296,7 @@ static int mv_xor_probe(struct platform_device *pdev) ...@@ -1237,7 +1296,7 @@ static int mv_xor_probe(struct platform_device *pdev)
} }
chan = mv_xor_channel_add(xordev, pdev, i, chan = mv_xor_channel_add(xordev, pdev, i,
cap_mask, irq, op_in_desc); cap_mask, irq);
if (IS_ERR(chan)) { if (IS_ERR(chan)) {
ret = PTR_ERR(chan); ret = PTR_ERR(chan);
irq_dispose_mapping(irq); irq_dispose_mapping(irq);
...@@ -1266,8 +1325,7 @@ static int mv_xor_probe(struct platform_device *pdev) ...@@ -1266,8 +1325,7 @@ static int mv_xor_probe(struct platform_device *pdev)
} }
chan = mv_xor_channel_add(xordev, pdev, i, chan = mv_xor_channel_add(xordev, pdev, i,
cd->cap_mask, irq, cd->cap_mask, irq);
XOR_MODE_IN_REG);
if (IS_ERR(chan)) { if (IS_ERR(chan)) {
ret = PTR_ERR(chan); ret = PTR_ERR(chan);
goto err_channel_add; goto err_channel_add;
......
...@@ -85,6 +85,7 @@ struct mv_xor_device { ...@@ -85,6 +85,7 @@ struct mv_xor_device {
void __iomem *xor_high_base; void __iomem *xor_high_base;
struct clk *clk; struct clk *clk;
struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS]; struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
int xor_type;
}; };
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment