Commit b14ad90c authored by David S. Miller

Merge branch 'cpsw-cpdma-DDR'

Grygorii Strashko says:

====================
net: ethernet: ti: cpsw: support placing CPDMA descriptors into DDR

This series adds support for placing CPDMA descriptors into DDR by
introducing a new module parameter, "descs_pool_size", to specify the
size of the descriptor pool. "descs_pool_size" defines the total number
of CPDMA CPPI descriptors to be used for both ingress and egress packet
processing. If it is not specified, the default value of 256 is used,
which allows the descriptor pool to be placed in the internal CPPI RAM.
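
For reference, the default of 256 lines up with the internal buffer
descriptor RAM advertised in the device trees below (bd_ram_size =
<0x2000>, i.e. 8 KiB). A quick back-of-the-envelope sketch of that
arithmetic follows; the descriptor layout (4 hardware + 3 software
words) and the 16-byte alignment are assumptions about how the cpsw
driver sets up CPDMA, not something this series changes:

#include <stdio.h>

/* Rough check of why descs_pool_size = 256 still fits in internal CPPI RAM. */
int main(void)
{
        unsigned int desc_bytes   = (4 + 3) * 4;                    /* 28 bytes raw (assumed layout) */
        unsigned int desc_aligned = ((desc_bytes + 15) / 16) * 16;  /* ALIGN(28, 16) = 32 */
        unsigned int pool_descs   = 256;                            /* default descs_pool_size */
        unsigned int bd_ram_size  = 0x2000;                         /* 8 KiB CPPI RAM from the DT examples */
        unsigned int pool_bytes   = desc_aligned * pool_descs;

        printf("pool needs %u bytes, CPPI RAM has %u -> %s\n",
               pool_bytes, bd_ram_size,
               pool_bytes <= bd_ram_size ? "stays in CPPI RAM" : "moves to DDR");
        return 0;
}

Anything larger than the default therefore spills the pool into DDR,
which is exactly what this series enables.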

In addition, the series adds the ability to re-split the CPDMA
descriptor pool between the RX and TX paths via the ethtool '-G'
command. This allows the number of descriptors used by the RX and TX
paths to be configured and fixed; they are then split between the RX/TX
channels proportionally, depending on the number of RX/TX channels and
their weights.
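
As an illustration only (not part of the patch), the same re-split can
be driven programmatically through the standard SIOCETHTOOL ioctl that
ethtool '-G' uses. The interface name "eth0" and the rx value of 200
below are assumptions; with the default 256-descriptor pool the driver
accepts rx_pending values between 10% and 90% of the pool:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ringparam ering = { .cmd = ETHTOOL_GRINGPARAM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* example interface */
        ifr.ifr_data = (char *)&ering;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)         /* read current RX/TX split */
                return 1;
        printf("rx %u / tx %u descriptors\n", ering.rx_pending, ering.tx_pending);

        ering.cmd = ETHTOOL_SRINGPARAM;
        ering.rx_pending = 200;                       /* must stay within 10%..90% of the pool */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)         /* same as `ethtool -G eth0 rx 200` */
                return 1;

        close(fd);
        return 0;
}

The command-line equivalent is simply "ethtool -G <dev> rx <N>"; the
pool size itself is fixed at module load time, since descs_pool_size is
registered read-only (0444) in the patch below.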

This significantly reduces the UDP packet drop rate for bandwidths
above 301 Mbits/sec (am57x).

Before enabling this feature, the am437x SoC had to be fixed, since it
proved not to work with CPDMA descriptors placed in DDR. Patch 1 fixes
this issue.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 350a4718 c40d8883
@@ -23,7 +23,6 @@ Required properties:
 Optional properties:
 - ti,hwmods		: Must be "cpgmac0"
-- no_bd_ram		: Must be 0 or 1
 - dual_emac		: Specifies Switch to act as Dual EMAC
 - syscon		: Phandle to the system control device node, which is
 			  the control module device of the am33x
@@ -70,7 +69,6 @@ Examples:
 	cpdma_channels = <8>;
 	ale_entries = <1024>;
 	bd_ram_size = <0x2000>;
-	no_bd_ram = <0>;
 	rx_descs = <64>;
 	mac_control = <0x20>;
 	slaves = <2>;
@@ -99,7 +97,6 @@ Examples:
 	cpdma_channels = <8>;
 	ale_entries = <1024>;
 	bd_ram_size = <0x2000>;
-	no_bd_ram = <0>;
 	rx_descs = <64>;
 	mac_control = <0x20>;
 	slaves = <2>;
...
@@ -781,7 +781,6 @@ mac: ethernet@4a100000 {
 			cpdma_channels = <8>;
 			ale_entries = <1024>;
 			bd_ram_size = <0x2000>;
-			no_bd_ram = <0>;
 			mac_control = <0x20>;
 			slaves = <2>;
 			active_slave = <0>;
...
@@ -669,7 +669,6 @@ GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH
 			cpdma_channels = <8>;
 			ale_entries = <1024>;
 			bd_ram_size = <0x2000>;
-			no_bd_ram = <0>;
 			mac_control = <0x20>;
 			slaves = <2>;
 			active_slave = <0>;
...
@@ -509,7 +509,6 @@ mac: ethernet@4a100000 {
 			cpdma_channels = <8>;
 			ale_entries = <1024>;
 			bd_ram_size = <0x2000>;
-			no_bd_ram = <0>;
 			mac_control = <0x20>;
 			slaves = <2>;
 			active_slave = <0>;
...
@@ -1707,7 +1707,6 @@ mac: ethernet@48484000 {
 			cpdma_channels = <8>;
 			ale_entries = <1024>;
 			bd_ram_size = <0x2000>;
-			no_bd_ram = <0>;
 			mac_control = <0x20>;
 			slaves = <2>;
 			active_slave = <0>;
...
@@ -145,6 +145,7 @@ do { \
 		cpsw->data.active_slave)
 #define IRQ_NUM			2
 #define CPSW_MAX_QUEUES		8
+#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
 
 static int debug_level;
 module_param(debug_level, int, 0);
@@ -158,6 +159,10 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
 module_param(rx_packet_max, int, 0);
 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
 
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+module_param(descs_pool_size, int, 0444);
+MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
+
 struct cpsw_wr_regs {
 	u32	id_ver;
 	u32	soft_reset;
@@ -2479,6 +2484,90 @@ static int cpsw_nway_reset(struct net_device *ndev)
 		return -EOPNOTSUPP;
 }
 
+static void cpsw_get_ringparam(struct net_device *ndev,
+			       struct ethtool_ringparam *ering)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+
+	/* not supported */
+	ering->tx_max_pending = 0;
+	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
+	/* Max 90% RX buffers */
+	ering->rx_max_pending = (descs_pool_size * 9) / 10;
+	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
+}
+
+static int cpsw_set_ringparam(struct net_device *ndev,
+			      struct ethtool_ringparam *ering)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	int i, ret;
+
+	/* ignore ering->tx_pending - only rx_pending adjustment is supported */
+	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
+	    ering->rx_pending < (descs_pool_size / 10) ||
+	    ering->rx_pending > ((descs_pool_size * 9) / 10))
+		return -EINVAL;
+
+	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+		return 0;
+
+	/* Disable NAPI scheduling */
+	cpsw_intr_disable(cpsw);
+
+	/* Stop all transmit queues for every network device.
+	 * Disable re-using rx descriptors with dormant_on.
+	 */
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+
+		netif_tx_stop_all_queues(slave->ndev);
+		netif_dormant_on(slave->ndev);
+	}
+
+	/* Handle rest of tx packets and stop cpdma channels */
+	cpdma_ctlr_stop(cpsw->dma);
+
+	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+
+		/* Enable rx packets handling */
+		netif_dormant_off(slave->ndev);
+	}
+
+	if (cpsw_common_res_usage_state(cpsw)) {
+		cpdma_chan_split_pool(cpsw->dma);
+
+		ret = cpsw_fill_rx_channels(priv);
+		if (ret)
+			goto err;
+
+		/* After this receive is started */
+		cpdma_ctlr_start(cpsw->dma);
+		cpsw_intr_enable(cpsw);
+	}
+
+	/* Resume transmit for every affected interface */
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+
+		netif_tx_start_all_queues(slave->ndev);
+	}
+
+	return 0;
+err:
+	dev_err(priv->dev, "cannot set ring params, closing device\n");
+	dev_close(ndev);
+	return ret;
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
 	.get_drvinfo	= cpsw_get_drvinfo,
 	.get_msglevel	= cpsw_get_msglevel,
@@ -2505,6 +2594,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
 	.get_eee	= cpsw_get_eee,
 	.set_eee	= cpsw_set_eee,
 	.nway_reset	= cpsw_nway_reset,
+	.get_ringparam	= cpsw_get_ringparam,
+	.set_ringparam	= cpsw_set_ringparam,
 };
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
@@ -2969,6 +3060,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	dma_params.has_ext_regs		= true;
 	dma_params.desc_hw_addr		= dma_params.desc_mem_phys;
 	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
+	dma_params.descs_pool_size	= descs_pool_size;
 
 	cpsw->dma = cpdma_ctlr_create(&dma_params);
 	if (!cpsw->dma) {
@@ -3072,9 +3164,9 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_ale_ret;
 	}
 
-	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
-		    &ss_res->start, ndev->irq);
+	cpsw_notice(priv, probe,
+		    "initialized device (regs %pa, irq %d, pool size %d)\n",
+		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);
 
 	if (cpsw->data.dual_emac) {
 		ret = cpsw_probe_dual_emac(priv);
 		if (ret) {
...
@@ -108,6 +108,8 @@ struct cpdma_ctlr {
 	spinlock_t lock;
 	struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
 	int chan_num;
+	int num_rx_desc; /* RX descriptors number */
+	int num_tx_desc; /* TX descriptors number */
 };
 
 struct cpdma_chan {
@@ -166,12 +168,12 @@ static struct cpdma_control_info controls[] = {
 #define num_chan	params.num_chan
 
 /* various accessors */
-#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
-#define chan_read(chan, fld)		__raw_readl((chan)->fld)
-#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
-#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
-#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
-#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
+#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld)		readl((chan)->fld)
+#define desc_read(desc, fld)		readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
+#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)
 
 #define cpdma_desc_to_port(chan, mode, directed)	\
 	do {						\
@@ -181,8 +183,10 @@ static struct cpdma_control_info controls[] = {
 			 (directed << CPDMA_TO_PORT_SHIFT));	\
 	} while (0)
 
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 {
+	struct cpdma_desc_pool *pool = ctlr->pool;
+
 	if (!pool)
 		return;
@@ -191,10 +195,8 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
 			  gen_pool_size(pool->gen_pool),
 			  gen_pool_avail(pool->gen_pool));
 	if (pool->cpumap)
-		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
 				  pool->phys);
-	else
-		iounmap(pool->iomap);
 }
 /*
@@ -203,37 +205,50 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
  * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
  * abstract out these details
  */
-static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
-			int size, int align)
+int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
+	struct cpdma_params *cpdma_params = &ctlr->params;
 	struct cpdma_desc_pool *pool;
-	int ret;
+	int ret = -ENOMEM;
 
-	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		goto gen_pool_create_fail;
+	ctlr->pool = pool;
+
+	pool->mem_size = cpdma_params->desc_mem_size;
+	pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
+				cpdma_params->desc_align);
+	pool->num_desc = pool->mem_size / pool->desc_size;
+
+	if (cpdma_params->descs_pool_size) {
+		/* recalculate memory size required cpdma descriptor pool
+		 * basing on number of descriptors specified by user and
+		 * if memory size > CPPI internal RAM size (desc_mem_size)
+		 * then switch to use DDR
+		 */
+		pool->num_desc = cpdma_params->descs_pool_size;
+		pool->mem_size = pool->desc_size * pool->num_desc;
+		if (pool->mem_size > cpdma_params->desc_mem_size)
+			cpdma_params->desc_mem_phys = 0;
+	}
 
-	pool->dev = dev;
-	pool->mem_size = size;
-	pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
-	pool->num_desc = size / pool->desc_size;
-
-	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
-					      "cpdma");
+	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
+					      -1, "cpdma");
 	if (IS_ERR(pool->gen_pool)) {
-		dev_err(dev, "pool create failed %ld\n",
-			PTR_ERR(pool->gen_pool));
+		ret = PTR_ERR(pool->gen_pool);
+		dev_err(ctlr->dev, "pool create failed %d\n", ret);
 		goto gen_pool_create_fail;
 	}
 
-	if (phys) {
-		pool->phys  = phys;
-		pool->iomap = ioremap(phys, size); /* should be memremap? */
-		pool->hw_addr = hw_addr;
+	if (cpdma_params->desc_mem_phys) {
+		pool->phys = cpdma_params->desc_mem_phys;
+		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
+					   pool->mem_size);
+		pool->hw_addr = cpdma_params->desc_hw_addr;
 	} else {
-		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
-						  GFP_KERNEL);
+		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
 		pool->iomap = (void __iomem __force *)pool->cpumap;
 		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
 	}
@@ -244,16 +259,17 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
 	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
 				pool->phys, pool->mem_size, -1);
 	if (ret < 0) {
-		dev_err(dev, "pool add failed %d\n", ret);
+		dev_err(ctlr->dev, "pool add failed %d\n", ret);
 		goto gen_pool_add_virt_fail;
 	}
 
-	return pool;
+	return 0;
 
 gen_pool_add_virt_fail:
-	cpdma_desc_pool_destroy(pool);
+	cpdma_desc_pool_destroy(ctlr);
 gen_pool_create_fail:
-	return NULL;
+	ctlr->pool = NULL;
+	return ret;
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -502,13 +518,11 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 	ctlr->chan_num = 0;
 	spin_lock_init(&ctlr->lock);
 
-	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
-					    ctlr->params.desc_mem_phys,
-					    ctlr->params.desc_hw_addr,
-					    ctlr->params.desc_mem_size,
-					    ctlr->params.desc_align);
-	if (!ctlr->pool)
+	if (cpdma_desc_pool_create(ctlr))
 		return NULL;
+	/* split pool equally between RX/TX by default */
+	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
+	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
 
 	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
 		ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -542,10 +556,10 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
 	}
 
 	for (i = 0; i < ctlr->num_chan; i++) {
-		__raw_writel(0, ctlr->params.txhdp + 4 * i);
-		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
-		__raw_writel(0, ctlr->params.txcp + 4 * i);
-		__raw_writel(0, ctlr->params.rxcp + 4 * i);
+		writel(0, ctlr->params.txhdp + 4 * i);
+		writel(0, ctlr->params.rxhdp + 4 * i);
+		writel(0, ctlr->params.txcp + 4 * i);
+		writel(0, ctlr->params.rxcp + 4 * i);
 	}
 
 	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
@@ -623,7 +637,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
 	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
 		cpdma_chan_destroy(ctlr->channels[i]);
 
-	cpdma_desc_pool_destroy(ctlr->pool);
+	cpdma_desc_pool_destroy(ctlr);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -708,22 +722,22 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
 		}
 	}
 	/* use remains */
-	most_chan->desc_num += desc_cnt;
+	if (most_chan)
+		most_chan->desc_num += desc_cnt;
 }
 
 /**
  * cpdma_chan_split_pool - Splits ctrl pool between all channels.
  * Has to be called under ctlr lock
  */
-static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 {
 	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
-	struct cpdma_desc_pool *pool = ctlr->pool;
 	int free_rx_num = 0, free_tx_num = 0;
 	int rx_weight = 0, tx_weight = 0;
 	int tx_desc_num, rx_desc_num;
 	struct cpdma_chan *chan;
-	int i, tx_num = 0;
+	int i;
 
 	if (!ctlr->chan_num)
 		return 0;
@@ -741,15 +755,14 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 			if (!chan->weight)
 				free_tx_num++;
 			tx_weight += chan->weight;
-			tx_num++;
 		}
 	}
 
 	if (rx_weight > 100 || tx_weight > 100)
 		return -EINVAL;
 
-	tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
-	rx_desc_num = pool->num_desc - tx_desc_num;
+	tx_desc_num = ctlr->num_tx_desc;
+	rx_desc_num = ctlr->num_rx_desc;
 
 	if (free_tx_num) {
 		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
@@ -765,6 +778,8 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
+
 
 /* cpdma_chan_set_weight - set weight of a channel in percentage.
  * Tx and Rx channels have separate weights. That is 100% for RX
@@ -898,7 +913,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	chan->chan_num	= chan_num;
 	chan->handler	= handler;
 	chan->rate	= 0;
-	chan->desc_num = ctlr->pool->num_desc / 2;
 	chan->weight	= 0;
 
 	if (is_rx_chan(chan)) {
@@ -1061,13 +1075,17 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 	cpdma_desc_to_port(chan, mode, directed);
 
-	desc_write(desc, hw_next,   0);
-	desc_write(desc, hw_buffer, buffer);
-	desc_write(desc, hw_len,    len);
-	desc_write(desc, hw_mode,   mode | len);
-	desc_write(desc, sw_token,  token);
-	desc_write(desc, sw_buffer, buffer);
-	desc_write(desc, sw_len,    len);
+	/* Relaxed IO accessors can be used here as there is read barrier
+	 * at the end of write sequence.
+	 */
+	writel_relaxed(0, &desc->hw_next);
+	writel_relaxed(buffer, &desc->hw_buffer);
+	writel_relaxed(len, &desc->hw_len);
+	writel_relaxed(mode | len, &desc->hw_mode);
+	writel_relaxed(token, &desc->sw_token);
+	writel_relaxed(buffer, &desc->sw_buffer);
+	writel_relaxed(len, &desc->sw_len);
+	desc_read(desc, sw_len);
 
 	__cpdma_chan_submit(chan, desc);
@@ -1136,7 +1154,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
 	}
 
 	desc_dma = desc_phys(pool, desc);
-	status = __raw_readl(&desc->hw_mode);
+	status = desc_read(desc, hw_mode);
 	outlen = status & 0x7ff;
 	if (status & CPDMA_DESC_OWNER) {
 		chan->stats.busy_dequeue++;
@@ -1155,7 +1173,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
 	chan->count--;
 	chan->stats.good_dequeue++;
 
-	if (status & CPDMA_DESC_EOQ) {
+	if ((status & CPDMA_DESC_EOQ) && chan->head) {
 		chan->stats.requeue++;
 		chan_write(chan, hdp, desc_phys(pool, chan->head));
 	}
@@ -1316,4 +1334,23 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
 }
 EXPORT_SYMBOL_GPL(cpdma_control_set);
 
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
+{
+	return ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);
+
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
+{
+	return ctlr->num_tx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);
+
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+{
+	ctlr->num_rx_desc = num_rx_desc;
+	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
+
 MODULE_LICENSE("GPL");
@@ -37,6 +37,7 @@ struct cpdma_params {
 	int		desc_mem_size;
 	int		desc_align;
 	u32		bus_freq_mhz;
+	u32		descs_pool_size;
 
 	/*
 	 * Some instances of embedded cpdma controllers have extra control and
@@ -113,5 +114,9 @@ enum cpdma_control {
 
 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr);
 
 #endif