Commit df089aa0 authored by Thomas Petazzoni, committed by David S. Miller

net: mvpp2: move from cpu-centric naming to "software thread" naming

The PPv2.2 IP has a concept of "software thread", with all registers
of the PPv2.2 mapped 8 times, for concurrent accesses by 8 "software
threads". In addition, interrupts on RX queues are associated to such
"software thread".

For most cases, we map a "software thread" to the more conventional
concept of CPU, but we will soon have one exception: we will have a
model where we have one TX interrupt per CPU (each using one software
thread), and all RX events mapped to another software thread
(associated to another interrupt).

In preparation for this change, it makes sense to change the naming
from MVPP2_MAX_CPUS to MVPP2_MAX_THREADS, and to plan for 8 software
threads instead of the current 4.
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 09f83975
@@ -685,7 +685,7 @@ enum mvpp2_prs_l3_cast {
 #define MVPP21_ADDR_SPACE_SZ		0
 #define MVPP22_ADDR_SPACE_SZ		SZ_64K
 
-#define MVPP2_MAX_CPUS			4
+#define MVPP2_MAX_THREADS		8
 
 enum mvpp2_bm_type {
 	MVPP2_BM_FREE,
@@ -701,11 +701,12 @@ struct mvpp2 {
 	void __iomem *lms_base;
 	void __iomem *iface_base;
 
-	/* On PPv2.2, each CPU can access the base register through a
-	 * separate address space, each 64 KB apart from each
-	 * other.
+	/* On PPv2.2, each "software thread" can access the base
+	 * register through a separate address space, each 64 KB apart
+	 * from each other. Typically, such address spaces will be
+	 * used per CPU.
 	 */
-	void __iomem *cpu_base[MVPP2_MAX_CPUS];
+	void __iomem *swth_base[MVPP2_MAX_THREADS];
 
 	/* Common clocks */
 	struct clk *pp_clk;
@@ -1071,12 +1072,12 @@ struct mvpp2_bm_pool {
 
 static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
 {
-	writel(data, priv->cpu_base[0] + offset);
+	writel(data, priv->swth_base[0] + offset);
 }
 
 static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
 {
-	return readl(priv->cpu_base[0] + offset);
+	return readl(priv->swth_base[0] + offset);
 }
 
 /* These accessors should be used to access:
@@ -1118,13 +1119,13 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
 static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
 			       u32 offset, u32 data)
 {
-	writel(data, priv->cpu_base[cpu] + offset);
+	writel(data, priv->swth_base[cpu] + offset);
 }
 
 static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
 			     u32 offset)
 {
-	return readl(priv->cpu_base[cpu] + offset);
+	return readl(priv->swth_base[cpu] + offset);
 }
 
 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
@@ -6874,7 +6875,7 @@ static int mvpp2_probe(struct platform_device *pdev)
 	struct mvpp2 *priv;
 	struct resource *res;
 	void __iomem *base;
-	int port_count, cpu;
+	int port_count, i;
 	int err;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -6901,12 +6902,12 @@ static int mvpp2_probe(struct platform_device *pdev)
 		return PTR_ERR(priv->iface_base);
 	}
 
-	for_each_present_cpu(cpu) {
+	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
 		u32 addr_space_sz;
 
 		addr_space_sz = (priv->hw_version == MVPP21 ?
 				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
-		priv->cpu_base[cpu] = base + cpu * addr_space_sz;
+		priv->swth_base[i] = base + i * addr_space_sz;
 	}
 
 	if (priv->hw_version == MVPP21)