Commit e6e2df69 authored by Roy Pledge, committed by Li Yang

soc/fsl/qbman: Rework portal mapping calls for ARM/PPC

Rework portal mapping for PPC and ARM. The PPC devices require a
cacheable coherent mapping while ARM will work with a non-cacheable/write
combine mapping. This also eliminates the need for manual cache
flushes on ARM, and fixes the code so that sparse checking is clean.
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
parent 219e8e05
...@@ -154,7 +154,8 @@ struct bm_mc { ...@@ -154,7 +154,8 @@ struct bm_mc {
}; };
struct bm_addr { struct bm_addr {
void __iomem *ce; /* cache-enabled */ void *ce; /* cache-enabled */
__be32 *ce_be; /* Same as above but for direct access */
void __iomem *ci; /* cache-inhibited */ void __iomem *ci; /* cache-inhibited */
}; };
...@@ -167,12 +168,12 @@ struct bm_portal { ...@@ -167,12 +168,12 @@ struct bm_portal {
/* Cache-inhibited register access. */ /* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset) static inline u32 bm_in(struct bm_portal *p, u32 offset)
{ {
return be32_to_cpu(__raw_readl(p->addr.ci + offset)); return ioread32be(p->addr.ci + offset);
} }
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val) static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{ {
__raw_writel(cpu_to_be32(val), p->addr.ci + offset); iowrite32be(val, p->addr.ci + offset);
} }
/* Cache Enabled Portal Access */ /* Cache Enabled Portal Access */
...@@ -188,7 +189,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset) ...@@ -188,7 +189,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset) static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{ {
return be32_to_cpu(__raw_readl(p->addr.ce + offset)); return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
} }
struct bman_portal { struct bman_portal {
...@@ -408,7 +409,7 @@ static int bm_mc_init(struct bm_portal *portal) ...@@ -408,7 +409,7 @@ static int bm_mc_init(struct bm_portal *portal)
mc->cr = portal->addr.ce + BM_CL_CR; mc->cr = portal->addr.ce + BM_CL_CR;
mc->rr = portal->addr.ce + BM_CL_RR0; mc->rr = portal->addr.ce + BM_CL_RR0;
mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ? mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
0 : 1; 0 : 1;
mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0; mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING #ifdef CONFIG_FSL_DPAA_CHECKING
...@@ -466,7 +467,7 @@ static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal) ...@@ -466,7 +467,7 @@ static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
* its command is submitted and completed. This includes the valid-bit, * its command is submitted and completed. This includes the valid-bit,
* in case you were wondering... * in case you were wondering...
*/ */
if (!__raw_readb(&rr->verb)) { if (!rr->verb) {
dpaa_invalidate_touch_ro(rr); dpaa_invalidate_touch_ro(rr);
return NULL; return NULL;
} }
...@@ -512,8 +513,9 @@ static int bman_create_portal(struct bman_portal *portal, ...@@ -512,8 +513,9 @@ static int bman_create_portal(struct bman_portal *portal,
* config, everything that follows depends on it and "config" is more * config, everything that follows depends on it and "config" is more
* for (de)reference... * for (de)reference...
*/ */
p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; p->addr.ce = c->addr_virt_ce;
p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; p->addr.ce_be = c->addr_virt_ce;
p->addr.ci = c->addr_virt_ci;
if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) { if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
dev_err(c->dev, "RCR initialisation failed\n"); dev_err(c->dev, "RCR initialisation failed\n");
goto fail_rcr; goto fail_rcr;
......
...@@ -91,7 +91,6 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -91,7 +91,6 @@ static int bman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node; struct device_node *node = dev->of_node;
struct bm_portal_config *pcfg; struct bm_portal_config *pcfg;
struct resource *addr_phys[2]; struct resource *addr_phys[2];
void __iomem *va;
int irq, cpu; int irq, cpu;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
...@@ -123,23 +122,21 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -123,23 +122,21 @@ static int bman_portal_probe(struct platform_device *pdev)
} }
pcfg->irq = irq; pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
if (!va) { resource_size(addr_phys[0]),
dev_err(dev, "ioremap::CE failed\n"); QBMAN_MEMREMAP_ATTR);
if (!pcfg->addr_virt_ce) {
dev_err(dev, "memremap::CE failed\n");
goto err_ioremap1; goto err_ioremap1;
} }
pcfg->addr_virt[DPAA_PORTAL_CE] = va; pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
resource_size(addr_phys[1]));
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), if (!pcfg->addr_virt_ci) {
_PAGE_GUARDED | _PAGE_NO_CACHE);
if (!va) {
dev_err(dev, "ioremap::CI failed\n"); dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2; goto err_ioremap2;
} }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
spin_lock(&bman_lock); spin_lock(&bman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus); cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) { if (cpu >= nr_cpu_ids) {
...@@ -164,9 +161,9 @@ static int bman_portal_probe(struct platform_device *pdev) ...@@ -164,9 +161,9 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0; return 0;
err_portal_init: err_portal_init:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); iounmap(pcfg->addr_virt_ci);
err_ioremap2: err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); memunmap(pcfg->addr_virt_ce);
err_ioremap1: err_ioremap1:
return -ENXIO; return -ENXIO;
} }
......
...@@ -46,11 +46,9 @@ extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */ ...@@ -46,11 +46,9 @@ extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
extern struct gen_pool *bm_bpalloc; extern struct gen_pool *bm_bpalloc;
struct bm_portal_config { struct bm_portal_config {
/* /* Portal addresses */
* Corenet portal addresses; void *addr_virt_ce;
* [0]==cache-enabled, [1]==cache-inhibited. void __iomem *addr_virt_ci;
*/
void __iomem *addr_virt[2];
/* Allow these to be joined in lists */ /* Allow these to be joined in lists */
struct list_head list; struct list_head list;
struct device *dev; struct device *dev;
......
...@@ -51,12 +51,12 @@ ...@@ -51,12 +51,12 @@
static inline void dpaa_flush(void *p) static inline void dpaa_flush(void *p)
{ {
/*
* Only PPC needs to flush the cache currently - on ARM the mapping
* is non cacheable
*/
#ifdef CONFIG_PPC #ifdef CONFIG_PPC
flush_dcache_range((unsigned long)p, (unsigned long)p+64); flush_dcache_range((unsigned long)p, (unsigned long)p+64);
#elif defined(CONFIG_ARM)
__cpuc_flush_dcache_area(p, 64);
#elif defined(CONFIG_ARM64)
__flush_dcache_area(p, 64);
#endif #endif
} }
...@@ -102,4 +102,11 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last) ...@@ -102,4 +102,11 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr, int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
size_t *size); size_t *size);
/* memremap() attributes for different platforms */
#ifdef CONFIG_PPC
#define QBMAN_MEMREMAP_ATTR MEMREMAP_WB
#else
#define QBMAN_MEMREMAP_ATTR MEMREMAP_WC
#endif
#endif /* __DPAA_SYS_H */ #endif /* __DPAA_SYS_H */
...@@ -300,7 +300,8 @@ struct qm_mc { ...@@ -300,7 +300,8 @@ struct qm_mc {
}; };
struct qm_addr { struct qm_addr {
void __iomem *ce; /* cache-enabled */ void *ce; /* cache-enabled */
__be32 *ce_be; /* same value as above but for direct access */
void __iomem *ci; /* cache-inhibited */ void __iomem *ci; /* cache-inhibited */
}; };
...@@ -321,12 +322,12 @@ struct qm_portal { ...@@ -321,12 +322,12 @@ struct qm_portal {
/* Cache-inhibited register access. */ /* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset) static inline u32 qm_in(struct qm_portal *p, u32 offset)
{ {
return be32_to_cpu(__raw_readl(p->addr.ci + offset)); return ioread32be(p->addr.ci + offset);
} }
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{ {
__raw_writel(cpu_to_be32(val), p->addr.ci + offset); iowrite32be(val, p->addr.ci + offset);
} }
/* Cache Enabled Portal Access */ /* Cache Enabled Portal Access */
...@@ -342,7 +343,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) ...@@ -342,7 +343,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{ {
return be32_to_cpu(__raw_readl(p->addr.ce + offset)); return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
} }
/* --- EQCR API --- */ /* --- EQCR API --- */
...@@ -646,11 +647,7 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal) ...@@ -646,11 +647,7 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
*/ */
dpaa_invalidate_touch_ro(res); dpaa_invalidate_touch_ro(res);
#endif #endif
/* if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
* when accessing 'verb', use __raw_readb() to ensure that compiler
* inlining doesn't try to optimise out "excess reads".
*/
if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
if (!dqrr->pi) if (!dqrr->pi)
dqrr->vbit ^= QM_DQRR_VERB_VBIT; dqrr->vbit ^= QM_DQRR_VERB_VBIT;
...@@ -777,11 +774,8 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal) ...@@ -777,11 +774,8 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
union qm_mr_entry *res = qm_cl(mr->ring, mr->pi); union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
DPAA_ASSERT(mr->pmode == qm_mr_pvb); DPAA_ASSERT(mr->pmode == qm_mr_pvb);
/*
* when accessing 'verb', use __raw_readb() to ensure that compiler if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
* inlining doesn't try to optimise out "excess reads".
*/
if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
if (!mr->pi) if (!mr->pi)
mr->vbit ^= QM_MR_VERB_VBIT; mr->vbit ^= QM_MR_VERB_VBIT;
...@@ -822,7 +816,7 @@ static inline int qm_mc_init(struct qm_portal *portal) ...@@ -822,7 +816,7 @@ static inline int qm_mc_init(struct qm_portal *portal)
mc->cr = portal->addr.ce + QM_CL_CR; mc->cr = portal->addr.ce + QM_CL_CR;
mc->rr = portal->addr.ce + QM_CL_RR0; mc->rr = portal->addr.ce + QM_CL_RR0;
mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT) mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
? 0 : 1; ? 0 : 1;
mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING #ifdef CONFIG_FSL_DPAA_CHECKING
...@@ -880,7 +874,7 @@ static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal) ...@@ -880,7 +874,7 @@ static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
* its command is submitted and completed. This includes the valid-bit, * its command is submitted and completed. This includes the valid-bit,
* in case you were wondering... * in case you were wondering...
*/ */
if (!__raw_readb(&rr->verb)) { if (!rr->verb) {
dpaa_invalidate_touch_ro(rr); dpaa_invalidate_touch_ro(rr);
return NULL; return NULL;
} }
...@@ -1120,8 +1114,9 @@ static int qman_create_portal(struct qman_portal *portal, ...@@ -1120,8 +1114,9 @@ static int qman_create_portal(struct qman_portal *portal,
* config, everything that follows depends on it and "config" is more * config, everything that follows depends on it and "config" is more
* for (de)reference * for (de)reference
*/ */
p->addr.ce = c->addr_virt[DPAA_PORTAL_CE]; p->addr.ce = c->addr_virt_ce;
p->addr.ci = c->addr_virt[DPAA_PORTAL_CI]; p->addr.ce_be = c->addr_virt_ce;
p->addr.ci = c->addr_virt_ci;
/* /*
* If CI-stashing is used, the current defaults use a threshold of 3, * If CI-stashing is used, the current defaults use a threshold of 3,
* and stash with high-than-DQRR priority. * and stash with high-than-DQRR priority.
......
...@@ -224,7 +224,6 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -224,7 +224,6 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node; struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg; struct qm_portal_config *pcfg;
struct resource *addr_phys[2]; struct resource *addr_phys[2];
void __iomem *va;
int irq, cpu, err; int irq, cpu, err;
u32 val; u32 val;
...@@ -262,23 +261,21 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -262,23 +261,21 @@ static int qman_portal_probe(struct platform_device *pdev)
} }
pcfg->irq = irq; pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0); pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
if (!va) { resource_size(addr_phys[0]),
dev_err(dev, "ioremap::CE failed\n"); QBMAN_MEMREMAP_ATTR);
if (!pcfg->addr_virt_ce) {
dev_err(dev, "memremap::CE failed\n");
goto err_ioremap1; goto err_ioremap1;
} }
pcfg->addr_virt[DPAA_PORTAL_CE] = va; pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
resource_size(addr_phys[1]));
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]), if (!pcfg->addr_virt_ci) {
_PAGE_GUARDED | _PAGE_NO_CACHE);
if (!va) {
dev_err(dev, "ioremap::CI failed\n"); dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2; goto err_ioremap2;
} }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
pcfg->pools = qm_get_pools_sdqcr(); pcfg->pools = qm_get_pools_sdqcr();
spin_lock(&qman_lock); spin_lock(&qman_lock);
...@@ -310,9 +307,9 @@ static int qman_portal_probe(struct platform_device *pdev) ...@@ -310,9 +307,9 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0; return 0;
err_portal_init: err_portal_init:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]); iounmap(pcfg->addr_virt_ci);
err_ioremap2: err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]); memunmap(pcfg->addr_virt_ce);
err_ioremap1: err_ioremap1:
return -ENXIO; return -ENXIO;
} }
......
...@@ -153,11 +153,9 @@ static inline void qman_cgrs_xor(struct qman_cgrs *dest, ...@@ -153,11 +153,9 @@ static inline void qman_cgrs_xor(struct qman_cgrs *dest,
void qman_init_cgr_all(void); void qman_init_cgr_all(void);
struct qm_portal_config { struct qm_portal_config {
/* /* Portal addresses */
* Corenet portal addresses; void *addr_virt_ce;
* [0]==cache-enabled, [1]==cache-inhibited. void __iomem *addr_virt_ci;
*/
void __iomem *addr_virt[2];
struct device *dev; struct device *dev;
struct iommu_domain *iommu_domain; struct iommu_domain *iommu_domain;
/* Allow these to be joined in lists */ /* Allow these to be joined in lists */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment