Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e6e2df69 authored by Roy Pledge, committed by Li Yang
Browse files

soc/fsl/qbman: Rework portal mapping calls for ARM/PPC



Rework portal mapping for PPC and ARM. The PPC devices require a
cacheable coherent mapping while ARM will work with a non-cacheable/
write-combine mapping. This eliminates the need for manual cache
flushes on ARM, and also fixes the code so that sparse checking is clean.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
parent 219e8e05
Loading
Loading
Loading
Loading
+10 −8
Original line number Diff line number Diff line
@@ -154,7 +154,8 @@ struct bm_mc {
};

struct bm_addr {
-	void __iomem *ce;	/* cache-enabled */
+	void *ce;		/* cache-enabled */
+	__be32 *ce_be;		/* Same as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

@@ -167,12 +168,12 @@ struct bm_portal {
/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
-	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+	return ioread32be(p->addr.ci + offset);
}

static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
-	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
@@ -188,7 +189,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)

static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
-	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}

struct bman_portal {
@@ -408,7 +409,7 @@ static int bm_mc_init(struct bm_portal *portal)

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
-	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+	mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
		    0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
@@ -466,7 +467,7 @@ static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
-	if (!__raw_readb(&rr->verb)) {
+	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
@@ -512,8 +513,9 @@ static int bman_create_portal(struct bman_portal *portal,
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
-	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
-	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	p->addr.ce = c->addr_virt_ce;
+	p->addr.ce_be = c->addr_virt_ce;
+	p->addr.ci = c->addr_virt_ci;
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
+10 −13
Original line number Diff line number Diff line
@@ -91,7 +91,6 @@ static int bman_portal_probe(struct platform_device *pdev)
	struct device_node *node = dev->of_node;
	struct bm_portal_config *pcfg;
	struct resource *addr_phys[2];
-	void __iomem *va;
	int irq, cpu;

	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
@@ -123,23 +122,21 @@ static int bman_portal_probe(struct platform_device *pdev)
	}
	pcfg->irq = irq;

-	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-	if (!va) {
-		dev_err(dev, "ioremap::CE failed\n");
+	pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+					resource_size(addr_phys[0]),
+					QBMAN_MEMREMAP_ATTR);
+	if (!pcfg->addr_virt_ce) {
+		dev_err(dev, "memremap::CE failed\n");
 		goto err_ioremap1;
 	}
 
-	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
-	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
-			  _PAGE_GUARDED | _PAGE_NO_CACHE);
-	if (!va) {
+	pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+					resource_size(addr_phys[1]));
+	if (!pcfg->addr_virt_ci) {
 		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
 	}
 
-	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
	spin_lock(&bman_lock);
	cpu = cpumask_next_zero(-1, &portal_cpus);
	if (cpu >= nr_cpu_ids) {
@@ -164,9 +161,9 @@ static int bman_portal_probe(struct platform_device *pdev)
	return 0;

err_portal_init:
-	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+	iounmap(pcfg->addr_virt_ci);
 err_ioremap2:
-	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+	memunmap(pcfg->addr_virt_ce);
err_ioremap1:
	return -ENXIO;
}
+3 −5
Original line number Diff line number Diff line
@@ -46,11 +46,9 @@ extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
extern struct gen_pool *bm_bpalloc;

struct bm_portal_config {
-	/*
-	 * Corenet portal addresses;
-	 * [0]==cache-enabled, [1]==cache-inhibited.
-	 */
-	void __iomem *addr_virt[2];
+	/* Portal addresses */
+	void  *addr_virt_ce;
+	void __iomem *addr_virt_ci;
	/* Allow these to be joined in lists */
	struct list_head list;
	struct device *dev;
+11 −4
Original line number Diff line number Diff line
@@ -51,12 +51,12 @@

static inline void dpaa_flush(void *p)
{
+	/*
+	 * Only PPC needs to flush the cache currently - on ARM the mapping
+	 * is non cacheable
+	 */
 #ifdef CONFIG_PPC
 	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
-#elif defined(CONFIG_ARM)
-	__cpuc_flush_dcache_area(p, 64);
-#elif defined(CONFIG_ARM64)
-	__flush_dcache_area(p, 64);
 #endif
}

@@ -102,4 +102,11 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
				size_t *size);

+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR	MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR	MEMREMAP_WC
+#endif

#endif	/* __DPAA_SYS_H */
+13 −18
Original line number Diff line number Diff line
@@ -300,7 +300,8 @@ struct qm_mc {
};

struct qm_addr {
-	void __iomem *ce;	/* cache-enabled */
+	void *ce;		/* cache-enabled */
+	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

@@ -321,12 +322,12 @@ struct qm_portal {
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
-	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+	return ioread32be(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
-	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
@@ -342,7 +343,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
-	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}

/* --- EQCR API --- */
@@ -646,11 +647,7 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
	 */
	dpaa_invalidate_touch_ro(res);
#endif
-	/*
-	 *  when accessing 'verb', use __raw_readb() to ensure that compiler
-	 * inlining doesn't try to optimise out "excess reads".
-	 */
-	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
@@ -777,11 +774,8 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
-	/*
-	 *  when accessing 'verb', use __raw_readb() to ensure that compiler
-	 * inlining doesn't try to optimise out "excess reads".
-	 */
-	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+
+	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
@@ -822,7 +816,7 @@ static inline int qm_mc_init(struct qm_portal *portal)

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
-	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+	mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
		    ? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
@@ -880,7 +874,7 @@ static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
-	if (!__raw_readb(&rr->verb)) {
+	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
@@ -1120,8 +1114,9 @@ static int qman_create_portal(struct qman_portal *portal,
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
-	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
-	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+	p->addr.ce = c->addr_virt_ce;
+	p->addr.ce_be = c->addr_virt_ce;
+	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with high-than-DQRR priority.
Loading