Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a690984d authored by Vineet Gupta
Browse files

ARC: [mm] refactor the core (i|d)cache line ops loops



Nothing semantical
* simplify the alignment code by using & operation only
* rename variables clearly as paddr

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent c917a36f
Loading
Loading
Loading
Loading
+24 −29
Original line number Original line Diff line number Diff line
@@ -270,21 +270,20 @@ static inline void __dc_entire_op(const int cacheop)
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * It's sole purpose is to help gcc generate ZOL
 * It's sole purpose is to help gcc generate ZOL
 */
 */
static inline void __dc_line_loop(unsigned long start, unsigned long sz,
static inline void __dc_line_loop(unsigned long paddr, unsigned long sz,
				  int aux_reg)
				  int aux_reg)
{
{
	int num_lines, slack;
	int num_lines;


	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @start - aligned to cache line and integral @num_lines.
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 * This however can be avoided for page sized since:
	 *  -@start will be cache-line aligned already (being page aligned)
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		slack = start & ~DCACHE_LINE_MASK;
		sz += paddr & ~DCACHE_LINE_MASK;
		sz += slack;
		paddr &= DCACHE_LINE_MASK;
		start -= slack;
	}
	}


	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
@@ -298,17 +297,17 @@ static inline void __dc_line_loop(unsigned long start, unsigned long sz,
		 * doesn't support aliasing configs for D$, yet.
		 * doesn't support aliasing configs for D$, yet.
		 * Thus paddr is enough to provide both tag and index.
		 * Thus paddr is enough to provide both tag and index.
		 */
		 */
		write_aux_reg(ARC_REG_DC_PTAG, start);
		write_aux_reg(ARC_REG_DC_PTAG, paddr);
#endif
#endif
		write_aux_reg(aux_reg, start);
		write_aux_reg(aux_reg, paddr);
		start += ARC_DCACHE_LINE_LEN;
		paddr += ARC_DCACHE_LINE_LEN;
	}
	}
}
}


/*
/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
 */
static inline void __dc_line_op(unsigned long start, unsigned long sz,
static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
					const int cacheop)
					const int cacheop)
{
{
	unsigned long flags, tmp = tmp;
	unsigned long flags, tmp = tmp;
@@ -332,7 +331,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
	else
	else
		aux = ARC_REG_DC_FLDL;
		aux = ARC_REG_DC_FLDL;


	__dc_line_loop(start, sz, aux);
	__dc_line_loop(paddr, sz, aux);


	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();
		wait_for_flush();
@@ -347,7 +346,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
#else
#else


#define __dc_entire_op(cacheop)
#define __dc_entire_op(cacheop)
#define __dc_line_op(start, sz, cacheop)
#define __dc_line_op(paddr, sz, cacheop)


#endif /* CONFIG_ARC_HAS_DCACHE */
#endif /* CONFIG_ARC_HAS_DCACHE */


@@ -399,49 +398,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
/***********************************************************
/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 * Machine specific helper for per line I-Cache invalidate.
 */
 */
static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
				unsigned long sz)
{
{
	unsigned long flags;
	unsigned long flags;
	int num_lines, slack;
	int num_lines;
	unsigned int addr;


	/*
	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
	 * However page sized flushes can be compile time optimised.
	 * However page sized flushes can be compile time optimised.
	 *  -@phy_start will be cache-line aligned already (being page aligned)
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		slack = phy_start & ~ICACHE_LINE_MASK;
		sz += paddr & ~ICACHE_LINE_MASK;
		sz += slack;
		paddr &= ICACHE_LINE_MASK;
		phy_start -= slack;
		vaddr &= ICACHE_LINE_MASK;
	}
	}


	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);


#if (CONFIG_ARC_MMU_VER > 2)
#if (CONFIG_ARC_MMU_VER <= 2)
	vaddr &= ICACHE_LINE_MASK;
	addr = phy_start;
#else
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	addr = phy_start | ((vaddr >> 13) & 0x1F);
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif
#endif


	local_irq_save(flags);
	local_irq_save(flags);
	while (num_lines-- > 0) {
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
#if (CONFIG_ARC_MMU_VER > 2)
		/* tag comes from phy addr */
		/* tag comes from phy addr */
		write_aux_reg(ARC_REG_IC_PTAG, addr);
		write_aux_reg(ARC_REG_IC_PTAG, paddr);


		/* index bits come from vaddr */
		/* index bits come from vaddr */
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
		vaddr += ARC_ICACHE_LINE_LEN;
#else
#else
		/* paddr contains stuffed vaddrs bits */
		/* paddr contains stuffed vaddrs bits */
		write_aux_reg(ARC_REG_IC_IVIL, addr);
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
#endif
		addr += ARC_ICACHE_LINE_LEN;
		paddr += ARC_ICACHE_LINE_LEN;
	}
	}
	local_irq_restore(flags);
	local_irq_restore(flags);
}
}