
Commit 9a6655e4 authored by Catalin Marinas, committed by Santosh Shilimkar

ARM: Improve the L2 cache performance when PL310 is used

With the PL310 L2 cache controller, cache maintenance operations by
physical address (PA) and cache sync are atomic and do not require a
"wait" loop. This patch conditionally defines the cache_wait()
function as a no-op.

Since L2x0 cache controllers do not work with ARMv7 CPUs, the patch
automatically enables CACHE_PL310 when CPU_V7 is enabled and CPU_V6
is not.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 899611ee
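
For context: the by-PA maintenance helpers in cache-l2x0.c poll for the
previous operation to finish before issuing the next one. A minimal sketch of
one such helper is below (modelled on l2x0_clean_line() as it appears around
this revision; treat the exact body as illustrative). With CONFIG_CACHE_PL310
enabled, cache_wait() compiles away and the register writes are issued back
to back.

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* wait for the previous by-PA operation; a no-op on PL310 */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}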
arch/arm/mm/Kconfig  +8 −0
@@ -779,6 +779,14 @@ config CACHE_L2X0
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_PL310
+	bool
+	depends on CACHE_L2X0
+	default y if CPU_V7 && !CPU_V6
+	help
+	  This option enables optimisations for the PL310 cache
+	  controller.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP)
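
Note that CACHE_PL310 has no prompt string, so it cannot be toggled in
menuconfig; it is turned on only by the default above, i.e. for ARMv7-only
builds. When it fires, the build emits the usual CONFIG_ define that the
#ifdef in cache-l2x0.c tests. A sketch of the generated line (header path as
in kernels of this era, shown for illustration):

/* include/generated/autoconf.h (generated at build time) */
#define CONFIG_CACHE_PL310 1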
arch/arm/mm/cache-l2x0.c  +12 −3
@@ -29,13 +29,22 @@ static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
-	/* wait for the operation to complete */
+	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
@@ -110,7 +119,7 @@ static inline void l2x0_inv_all(void)
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
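
Two points worth noting in this hunk. Background operations by way are not
atomic even on PL310, so l2x0_inv_all() must keep polling until the way mask
clears; that is why the poll loop survives under the cache_wait_way() name
and is called here explicitly. Cache sync, by contrast, is atomic on PL310,
so cache_sync() (unchanged by this patch, reproduced roughly below as a
sketch of the file at this revision) skips its wait when CONFIG_CACHE_PL310
is set:

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	/* wait for the sync to complete; a no-op on PL310 */
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}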