Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ab08e05d authored by Greg Kroah-Hartman
Browse files

Merge 5.4.256 into android11-5.4-lts



Changes in 5.4.256
	powerpc/pmac/smp: Avoid unused-variable warnings
	powerpc/pmac/smp: Drop unnecessary volatile qualifier
	Revert "MIPS: Alchemy: fix dbdma2"
	Linux 5.4.256

Change-Id: Id963efe437b2c99033d2ae4496b1081333f628bb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parents 1becc9d0 0c2544ad
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 255
SUBLEVEL = 256
EXTRAVERSION =
NAME = Kleptomaniac Octopus

+12 −15
Original line number Diff line number Diff line
@@ -30,7 +30,6 @@
 *
 */

#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -624,18 +623,17 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 * There is an errata on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
	 * these parts. If it is fixed in the future, these dma_cache_inv will
	 * just be nothing more than empty macros. See io.h.
	 */
	if (!dma_default_coherent)
		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
@@ -687,18 +685,17 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 * There is an errata on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache_inv will just
	 * be nothing more than empty macros. See io.h.
	 */
	if (!dma_default_coherent)
		dma_cache_inv(KSEG0ADDR(buf), nbytes);
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+4 −4
Original line number Diff line number Diff line
@@ -660,13 +660,13 @@ static void smp_core99_gpio_tb_freeze(int freeze)

#endif /* !CONFIG_PPC64 */

/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
volatile static long int core99_l2_cache;
volatile static long int core99_l3_cache;

static void core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
	/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
	static long int core99_l2_cache;
	static long int core99_l3_cache;

	if (!cpu_has_feature(CPU_FTR_L2CR))
		return;