Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2152593f authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "Revert "ARM64: Insert barriers before Store-Release operations""

parents 4c5185c9 d7aab291
Loading
Loading
Loading
Loading
+0 −11
Original line number Original line Diff line number Diff line
@@ -529,17 +529,6 @@ config CPU_BIG_ENDIAN
       help
       help
         Say Y if you plan on running a kernel in big-endian mode.
         Say Y if you plan on running a kernel in big-endian mode.


config ARM64_STLR_NEEDS_BARRIER
	bool "Store-Release operations require explicit barriers"
	def_bool ARCH_MSM8996
	help
	  Some early samples of MSMTHULIUM SoCs require that an explicit barrier
	  be executed prior to any Store-Release operation (STLR) to conform to
	  ARM memory ordering requirements. If you are building the kernel to
	  work on one of these early designs, select 'Y' here.

	  For production kernels, you should say 'N' here.

config SMP
config SMP
	bool "Symmetric Multi-Processing"
	bool "Symmetric Multi-Processing"
	help
	help
+0 −19
Original line number Original line Diff line number Diff line
@@ -58,24 +58,6 @@ do { \
#define smp_rmb()	dmb(ishld)
#define smp_rmb()	dmb(ishld)
#define smp_wmb()	dmb(ishst)
#define smp_wmb()	dmb(ishst)


#ifdef CONFIG_ARM64_STLR_NEEDS_BARRIER
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	switch (sizeof(*p)) {						\
	case 4:								\
		asm volatile ("dmb nsh\n"				\
			      "stlr %w1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	case 8:								\
		asm volatile ("dmb nsh\n"				\
			      "stlr %1, %0"				\
				: "=Q" (*p) : "r" (v) : "memory");	\
		break;							\
	}								\
} while (0)
#else
#define smp_store_release(p, v)						\
#define smp_store_release(p, v)						\
do {									\
do {									\
	compiletime_assert_atomic_type(*p);				\
	compiletime_assert_atomic_type(*p);				\
@@ -90,7 +72,6 @@ do { \
		break;							\
		break;							\
	}								\
	}								\
} while (0)
} while (0)
#endif


#define smp_load_acquire(p)						\
#define smp_load_acquire(p)						\
({									\
({									\
+0 −6
Original line number Original line Diff line number Diff line
@@ -85,9 +85,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
{
	asm volatile(
	asm volatile(
#ifdef CONFIG_ARM64_STLR_NEEDS_BARRIER
"	dmb nsh\n"
#endif
"	stlrh	%w1, %0\n"
"	stlrh	%w1, %0\n"
	: "=Q" (lock->owner)
	: "=Q" (lock->owner)
	: "r" (lock->owner + 1)
	: "r" (lock->owner + 1)
@@ -156,9 +153,6 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
{
	asm volatile(
	asm volatile(
#ifdef CONFIG_ARM64_STLR_NEEDS_BARRIER
"	dmb nsh\n"
#endif
	"	stlr	%w1, %0\n"
	"	stlr	%w1, %0\n"
	: "=Q" (rw->lock) : "r" (0) : "memory");
	: "=Q" (rw->lock) : "r" (0) : "memory");
}
}
+0 −12
Original line number Original line Diff line number Diff line
@@ -89,18 +89,6 @@ config BRCMSTB_L2_IRQ
	select GENERIC_IRQ_CHIP
	select GENERIC_IRQ_CHIP
	select IRQ_DOMAIN
	select IRQ_DOMAIN


config MSM_GIC_SGI_NEEDS_BARRIER
	bool "SGI operations require explicit barriers"
	depends on ARM_GIC_V3
	def_bool ARCH_MSMTHULIUM
	help
	  Some early samples of the MSMTHULIUM SoCs require that an explicit
	  barrier be executed between two successive writes to the ICC_SGI1R_EL1
	  register. If you are building the kernel to work on one of these early
	  designs, select 'Y' here.

	  For production kernels, you should say 'N' here.

config MSM_SHOW_RESUME_IRQ
config MSM_SHOW_RESUME_IRQ
	bool "Enable logging of interrupts that could have caused resume"
	bool "Enable logging of interrupts that could have caused resume"
	depends on ARM_GIC
	depends on ARM_GIC
+6 −10
Original line number Original line Diff line number Diff line
@@ -122,12 +122,16 @@ static u64 __maybe_unused gic_read_iar(void)
	u64 irqstat;
	u64 irqstat;


	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	/* As per the architecture specification */
	mb();
	return irqstat;
	return irqstat;
}
}


static void __maybe_unused gic_write_pmr(u64 val)
static void __maybe_unused gic_write_pmr(u64 val)
{
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
	/* As per the architecture specification */
	mb();
}
}


static void __maybe_unused gic_write_ctlr(u64 val)
static void __maybe_unused gic_write_ctlr(u64 val)
@@ -144,17 +148,9 @@ static void __maybe_unused gic_write_grpen1(u64 val)


static void __maybe_unused gic_write_sgi1r(u64 val)
static void __maybe_unused gic_write_sgi1r(u64 val)
{
{
#ifdef CONFIG_MSM_GIC_SGI_NEEDS_BARRIER
	static DEFINE_RAW_SPINLOCK(sgi_lock);
	unsigned long flags;
	raw_spin_lock_irqsave(&sgi_lock, flags);
#endif

	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
#ifdef CONFIG_MSM_GIC_SGI_NEEDS_BARRIER
	/* As per the architecture specification */
	dsb(nsh);
	mb();
	raw_spin_unlock_irqrestore(&sgi_lock, flags);
#endif
}
}


static void gic_enable_sre(void)
static void gic_enable_sre(void)