Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dd2384a7 authored by Linus Torvalds
Browse files
Pull ARC fixes from Vineet Gupta:
 "Here's a late pull request for accumulated ARC fixes which came out of
  extended testing of the new ARCv2 port with LTP etc.  llock/scond
  livelock workaround has been reviewed by PeterZ.  The changes look a
  lot but I've crafted them into finer grained patches for better
  tracking later.

  I have some more fixes (ARC Futex backend) ready to go but those will
  have to wait for tglx to return from vacation.

  Summary:
   - Enable a reduced config of HS38 (w/o div-rem, ll64...)
   - Add software workaround for LLOCK/SCOND livelock
   - Fallout of a recent pt_regs update"

* tag 'arc-v4.2-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARCv2: spinlock/rwlock/atomics: reduce 1 instruction in exponential backoff
  ARC: Make pt_regs regs unsigned
  ARCv2: spinlock/rwlock: Reset retry delay when starting a new spin-wait cycle
  ARCv2: spinlock/rwlock/atomics: Delayed retry of failed SCOND with exponential backoff
  ARC: LLOCK/SCOND based rwlock
  ARC: LLOCK/SCOND based spin_lock
  ARC: refactor atomic inline asm operands with symbolic names
  Revert "ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock"
  ARCv2: [axs103_smp] Reduce clk for Quad FPGA configs
  ARCv2: Fix the peripheral address space detection
  ARCv2: allow selection of page size for MMUv4
  ARCv2: lib: memset: Don't assume 64-bit load/stores
  ARCv2: lib: memcpy: Missing PREFETCHW
  ARCv2: add knob for DIV_REM in Kconfig
  ARC/time: Migrate to new 'set-state' interface
parents b3b98a55 10971638
Loading
Loading
Loading
Loading
+11 −2
Original line number Diff line number Diff line
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K

config ARC_PAGE_SIZE_16K
	bool "16KB"
	depends on ARC_MMU_V3
	depends on ARC_MMU_V3 || ARC_MMU_V4

config ARC_PAGE_SIZE_4K
	bool "4KB"
	depends on ARC_MMU_V3
	depends on ARC_MMU_V3 || ARC_MMU_V4

endchoice

@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
	default y
	depends on !ARC_CANT_LLSC

config ARC_STAR_9000923308
	bool "Workaround for llock/scond livelock"
	default y
	depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC

config ARC_HAS_SWAPE
	bool "Insn: SWAPE (endian-swap)"
	default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
	  dest operands with 2 possible source operands.
	default y

config ARC_HAS_DIV_REM
	bool "Insn: div, divu, rem, remu"
	default y

config ARC_HAS_RTC
	bool "Local 64-bit r/o cycle counter"
	default n
+9 −1
Original line number Diff line number Diff line
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape

ifdef CONFIG_ISA_ARCV2

ifndef CONFIG_ARC_HAS_LL64
cflags-$(CONFIG_ISA_ARCV2)		+= -mno-ll64
cflags-y				+= -mno-ll64
endif

ifndef CONFIG_ARC_HAS_DIV_REM
cflags-y				+= -mno-div-rem
endif

endif

cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables
+3 −4
Original line number Diff line number Diff line
@@ -89,11 +89,10 @@
#define ECR_C_BIT_DTLB_LD_MISS		8
#define ECR_C_BIT_DTLB_ST_MISS		9


/* Auxiliary registers */
#define AUX_IDENTITY		4
#define AUX_INTR_VEC_BASE	0x25

#define AUX_NON_VOL		0x5e

/*
 * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {

struct bcr_perip {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int start:8, pad2:8, sz:8, pad:8;
	unsigned int start:8, pad2:8, sz:8, ver:8;
#else
	unsigned int pad:8, sz:8, pad2:8, start:8;
	unsigned int ver:8, sz:8, pad2:8, start:8;
#endif
};

+55 −23
Original line number Diff line number Diff line
@@ -23,33 +23,60 @@

#define atomic_set(v, i) (((v)->counter) = (i))

#ifdef CONFIG_ISA_ARCV2
#define PREFETCHW	"	prefetchw   [%1]	\n"
#else
#define PREFETCHW
#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay = 1, tmp;						\

#define SCOND_FAIL_RETRY_ASM							\
	"	bz	4f			\n"				\
	"   ; --- scond fail delay ---		\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
	"	b	1b			\n"	/* start over */	\
	"4: ; --- success ---			\n"				\

#define SCOND_FAIL_RETRY_VARS							\
	  ,[delay] "+&r" (delay),[tmp] "=&r"	(tmp)				\

#else	/* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM							\
	"	bnz     1b			\n"				\

#define SCOND_FAIL_RETRY_VARS

#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
	unsigned int val;				                \
	SCOND_FAIL_RETRY_VAR_DEF                                        \
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	PREFETCHW							\
	"	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
	unsigned int val;				                \
	SCOND_FAIL_RETRY_VAR_DEF                                        \
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	PREFETCHW							\
	"	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"						\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val)						\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return temp;							\
	return val;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

/**
 * __atomic_add_unless - add unless the number is a given value
+27 −27
Original line number Diff line number Diff line
@@ -20,20 +20,20 @@
struct pt_regs {

	/* Real registers */
	long bta;	/* bta_l1, bta_l2, erbta */
	unsigned long bta;	/* bta_l1, bta_l2, erbta */

	long lp_start, lp_end, lp_count;
	unsigned long lp_start, lp_end, lp_count;

	long status32;	/* status32_l1, status32_l2, erstatus */
	long ret;	/* ilink1, ilink2 or eret */
	long blink;
	long fp;
	long r26;	/* gp */
	unsigned long status32;	/* status32_l1, status32_l2, erstatus */
	unsigned long ret;	/* ilink1, ilink2 or eret */
	unsigned long blink;
	unsigned long fp;
	unsigned long r26;	/* gp */

	long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
	unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;

	long sp;	/* user/kernel sp depending on where we came from  */
	long orig_r0;
	unsigned long sp;	/* User/Kernel depending on where we came from */
	unsigned long orig_r0;

	/*
	 * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
		unsigned long event;
	};

	long user_r25;
	unsigned long user_r25;
};
#else

struct pt_regs {

	long orig_r0;
	unsigned long orig_r0;

	union {
		struct {
@@ -76,26 +76,26 @@ struct pt_regs {
		unsigned long event;
	};

	long bta;	/* bta_l1, bta_l2, erbta */
	unsigned long bta;	/* bta_l1, bta_l2, erbta */

	long user_r25;
	unsigned long user_r25;

	long r26;	/* gp */
	long fp;
	long sp;	/* user/kernel sp depending on where we came from  */
	unsigned long r26;	/* gp */
	unsigned long fp;
	unsigned long sp;	/* user/kernel sp depending on where we came from  */

	long r12;
	unsigned long r12;

	/*------- Below list auto saved by h/w -----------*/
	long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;

	long blink;
	long lp_end, lp_start, lp_count;
	unsigned long blink;
	unsigned long lp_end, lp_start, lp_count;

	long ei, ldi, jli;
	unsigned long ei, ldi, jli;

	long ret;
	long status32;
	unsigned long ret;
	unsigned long status32;
};

#endif
@@ -103,10 +103,10 @@ struct pt_regs {
/* Callee saved registers - need to be saved only when you are scheduled out */

struct callee_regs {
	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
	unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};

#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
#define instruction_pointer(regs)	((regs)->ret)
#define profile_pc(regs)		instruction_pointer(regs)

/* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {

static inline long regs_return_value(struct pt_regs *regs)
{
	return regs->r0;
	return (long)regs->r0;
}

#endif /* !__ASSEMBLY__ */
Loading