Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e9b9eb59 authored by David S. Miller
Browse files

sparc64: Use pause instruction when available.



In atomic backoff and cpu_relax(), use the pause instruction
found on SPARC-T4 and later.

It makes the cpu strand unselectable for the given number of
cycles, unless an intervening disrupting trap occurs.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 270c10e0
Loading
Loading
Loading
Loading
+19 −13
Original line number Diff line number Diff line
@@ -16,6 +16,12 @@
88:	rd		%ccr, %g0;		\
	rd		%ccr, %g0;		\
	rd		%ccr, %g0;		\
	.section	.pause_patch,"ax";	\
	.word		88b;			\
	sllx		tmp, 7, tmp;		\
	wr		tmp, 0, %asr27;		\
	clr		tmp;			\
	.previous;				\
	brnz,pt		tmp, 88b;		\
	 sub		tmp, 1, tmp;		\
	set		BACKOFF_LIMIT, tmp;	\
+10 −3
Original line number Diff line number Diff line
@@ -196,9 +196,16 @@ extern unsigned long get_wchan(struct task_struct *task);
#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])

/* cpu_relax(): a low-impact spin-wait hint.
 *
 * Baseline body is three reads of %ccr (a cheap, side-effect-free way to
 * stall the strand a little).  When the CPU supports the pause
 * instruction (SPARC-T4+), pause_patch() rewrites the three words at
 * label 99 — recorded in the .pause_patch section — with
 * "wr %g0, 128, %asr27" (pause for 128 cycles) plus two nops.
 *
 * NOTE: the scraped diff had interleaved the removed pre-image lines
 * (a duplicate "#define cpu_relax()" and a stray "rd %%ccr, %%g0");
 * this is the clean post-commit definition.
 */
#define cpu_relax()	asm volatile("\n99:\n\t"			\
				     "rd	%%ccr, %%g0\n\t"	\
				     "rd	%%ccr, %%g0\n\t"	\
				     "rd	%%ccr, %%g0\n\t"	\
				     ".section	.pause_patch,\"ax\"\n\t"\
				     ".word	99b\n\t"		\
				     "wr	%%g0, 128, %%asr27\n\t"	\
				     "nop\n\t"				\
				     "nop\n\t"				\
				     ".previous"			\
				     ::: "memory")

/* Prefetch support.  This is tuned for UltraSPARC-III and later.
+7 −0
Original line number Diff line number Diff line
@@ -59,6 +59,13 @@ struct popc_6insn_patch_entry {
extern struct popc_6insn_patch_entry __popc_6insn_patch,
	__popc_6insn_patch_end;

/* One record per patchable pause site collected in the .pause_patch
 * section.  pause_patch() walks these at boot and writes the three
 * replacement instruction words over the code at 'addr'.
 */
struct pause_patch_entry {
	unsigned int	addr;		/* address of the first instruction to rewrite */
	unsigned int	insns[3];	/* the three replacement instruction words */
};
/* Section bounds provided by the linker script (.pause_patch). */
extern struct pause_patch_entry __pause_patch,
	__pause_patch_end;

extern void __init per_cpu_patch(void);
extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
				    struct sun4v_1insn_patch_entry *);
+21 −0
Original line number Diff line number Diff line
@@ -316,6 +316,25 @@ static void __init popc_patch(void)
	}
}

/* Apply every pause-site patch recorded in the .pause_patch section:
 * for each entry, overwrite the three instruction words at the recorded
 * address with the replacement instructions.  Each word is stored, then
 * ordered with wmb() and made visible to the instruction stream with a
 * "flush" before the next word is written.
 */
static void __init pause_patch(void)
{
	struct pause_patch_entry *ent;

	for (ent = &__pause_patch; ent < &__pause_patch_end; ent++) {
		unsigned long j, target = ent->addr;

		for (j = 0; j < 3; j++) {
			unsigned long insn_addr = target + (j * 4);

			*(unsigned int *) insn_addr = ent->insns[j];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (insn_addr));
		}
	}
}

#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
@@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void)

	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
	if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
		pause_patch();
}

void __init setup_arch(char **cmdline_p)
+5 −0
Original line number Diff line number Diff line
@@ -132,6 +132,11 @@ SECTIONS
		*(.popc_6insn_patch)
		__popc_6insn_patch_end = .;
	}
	.pause_patch : {
		__pause_patch = .;
		*(.pause_patch)
		__pause_patch_end = .;
	}
	PERCPU_SECTION(SMP_CACHE_BYTES)

	. = ALIGN(PAGE_SIZE);