
Commit 79c49681 authored by Linus Torvalds
Pull MIPS fixes from Ralf Baechle:
 "Here's a final round of fixes for 4.12:

   - Fix misordered instructions in assembly code making kernel startup
     via UHI unreliable.

   - Fix special case of MADDF and MSUBF emulation.

   - Fix alignment issue in address calculation in pm-cps on 64 bit.

   - Fix IRQ tracing & lockdep when rescheduling.

   - Systems with MAARs require post-DMA cache flushes.

  The reordering fix and the MADDF/MSUBF fix have sat in linux-next for
  a number of days. The others haven't propagated from my pull tree to
  linux-next yet but all have survived manual testing and Imagination's
  automated test system and there are no pending bug reports"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: Avoid accidental raw backtrace
  MIPS: Perform post-DMA cache flushes on systems with MAARs
  MIPS: Fix IRQ tracing & lockdep when rescheduling
  MIPS: pm-cps: Drop manual cache-line alignment of ready_count
  MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately
  MIPS: head: Reorder instructions missing a delay slot
parents 3a61a54c 85423636
+3 −0
@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -119,6 +120,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule
 
 	local_irq_disable		# make sure need_resched and
@@ -155,6 +157,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
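For context on the two TRACE_IRQS_OFF/TRACE_IRQS_ON additions above: with CONFIG_TRACE_IRQFLAGS, lockdep keeps its own record of whether interrupts are enabled, and that record has to be updated whenever a low-level path toggles the real interrupt state by hand. The following is a minimal userspace toy model of that invariant (hypothetical names, not kernel code), meant only to show why the annotations accompany the raw IRQ changes.

#include <assert.h>
#include <stdbool.h>

/* Toy model: two views of the IRQ state that must never disagree. */
static bool hw_irqs_on = true;       /* what the hardware actually has       */
static bool tracked_irqs_on = true;  /* what the tracing/lockdep side thinks */

static void trace_irqs_off(void) { tracked_irqs_on = false; }
static void trace_irqs_on(void)  { tracked_irqs_on = true;  }

static void raw_irq_disable(void) { hw_irqs_on = false; }  /* the asm path */
static void raw_irq_enable(void)  { hw_irqs_on = true;  }

/* Stand-in for any lockdep-checked code reached afterwards. */
static void checked_call(void)
{
	assert(hw_irqs_on == tracked_irqs_on);  /* mismatch => lockdep warning */
}

int main(void)
{
	raw_irq_disable();   /* low-level code toggles IRQs ...          */
	trace_irqs_off();    /* ... so the tracked view must follow suit */
	checked_call();

	trace_irqs_on();
	raw_irq_enable();
	checked_call();
	return 0;
}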
+1 −1
@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
 	beq		t0, t1, dtb_found
 #endif
 	li		t1, -2
-	beq		a0, t1, dtb_found
 	move		t2, a1
+	beq		a0, t1, dtb_found
 
 	li		t2, 0
 dtb_found:
+1 −8
@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
 
 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;
 
 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
 	}
 
 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}

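One plausible reading of the "alignment issue in address calculation ... on 64 bit" item, inferred from the removed lines above rather than stated on this page: dlinesz is a plain unsigned, so ~(dlinesz - 1) is a 32-bit mask, and masking a 64-bit address with it zero-extends the mask and clears the upper 32 address bits; since kmalloc() already returns suitably aligned memory, the manual rounding can simply be dropped. A standalone sketch of the mask problem (assumes LP64, i.e. 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0xffffffff80123456UL;  /* 64-bit kernel-style address */
	unsigned dlinesz = 32;                      /* d-cache line size           */

	/* 32-bit mask: zero-extended to 0x00000000ffffffe0, wiping the top bits */
	unsigned long bad  = addr & ~(dlinesz - 1);
	/* 64-bit mask: rounds down within the same 64-bit address               */
	unsigned long good = addr & ~((unsigned long)dlinesz - 1);

	printf("bad  = %#lx\n", bad);   /* 0x80123440         */
	printf("good = %#lx\n", good);  /* 0xffffffff80123440 */
	return 0;
}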
+2 −0
@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
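The show_stack() hunk above initialises cp0_status in a pt_regs that lives on the stack. Presumably the point is that, left uninitialised, the KSU bits could read as user mode and send the backtracer down the raw-dump path, which is what the commit title "MIPS: Avoid accidental raw backtrace" suggests. A self-contained sketch with stand-in constants (the real definitions live in the MIPS headers):

#include <stdio.h>
#include <string.h>

/* Stand-in constants mirroring the MIPS KSU field of cp0_status. */
#define ST0_KSU		0x18UL	/* KSU bits                        */
#define KSU_KERNEL	0x00UL	/* KSU value meaning "kernel mode" */

struct fake_pt_regs {
	unsigned long cp0_status;
	unsigned long regs[32];
};

/* Simplified version of the "is this a kernel-mode frame?" decision that
 * steers the backtracer between symbolic unwinding and a raw dump. */
static int kernel_mode_frame(const struct fake_pt_regs *r)
{
	return (r->cp0_status & ST0_KSU) == KSU_KERNEL;
}

int main(void)
{
	struct fake_pt_regs regs;

	/* Pre-fix situation: cp0_status is never written, so it holds
	 * whatever was on the stack - simulated here with 0xff bytes. */
	memset(&regs, 0xff, sizeof(regs));
	printf("garbage status -> kernel frame? %d\n", kernel_mode_frame(&regs));

	/* The fix: pin the KSU bits to kernel mode before unwinding. */
	regs.cp0_status = KSU_KERNEL;
	printf("KSU_KERNEL     -> kernel frame? %d\n", kernel_mode_frame(&regs));
	return 0;
}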
+4 −1
@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		DPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}
 
 	switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	}
 	assert(rm & (DP_HIDDEN_BIT << 3));
 
+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754dp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & DP_HIDDEN_BIT);

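The MADDF/MSUBF hunks above return the formatted product as soon as the accumulator's class is zero, instead of falling through to the addition path, whose assert(zm & DP_HIDDEN_BIT) a zero operand cannot satisfy. As a rough userspace analogue (ordinary libm fma(), not the kernel's soft-float code): with a zero accumulator a fused multiply-add reduces to the rounded product, which is what the early ieee754dp_format(rs, re, rm) return produces.

#include <assert.h>
#include <math.h>   /* link with -lm */
#include <stdio.h>

int main(void)
{
	double x = 1.5, y = 2.5;

	/* The product is exact here, so fma(x, y, 0.0) and x * y agree;
	 * signed-zero and rounding corner cases are why the emulator
	 * special-cases the zero class rather than literally multiplying. */
	assert(fma(x, y, 0.0) == x * y);
	printf("fma(%g, %g, 0.0) = %g\n", x, y, fma(x, y, 0.0));
	return 0;
}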