
Commit 1e09177a authored by Linus Torvalds
Pull MIPS fixes from Paul Burton:
 "A couple more MIPS fixes for 4.18:

   - Use async IPIs for arch_trigger_cpumask_backtrace() in order to
     avoid warnings & deadlocks, fixing a problem introduced in v3.19
     with the fix trivial to backport as far as v4.9.

   - Fix ioremap()'s MMU/TLB backed path to avoid spuriously rejecting
     valid requests due to an incorrect belief that the memory region is
     backed by potentially-in-use RAM. This fixes a regression in v4.2"

* tag 'mips_fixes_4.18_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
  MIPS: Fix ioremap() RAM check
  MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
  MIPS: Call dump_stack() from show_regs()
parents 30c2c32d 523402fa
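
A note on the first fix: smp_call_function_many() with wait=1 spins until every target CPU has run the callback, and the backtrace path can be entered with interrupts disabled or aimed at a wedged CPU - exactly the cases where a synchronous wait warns or deadlocks. Below is a minimal sketch of the two calling styles, using a hypothetical demo_func() handler rather than anything from the commit:

#include <linux/smp.h>

static void demo_func(void *info) { }

static void demo_sync_vs_async(const struct cpumask *mask,
			       call_single_data_t *csd, int cpu)
{
	/* Synchronous: spins until all CPUs in @mask have run demo_func();
	 * from IRQ-disabled context this is where a deadlock can occur. */
	smp_call_function_many(mask, demo_func, NULL, 1);

	/* Asynchronous: queue the request and return immediately; the
	 * caller must ensure @csd is not already in flight. */
	csd->func = demo_func;
	smp_call_function_single_async(cpu, csd);
}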
arch/mips/kernel/process.c +29 −14
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
-
-	regs = get_irq_regs();
-
-	if (regs)
-		show_regs(regs);
-
-	dump_stack();
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
+
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
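
The busy-cpumask guard above generalizes to any smp_call_function_single_async() user: a call_single_data_t must not be reposted until its previous run has finished. A sketch of the same pattern in isolation, with hypothetical demo_* names (not from the commit):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(call_single_data_t, demo_csd);
static struct cpumask demo_csd_busy;

static void demo_handler(void *info)
{
	pr_info("demo IPI handled on CPU%u\n", smp_processor_id());
	/* Clearing the busy bit hands the CSD back to senders. */
	cpumask_clear_cpu(smp_processor_id(), &demo_csd_busy);
}

static void demo_kick(const cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/* A bit still set means the previous IPI was never handled,
		 * so its CSD may still be queued and cannot be reused. */
		if (cpumask_test_and_set_cpu(cpu, &demo_csd_busy))
			continue;

		csd = &per_cpu(demo_csd, cpu);
		csd->func = demo_handler;
		smp_call_function_single_async(cpu, csd);
	}
}

With the raising of IPIs made asynchronous, the generic nmi_trigger_cpumask_backtrace() helper takes care of the exclude_self case, output serialization and the completion timeout, which is why the arch hook shrinks to a single call.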
arch/mips/kernel/traps.c +1 −0
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
arch/mips/mm/ioremap.c +25 −12
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 	return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+			       void *arg)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr_pages; i++) {
+		if (pfn_valid(start_pfn + i) &&
+		    !PageReserved(pfn_to_page(start_pfn + i)))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+	unsigned long offset, pfn, last_pfn;
 	struct vm_struct * area;
-	unsigned long offset;
 	phys_addr_t last_addr;
 	void * addr;
 
@@ -137,17 +152,15 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
 		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
 	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
+	 * Don't allow anybody to remap RAM that may be allocated by the page
+	 * allocator, since that could lead to races & data clobbering.
 	 */
-	if (phys_addr < virt_to_phys(high_memory)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
-				return NULL;
+	pfn = PFN_DOWN(phys_addr);
+	last_pfn = PFN_DOWN(last_addr);
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
+		return NULL;
 	}
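
For the ioremap() fix, walk_system_ram_range() invokes its callback only for pages inside "System RAM" resources, so the new check fires on actual RAM rather than on every address below high_memory. A sketch of the same probe in isolation, with hypothetical demo_* names (not from the commit):

#include <linux/ioport.h>
#include <linux/pfn.h>
#include <linux/types.h>

static int demo_is_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg)
{
	/* Any nonzero return value stops the walk and is passed back. */
	return 1;
}

static bool demo_overlaps_ram(phys_addr_t addr, phys_addr_t size)
{
	unsigned long pfn = PFN_DOWN(addr);
	unsigned long last_pfn = PFN_DOWN(addr + size - 1);

	return walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				     demo_is_ram_cb) == 1;
}

The commit's __ioremap_check_ram() is more permissive than this sketch: it additionally requires pfn_valid() and !PageReserved(), so reserved pages inside System RAM (e.g. boot-time reservations) may still be remapped.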