Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1f668377 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "Merge android-4.14.56 (818299f6) into msm-4.14"

parents f240f0ff 2973dadc
Loading
Loading
Loading
Loading
+0 −9
Original line number Diff line number Diff line
@@ -152,15 +152,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
 the default option --strip-debug will be used.  Otherwise,
 INSTALL_MOD_STRIP value will be used as the options to the strip command.
 
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
-    $(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
 INSTALL_HDR_PATH
 --------------------------------------------------
 INSTALL_HDR_PATH specifies where to install user space headers when
+1 −1
Original line number Diff line number Diff line
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 55
+SUBLEVEL = 56
 EXTRAVERSION =
 NAME = Petit Gorille

+7 −12
Original line number Diff line number Diff line
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
 	/*
-	 * The raw_cpu_read() is racy if called with preemption enabled.
-	 * This is not a bug: kernel_neon_busy is only set when
-	 * preemption is disabled, so we cannot migrate to another CPU
-	 * while it is set, nor can we migrate to a CPU where it is set.
-	 * So, if we find it clear on some CPU then we're guaranteed to
-	 * find it clear on any CPU we could migrate to.
-	 *
-	 * If we are in between kernel_neon_begin()...kernel_neon_end(),
-	 * the flag will be set, but preemption is also disabled, so we
-	 * can't migrate to another CPU and spuriously see it become
-	 * false.
+	 * kernel_neon_busy is only set while preemption is disabled,
+	 * and is clear whenever preemption is enabled. Since
+	 * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+	 * cannot change under our feet -- if it's set we cannot be
+	 * migrated, and if it's clear we cannot be migrated to a CPU
+	 * where it is set.
 	 */
 	return !in_irq() && !irqs_disabled() && !in_nmi() &&
-		!raw_cpu_read(kernel_neon_busy);
+		!this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
+29 −14
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-	struct pt_regs *regs;
+	nmi_cpu_backtrace(get_irq_regs());
+	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-	regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+	call_single_data_t *csd;
+	int cpu;
 
-	if (regs)
-		show_regs(regs);
+	for_each_cpu(cpu, mask) {
+		/*
+		 * If we previously sent an IPI to the target CPU & it hasn't
+		 * cleared its bit in the busy cpumask then it didn't handle
+		 * our previous IPI & it's not safe for us to reuse the
+		 * call_single_data_t.
+		 */
+		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+				cpu);
+			continue;
+		}
 
-	dump_stack();
+		csd = &per_cpu(backtrace_csd, cpu);
+		csd->func = handle_backtrace;
+		smp_call_function_single_async(cpu, csd);
+	}
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-	long this_cpu = get_cpu();
-
-	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-		dump_stack();
-
-	smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-	put_cpu();
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
+1 −0
Original line number Diff line number Diff line
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
 	__show_regs((struct pt_regs *)regs);
+	dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
Loading