
Commit 42a730ad authored by Greg Kroah-Hartman

Merge 4.9.107 into android-4.9



Changes in 4.9.107
	arm64: lse: Add early clobbers to some input/output asm operands
	powerpc/64s: Clear PCR on boot
	USB: serial: cp210x: use tcflag_t to fix incompatible pointer type
	Revert "pinctrl: msm: Use dynamic GPIO numbering"
	xfs: detect agfl count corruption and reset agfl
	Revert "ima: limit file hash setting by user to fix and log modes"
	Input: elan_i2c_smbus - fix corrupted stack
	tracing: Fix crash when freeing instances with event triggers
	selinux: KASAN: slab-out-of-bounds in xattr_getsecurity
	cfg80211: further limit wiphy names to 64 bytes
	dma-buf: remove redundant initialization of sg_table
	rtlwifi: rtl8192cu: Remove variable self-assignment in rf.c
	ASoC: Intel: sst: remove redundant variable dma_dev_name
	platform/chrome: cros_ec_lpc: remove redundant pointer request
	x86/amd: revert commit 944e0fc5
	xen: set cpu capabilities from xen_start_kernel()
	x86/amd: don't set X86_BUG_SYSRET_SS_ATTRS when running under Xen
	tcp: avoid integer overflows in tcp_rcv_space_adjust()
	scsi: ufs: fix failure to read the string descriptor
	scsi: ufs: refactor device descriptor reading
	scsi: ufs: Factor out ufshcd_read_desc_param
	arm64: Add hypervisor safe helper for checking constant capabilities
	arm64/cpufeature: don't use mutex in bringup path
	powerpc/rfi-flush: Move out of HARDLOCKUP_DETECTOR #ifdef
	powerpc/pseries: Support firmware disable of RFI flush
	powerpc/powernv: Support firmware disable of RFI flush
	powerpc/rfi-flush: Move the logic to avoid a redo into the debugfs code
	powerpc/rfi-flush: Make it possible to call setup_rfi_flush() again
	powerpc/rfi-flush: Always enable fallback flush on pseries
	powerpc/rfi-flush: Differentiate enabled and patched flush types
	powerpc/rfi-flush: Call setup_rfi_flush() after LPM migration
	powerpc/pseries: Add new H_GET_CPU_CHARACTERISTICS flags
	powerpc: Add security feature flags for Spectre/Meltdown
	powerpc/pseries: Set or clear security feature flags
	powerpc/powernv: Set or clear security feature flags
	powerpc/64s: Move cpu_show_meltdown()
	powerpc/64s: Enhance the information in cpu_show_meltdown()
	powerpc/powernv: Use the security flags in pnv_setup_rfi_flush()
	powerpc/pseries: Use the security flags in pseries_setup_rfi_flush()
	powerpc/64s: Wire up cpu_show_spectre_v1()
	powerpc/64s: Wire up cpu_show_spectre_v2()
	powerpc/pseries: Fix clearing of security feature flags
	powerpc: Move default security feature flags
	powerpc/pseries: Restore default security feature flags on setup
	powerpc/64s: Fix section mismatch warnings from setup_rfi_flush()
	powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit
	net/mlx4_en: fix potential use-after-free with dma_unmap_page
	iio:kfifo_buf: check for uint overflow
	MIPS: ptrace: Fix PTRACE_PEEKUSR requests for 64-bit FGRs
	MIPS: prctl: Disallow FRE without FR with PR_SET_FP_MODE requests
	scsi: scsi_transport_srp: Fix shost to rport translation
	stm class: Use vmalloc for the master map
	hwtracing: stm: fix build error on some arches
	IB/core: Fix error code for invalid GID entry
	drm/psr: Fix missed entry in PSR setup time table.
	drm/i915: Disable LVDS on Radiant P845
	sparc64: Fix build warnings with gcc 7.
	fix io_destroy()/aio_complete() race
	mm: fix the NULL mapping case in __isolate_lru_page()
	sparc64: Don't clibber fixed registers in __multi4.
	serial: pl011: add console matching function
	Linux 4.9.107

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parents 54f1dc05 3c3d05fc
+1 −1
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 106
+SUBLEVEL = 107
EXTRAVERSION =
NAME = Roaring Lionus

+11 −11
@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
	/* LSE atomics */						\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS , ##cl);					\
									\
@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]")			\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
	/* LSE atomics */						\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]")			\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}
@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]")			\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
+20 −7
@@ -9,8 +9,6 @@
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

-#include <linux/jump_label.h>
-
#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -27,6 +25,8 @@

#ifndef __ASSEMBLY__

+#include <linux/bug.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>

/* CPU feature register tracking */
@@ -96,6 +96,7 @@ struct arm64_cpu_capabilities {

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;

bool this_cpu_has_cap(unsigned int cap);

@@ -104,16 +105,29 @@ static inline bool cpu_have_feature(unsigned int num)
	return elf_hwcap & (1UL << num);
}

-static inline bool cpus_have_cap(unsigned int num)
+/* System capability check for constant caps */
+static inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
-	if (__builtin_constant_p(num))
-		return static_branch_unlikely(&cpu_hwcap_keys[num]);
-	else
-		return test_bit(num, cpu_hwcaps);
+	return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
+static inline bool cpus_have_cap(unsigned int num)
+{
+	if (num >= ARM64_NCAPS)
+		return false;
+	return test_bit(num, cpu_hwcaps);
}

+static inline bool cpus_have_const_cap(int num)
+{
+	if (static_branch_likely(&arm64_const_caps_ready))
+		return __cpus_have_const_cap(num);
+	else
+		return cpus_have_cap(num);
+}

static inline void cpus_set_cap(unsigned int num)
{
	if (num >= ARM64_NCAPS) {
@@ -121,7 +135,6 @@ static inline void cpus_set_cap(unsigned int num)
			num, ARM64_NCAPS);
	} else {
		__set_bit(num, cpu_hwcaps);
-		static_branch_enable(&cpu_hwcap_keys[num]);
	}
}

@@ -200,7 +213,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)

static inline bool system_supports_32bit_el0(void)
{
-	return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
+7 −3
@@ -24,6 +24,7 @@

#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -358,9 +359,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long vector_ptr)
{
	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code.
+	 * Call initialization code, and switch to the full blown HYP code.
+	 * If the cpucaps haven't been finalized yet, something has gone very
+	 * wrong, and hyp will crash and burn when it uses any
+	 * cpus_have_const_cap() wrapper.
	 */
+	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}

@@ -398,7 +402,7 @@ static inline void __cpu_init_stage2(void)

static inline bool kvm_arm_harden_branch_predictor(void)
{
-	return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}

#endif /* __ARM64_KVM_HOST_H__ */
+1 −1
@@ -341,7 +341,7 @@ static inline void *kvm_get_hyp_vector(void)
		vect = __bp_harden_hyp_vecs_start +
		       data->hyp_vectors_slot * SZ_2K;

-		if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+		if (!cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
			vect = lm_alias(vect);
	}
