Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dbcb7486 authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman
Browse files

Merge 4.9.114 into android-4.9



Changes in 4.9.114
	MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
	compiler, clang: suppress warning for unused static inline functions
	compiler, clang: properly override 'inline' for clang
	compiler, clang: always inline when CONFIG_OPTIMIZE_INLINING is disabled
	compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
	x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
	x86/paravirt: Make native_save_fl() extern inline
	ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent
	ocfs2: ip_alloc_sem should be taken in ocfs2_get_block()
	mtd: m25p80: consider max message size in m25p80_read
	bcm63xx_enet: correct clock usage
	bcm63xx_enet: do not write to random DMA channel on BCM6345
	crypto: crypto4xx - remove bad list_del
	crypto: crypto4xx - fix crypto4xx_build_pdr, crypto4xx_build_sdr leak
	atm: zatm: Fix potential Spectre v1
	ipvlan: fix IFLA_MTU ignored on NEWLINK
	net: dccp: avoid crash in ccid3_hc_rx_send_feedback()
	net: dccp: switch rx_tstamp_last_feedback to monotonic clock
	net/mlx5: Fix incorrect raw command length parsing
	net/mlx5: Fix wrong size allocation for QoS ETC TC register
	net_sched: blackhole: tell upper qdisc about dropped packets
	net: sungem: fix rx checksum support
	qed: Fix use of incorrect size in memcpy call.
	qed: Limit msix vectors in kdump kernel to the minimum required count.
	qmi_wwan: add support for the Dell Wireless 5821e module
	r8152: napi hangup fix after disconnect
	tcp: fix Fast Open key endianness
	tcp: prevent bogus FRTO undos with non-SACK flows
	vhost_net: validate sock before trying to put its fd
	net/packet: fix use-after-free
	net/mlx5: Fix command interface race in polling mode
	net: cxgb3_main: fix potential Spectre v1
	rtlwifi: rtl8821ae: fix firmware is not ready to run
	net: lan78xx: Fix race in tx pending skb size calculation
	netfilter: ebtables: reject non-bridge targets
	reiserfs: fix buffer overflow with long warning messages
	KEYS: DNS: fix parsing multiple options
	netfilter: ipv6: nf_defrag: drop skb dst before queueing
	rds: avoid unnecessary cong_update in loop transport
	net/nfc: Avoid stalls when nfc_alloc_send_skb() returned NULL.
	arm64: assembler: introduce ldr_this_cpu
	KVM: arm64: Store vcpu on the stack during __guest_enter()
	KVM: arm/arm64: Convert kvm_host_cpu_state to a static per-cpu allocation
	KVM: arm64: Change hyp_panic()s dependency on tpidr_el2
	arm64: alternatives: use tpidr_el2 on VHE hosts
	KVM: arm64: Stop save/restoring host tpidr_el1 on VHE
	arm64: alternatives: Add dynamic patching feature
	KVM: arm/arm64: Do not use kern_hyp_va() with kvm_vgic_global_state
	KVM: arm64: Avoid storing the vcpu pointer on the stack
	arm/arm64: smccc: Add SMCCC-specific return codes
	arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
	arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
	arm64: Add ARCH_WORKAROUND_2 probing
	arm64: Add 'ssbd' command-line option
	arm64: ssbd: Add global mitigation state accessor
	arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
	arm64: ssbd: Restore mitigation status on CPU resume
	arm64: ssbd: Introduce thread flag to control userspace mitigation
	arm64: ssbd: Add prctl interface for per-thread mitigation
	arm64: KVM: Add HYP per-cpu accessors
	arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
	arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
	arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
	string: drop __must_check from strscpy() and restore strscpy() usages in cgroup
	Linux 4.9.114

Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@google.com>
parents 90e7a900 19e5f4da
Loading
Loading
Loading
Loading
+17 −0
Original line number Diff line number Diff line
@@ -4035,6 +4035,23 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
	spia_pedr=
	spia_peddr=

	ssbd=		[ARM64,HW]
			Speculative Store Bypass Disable control

			On CPUs that are vulnerable to the Speculative
			Store Bypass vulnerability and offer a
			firmware based mitigation, this parameter
			indicates how the mitigation should be used:

			force-on:  Unconditionally enable mitigation for
				   both kernel and userspace
			force-off: Unconditionally disable mitigation for
				   both kernel and userspace
			kernel:    Always enable mitigation in the
				   kernel, and offer a prctl interface
				   to allow userspace to register its
				   interest in being mitigated too.

	stack_guard_gap=	[MM]
			override the default stack gap protection. The value
			is in page units and it defines how many pages prior
+1 −1
Original line number Diff line number Diff line
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 113
SUBLEVEL = 114
EXTRAVERSION =
NAME = Roaring Lionus

+12 −0
Original line number Diff line number Diff line
@@ -327,4 +327,16 @@ static inline bool kvm_arm_harden_branch_predictor(void)
	return false;
}

/*
 * Speculative Store Bypass Disable (SSBD) mitigation states, matching
 * the modes selectable via the "ssbd=" command-line option documented
 * above (force-on / force-off / kernel).
 */
#define KVM_SSBD_UNKNOWN		-1
#define KVM_SSBD_FORCE_DISABLE		0
#define KVM_SSBD_KERNEL		1
#define KVM_SSBD_FORCE_ENABLE		2
#define KVM_SSBD_MITIGATED		3

/*
 * Report the SSBD mitigation state on 32-bit ARM.  There is no probing
 * mechanism implemented here, so the state is always reported unknown.
 */
static inline int kvm_arm_have_ssbd(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return KVM_SSBD_UNKNOWN;
}

#endif /* __ARM_KVM_HOST_H__ */
+12 −0
Original line number Diff line number Diff line
@@ -28,6 +28,13 @@
 */
#define kern_hyp_va(kva)	(kva)

/* Contrary to arm64, there is no need to generate a PC-relative address */
/*
 * hyp_symbol_addr - take the address of a kernel symbol for use at HYP.
 * Since kern_hyp_va() is an identity mapping on 32-bit ARM (see above),
 * a plain address-of suffices; the statement expression keeps the
 * result typed as a pointer to typeof(s).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr = &(s);					\
		addr;							\
	})

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
 */
@@ -249,6 +256,11 @@ static inline int kvm_map_vectors(void)
	return 0;
}

/*
 * Stub for 32-bit ARM: always succeeds.  Presumably the arm64
 * counterpart maps per-cpu auxiliary (SSBD) data into HYP -- here there
 * is nothing to map.  Called from init_hyp_mode(), which treats any
 * nonzero return as a fatal init error.
 */
static inline int hyp_map_aux_data(void)
{
	return 0;
}

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */
+9 −15
Original line number Diff line number Diff line
@@ -51,8 +51,8 @@
__asm__(".arch_extension	virt");
#endif

DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
@@ -338,7 +338,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	}

	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
	vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);

	kvm_arm_set_running_vcpu(vcpu);
}
@@ -1199,19 +1199,8 @@ static inline void hyp_cpu_pm_exit(void)
}
#endif

/*
 * Free the dynamically allocated per-cpu host CPU context (allocated by
 * init_common_resources() via alloc_percpu()).  NOTE(review): this diff
 * removes the function entirely, as the context becomes a static
 * DEFINE_PER_CPU allocation.
 */
static void teardown_common_resources(void)
{
	free_percpu(kvm_host_cpu_state);
}

static int init_common_resources(void)
{
	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
	if (!kvm_host_cpu_state) {
		kvm_err("Cannot allocate host CPU state\n");
		return -ENOMEM;
	}

	/* set size of VMID supported by CPU */
	kvm_vmid_bits = kvm_get_vmid_bits();
	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1369,7 +1358,7 @@ static int init_hyp_mode(void)
	for_each_possible_cpu(cpu) {
		kvm_cpu_context_t *cpu_ctxt;

		cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
		cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);

		if (err) {
@@ -1378,6 +1367,12 @@ static int init_hyp_mode(void)
		}
	}

	err = hyp_map_aux_data();
	if (err) {
		kvm_err("Cannot map host auxilary data: %d\n", err);
		goto out_err;
	}

	kvm_info("Hyp mode initialized successfully\n");

	return 0;
@@ -1447,7 +1442,6 @@ int kvm_arch_init(void *opaque)
out_hyp:
	teardown_hyp_mode();
out_err:
	teardown_common_resources();
	return err;
}

Loading