Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6cd21278 authored by Greg Kroah-Hartman
Browse files

Merge 4.9.47 into android-4.9



Changes in 4.9.47
	p54: memset(0) whole array
	scsi: isci: avoid array subscript warning
	staging: wilc1000: simplify vif[i]->ndev accesses
	gcov: support GCC 7.1
	kvm: arm/arm64: Fix race in resetting stage2 PGD
	arm64: mm: abort uaccess retries upon fatal signal
	x86/io: Add "memory" clobber to insb/insw/insl/outsb/outsw/outsl
	arm64: fpsimd: Prevent registers leaking across exec
	locking/spinlock/debug: Remove spinlock lockup detection code
	scsi: sg: protect accesses to 'reserved' page array
	scsi: sg: reset 'res_in_use' after unlinking reserved array
	lz4: fix bogus gcc warning
	Linux 4.9.47

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parents 91004426 458ca52f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
# Kernel version identity: 4.9.47 "Roaring Lionus".
# (The rendered diff had kept the stale SUBLEVEL = 46 line alongside the
# new one; only the post-merge value belongs here.)
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 47
EXTRAVERSION =
NAME = Roaring Lionus

+8 −8
Original line number Diff line number Diff line
@@ -829,22 +829,22 @@ void stage2_unmap_vm(struct kvm *kvm)
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note: kvm->arch.pgd is snapshotted and cleared under kvm->mmu_lock so
 * that a concurrent user never operates on a stale stage-2 pgd pointer.
 */
 */
/*
 * Free the stage-2 page tables of @kvm.
 *
 * The pgd pointer is read and cleared under kvm->mmu_lock so that the
 * teardown races safely against other users of the stage-2 tables (see
 * "kvm: arm/arm64: Fix race in resetting stage2 PGD" in this merge);
 * the pages themselves are freed outside the lock once the pointer has
 * been unpublished.  The rendered diff had interleaved the removed
 * pre-patch lines (early return on NULL pgd, and a free of
 * kvm->arch.pgd *after* it was set to NULL) into this body; only the
 * post-patch logic is kept.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		/* Tear down all level-2/level-3 mappings first. */
		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
		/* Snapshot and unpublish the pgd while the lock is held. */
		pgd = kvm->arch.pgd;
		kvm->arch.pgd = NULL;
	}
	spin_unlock(&kvm->mmu_lock);

	/* Free the HW pgd, one page at a time */
	if (pgd)
		free_pages_exact(pgd, S2_PGD_SIZE);
}

static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+2 −0
Original line number Diff line number Diff line
@@ -157,9 +157,11 @@ void fpsimd_thread_switch(struct task_struct *next)

/*
 * Reset the current task's FPSIMD state to zero (used across exec per
 * "arm64: fpsimd: Prevent registers leaking across exec" in this merge),
 * so stale register contents cannot leak into the new program image.
 */
void fpsimd_flush_thread(void)
{
	/*
	 * Preemption must stay disabled so the zeroing of the saved state
	 * and the TIF_FOREIGN_FPSTATE update cannot be split by a context
	 * switch — that split is the leak this patch closes.
	 */
	preempt_disable();
	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
	/* Invalidate any per-CPU cached copy of this task's FPSIMD state. */
	fpsimd_flush_task_state(current);
	/*
	 * NOTE(review): TIF_FOREIGN_FPSTATE presumably forces the (now
	 * zeroed) saved state to be reloaded on return to userspace —
	 * confirm against fpsimd_restore_current_state().
	 */
	set_thread_flag(TIF_FOREIGN_FPSTATE);
	preempt_enable();
}

/*
+4 −1
Original line number Diff line number Diff line
@@ -379,8 +379,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/*
	 * Major/minor page fault accounting is only done on the initial
+2 −2
Original line number Diff line number Diff line
@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{									\
	asm volatile("rep; outs" #bwl					\
		     : "+S"(addr), "+c"(count) : "d"(port));		\
		     : "+S"(addr), "+c"(count) : "d"(port) : "memory");	\
}									\
									\
static inline void ins##bwl(int port, void *addr, unsigned long count)	\
{									\
	asm volatile("rep; ins" #bwl					\
		     : "+D"(addr), "+c"(count) : "d"(port));		\
		     : "+D"(addr), "+c"(count) : "d"(port) : "memory");	\
}

BUILDIO(b, b, char)
Loading