
Commit 0217690f authored by Paolo Bonzini


Merge tag 'kvm-ppc-fixes-4.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into kvm-master

PPC KVM fixes for 4.15

Four commits here, including two that were tagged but never merged.
Three of them are for the HPT resizing code; two of those fix a
user-triggerable use-after-free in the host, and one fixes
stale TLB entries in the guest.  The remaining commit fixes a bug
causing PR KVM guests under PowerVM to fail to start.
parents 2a266f23 ecba8297
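
For context, the HPT resizing that three of these commits touch is driven from userspace through the KVM_PPC_RESIZE_HPT_PREPARE and KVM_PPC_RESIZE_HPT_COMMIT vm ioctls: PREPARE returns a positive estimate in milliseconds while the new hashed page table is still being allocated, zero once it is ready, and a negative errno on failure, while COMMIT pivots the guest onto the prepared table. A later PREPARE with a different shift cancels the pending resize, which is the window where the work thread could previously touch a freed kvm_resize_hpt. The sketch below is not part of the commit: it is a minimal illustration of the prepare/poll/commit sequence, with the helper name resize_hpt invented for the example and error handling trimmed.

/* Hypothetical userspace helper, not from the kernel tree: performs one
 * HPT resize on an existing KVM VM fd.  The API expects all vcpus to be
 * quiescent before the commit step.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int resize_hpt(int vmfd, unsigned int shift)
{
	struct kvm_ppc_resize_hpt rhpt = { .flags = 0, .shift = shift };
	long ret;

	/* While the allocation worker is still running (resize->error ==
	 * -EBUSY in the patched code), PREPARE returns an estimated time
	 * to completion in milliseconds, so poll until it settles.
	 */
	do {
		ret = ioctl(vmfd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
		if (ret > 0)
			usleep(ret * 1000);
	} while (ret > 0);
	if (ret < 0) {
		perror("KVM_PPC_RESIZE_HPT_PREPARE");
		return -1;
	}

	/* Rehash into the prepared HPT and pivot the guest onto it. */
	if (ioctl(vmfd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt) < 0) {
		perror("KVM_PPC_RESIZE_HPT_COMMIT");
		return -1;
	}
	return 0;
}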
arch/powerpc/kvm/book3s_64_mmu.c  +1 −0
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		gpte->may_read = true;
 		gpte->may_write = true;
 		gpte->page_size = MMU_PAGE_4K;
+		gpte->wimg = HPTE_R_M;
 
 		return 0;
 	}
arch/powerpc/kvm/book3s_64_mmu_hv.c  +61 −29
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
 	u32 order;
 
 	/* These fields protected by kvm->lock */
+
+	/* Possible values and their usage:
+	 *  <0     an error occurred during allocation,
+	 *  -EBUSY allocation is in progress,
+	 *  0      allocation made successfully.
+	 */
 	int error;
-	bool prepare_done;
 
-	/* Private to the work thread, until prepare_done is true,
-	 * then protected by kvm->resize_hpt_sem */
+	/* Private to the work thread, until error != -EBUSY,
+	 * then protected by kvm->lock.
+	 */
 	struct kvm_hpt_info hpt;
 };
 
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 		 * Reset all the reverse-mapping chains for all memslots
 		 */
 		kvmppc_rmap_reset(kvm);
-		/* Ensure that each vcpu will flush its TLB on next entry. */
-		cpumask_setall(&kvm->arch.need_tlb_flush);
 		err = 0;
 		goto out;
 	}
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 	kvmppc_set_hpt(kvm, &info);
 
 out:
+	if (err == 0)
+		/* Ensure that each vcpu will flush its TLB on next entry. */
+		cpumask_setall(&kvm->arch.need_tlb_flush);
+
 	mutex_unlock(&kvm->lock);
 	return err;
 }
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-	BUG_ON(kvm->arch.resize_hpt != resize);
+	if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+		return;
 
 	if (!resize)
 		return;
 
-	if (resize->hpt.virt)
-		kvmppc_free_hpt(&resize->hpt);
+	if (resize->error != -EBUSY) {
+		if (resize->hpt.virt)
+			kvmppc_free_hpt(&resize->hpt);
+		kfree(resize);
+	}
 
-	kvm->arch.resize_hpt = NULL;
-	kfree(resize);
+	if (kvm->arch.resize_hpt == resize)
+		kvm->arch.resize_hpt = NULL;
 }
 
 static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 						     struct kvm_resize_hpt,
 						     work);
 	struct kvm *kvm = resize->kvm;
-	int err;
+	int err = 0;
+
+	if (WARN_ON(resize->error != -EBUSY))
+		return;
+
+	mutex_lock(&kvm->lock);
+
+	/* Request is still current? */
+	if (kvm->arch.resize_hpt == resize) {
+		/* We may request large allocations here:
+		 * do not sleep with kvm->lock held for a while.
+		 */
+		mutex_unlock(&kvm->lock);
 
-	resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
-			 resize->order);
+		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+				 resize->order);
 
-	err = resize_hpt_allocate(resize);
+		err = resize_hpt_allocate(resize);
+
+		/* We have a strict assumption about -EBUSY
+		 * when preparing for HPT resize.
+		 */
+		if (WARN_ON(err == -EBUSY))
+			err = -EINPROGRESS;
 
-	mutex_lock(&kvm->lock);
+		mutex_lock(&kvm->lock);
+		/* It is possible that kvm->arch.resize_hpt != resize
+		 * after we grab kvm->lock again.
+		 */
+	}
 
 	resize->error = err;
-	resize->prepare_done = true;
+
+	if (kvm->arch.resize_hpt != resize)
+		resize_hpt_release(kvm, resize);
 
 	mutex_unlock(&kvm->lock);
 }
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 
 	if (resize) {
 		if (resize->order == shift) {
-			/* Suitable resize in progress */
-			if (resize->prepare_done) {
-				ret = resize->error;
-				if (ret != 0)
-					resize_hpt_release(kvm, resize);
-			} else {
+			/* Suitable resize in progress? */
+			ret = resize->error;
+			if (ret == -EBUSY)
 				ret = 100; /* estimated time in ms */
-			}
+			else if (ret)
+				resize_hpt_release(kvm, resize);
 
 			goto out;
 		}
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 		ret = -ENOMEM;
 		goto out;
 	}
+
+	resize->error = -EBUSY;
 	resize->order = shift;
 	resize->kvm = kvm;
 	INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 	if (!resize || (resize->order != shift))
 		goto out;
 
-	ret = -EBUSY;
-	if (!resize->prepare_done)
-		goto out;
-
 	ret = resize->error;
-	if (ret != 0)
+	if (ret)
 		goto out;
 
 	ret = resize_hpt_rehash(resize);
-	if (ret != 0)
+	if (ret)
 		goto out;
 
 	resize_hpt_pivot(resize);
arch/powerpc/kvm/book3s_pr.c  +2 −0
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
 #define MSR_USER32 MSR_USER
 #define MSR_USER64 MSR_USER
 #define HW_PAGE_SIZE PAGE_SIZE
+#define HPTE_R_M   _PAGE_COHERENT
 #endif
 
 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
 		pte.page_size = MMU_PAGE_64K;
+		pte.wimg = HPTE_R_M;
 	}
 
 	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {