Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ab53f22e authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

Merge tag 'kvm-arm-for-3.14' of...

Merge tag 'kvm-arm-for-3.14' of git://git.linaro.org/people/christoffer.dall/linux-kvm-arm into kvm-queue
parents 4a55dd72 61466710
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2327,7 +2327,7 @@ current state. "addr" is ignored.
Capability: basic
Architectures: arm, arm64
Type: vcpu ioctl
Parameters: struct struct kvm_vcpu_init (in)
Parameters: struct kvm_vcpu_init (in)
Returns: 0 on success; -1 on error
Errors:
  EINVAL:    the target is unknown, or the combination of features is invalid.
+1 −0
Original line number Diff line number Diff line
@@ -140,6 +140,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
}

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

#endif	/* !__ASSEMBLY__ */

+19 −11
Original line number Diff line number Diff line
@@ -489,15 +489,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
			return ret;
	}

	/*
	 * Handle the "start in power-off" case by calling into the
	 * PSCI code.
	 */
	if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
		*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
		kvm_psci_call(vcpu);
	}

	return 0;
}

@@ -711,6 +702,24 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
	return -EINVAL;
}

/*
 * Handle the KVM_ARM_VCPU_INIT ioctl payload: apply the requested target
 * CPU/feature set to @vcpu, then honour the "start powered off" feature
 * flag by pausing the VCPU instead of (as the pre-merge code did) issuing
 * a PSCI CPU_OFF call from first-run init.
 *
 * Returns 0 on success, or the nonzero error from kvm_vcpu_set_target()
 * if the target is unknown / the feature combination is invalid.
 */
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	/* Validate and apply the userspace-requested target and features. */
	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Handle the "start in power-off" case by marking the VCPU as paused.
	 */
	/*
	 * Non-atomic variant is fine here: this runs under the vcpu ioctl
	 * path, so no concurrent modification of arch.features is expected.
	 */
	if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu->arch.pause = true;

	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
@@ -724,8 +733,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);

		return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
+0 −2
Original line number Diff line number Diff line
@@ -26,8 +26,6 @@

#include "trace.h"

#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+13 −11
Original line number Diff line number Diff line
@@ -667,14 +667,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to VMAs not aligned to the PMD mapping
		 * granularity cannot be mapped using block descriptors even
		 * if the pages belong to a THP for the process, because the
		 * stage-2 block descriptor will cover more than a single THP
		 * and we lose atomicity for unmapping, updates, and splits
		 * of the THP or other pages in the stage-2 block range.
		 * Pages belonging to memslots that don't have the same
		 * alignment for userspace and IPA cannot be mapped using
		 * block descriptors even if the pages belong to a THP for
		 * the process, because the stage-2 block descriptor will
		 * cover more than a single THP and we lose atomicity for
		 * unmapping, updates, and splits of the THP or other pages
		 * in the stage-2 block range.
		 */
		if (vma->vm_start & ~PMD_MASK)
		if ((memslot->userspace_addr & ~PMD_MASK) !=
		    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
			force_pte = true;
	}
	up_read(&current->mm->mmap_sem);
@@ -916,9 +918,9 @@ int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);
	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
@@ -945,7 +947,7 @@ int kvm_mmu_init(void)
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = virt_to_phys(init_bounce_page);
		phys_base = kvm_virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;
Loading