Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a1865769 authored by Linus Torvalds
Browse files

Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: ia64: Makefile fix for forcing to re-generate asm-offsets.h
  KVM: Future-proof device assignment ABI
  KVM: ia64: Fix halt emulation logic
  KVM: Fix guest shared interrupt with in-kernel irqchip
  KVM: MMU: sync root on paravirt TLB flush
parents 0d8762c9 e45948b0
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
	long itc_offset;
	unsigned long itc_check;
	unsigned long timer_check;
	unsigned long timer_pending;
	unsigned int timer_pending;
	unsigned int timer_fired;

	unsigned long vrr[8];
	unsigned long ibr[8];
@@ -417,6 +418,9 @@ struct kvm_arch {
	struct list_head assigned_dev_head;
	struct dmar_domain *intel_iommu_domain;
	struct hlist_head irq_ack_notifier_list;

	unsigned long irq_sources_bitmap;
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
};

union cpuid3_t {
+6 −2
Original line number Diff line number Diff line
@@ -29,13 +29,18 @@ define cmd_offsets
	 echo ""; \
	 echo "#endif" ) > $@
endef

# We use internal rules to avoid the "is up to date" message from make
arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
			$(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
			$(wildcard $(srctree)/include/linux/*.h)
	$(call if_changed_dep,cc_s_c)

$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
	$(call cmd,offsets)

FORCE : $(obj)/$(offsets-file)

#
# Makefile for Kernel-based Virtual Machine module
#
@@ -53,7 +58,6 @@ endif
kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
obj-$(CONFIG_KVM) += kvm.o

FORCE : $(obj)/$(offsets-file)
EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
	vtlb.o process.o
+41 −39
Original line number Diff line number Diff line
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;

	call_data.ptc_g_data = p->u.ptc_g_data;

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,12 +419,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;

	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
@@ -436,15 +438,22 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		down_read(&vcpu->kvm->slots_lock);
		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state =
					KVM_MP_STATE_RUNNABLE;
		up_read(&vcpu->kvm->slots_lock);

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);

/* No-op on ia64: there is no per-switch state to prepare before entering
 * the guest.  (This diff removes the stub and its call site in __vcpu_run.) */
static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

again:
	preempt_disable();

	kvm_prepare_guest_switch(vcpu);
	local_irq_disable();

	if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	down_read(&vcpu->kvm->slots_lock);
	r = vti_vcpu_run(vcpu, kvm_run);
	if (r < 0) {
		local_irq_enable();
@@ -634,9 +637,8 @@ again:
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	up_read(&vcpu->kvm->slots_lock);
	preempt_enable();

	r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		vcpu_put(vcpu);
		return -EAGAIN;
	}
@@ -778,6 +781,9 @@ static void kvm_init_vm(struct kvm *kvm)
	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}

struct  kvm *kvm_arch_create_vm(void)
@@ -941,9 +947,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
						irq_event.irq,
						irq_event.level);
			kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				    irq_event.irq, irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
@@ -1123,15 +1128,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
	wait_queue_head_t *q;

	vcpu  = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	q = &vcpu->wq;
	if (waitqueue_active(q)) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	if (waitqueue_active(q))
		wake_up_interruptible(q);
	}

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}
@@ -1700,12 +1706,14 @@ static void vcpu_kick_intr(void *info)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;
	int cpu = get_cpu();

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	if (vcpu->guest_mode)
	if (vcpu->guest_mode && cpu != ipi_pcpu)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
	put_cpu();
}

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1715,13 +1723,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)

	if (!test_and_set_bit(vec, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
		kvm_vcpu_kick(vcpu);
		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
			if (waitqueue_active(&vcpu->wq))
				wake_up_interruptible(&vcpu->wq);
		}
		return 1;
	}
	return 0;
@@ -1791,7 +1793,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
	return vcpu->arch.timer_fired;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+7 −2
Original line number Diff line number Diff line
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
	return index;
}

/* Mark a halt as pending and clear any stale timer-fired flag before
 * PAL_HALT_LIGHT emulation hands off to kvm_emulate_halt(). */
static void prepare_for_halt(struct kvm_vcpu *vcpu)
{
	vcpu->arch.timer_pending = 1;
	vcpu->arch.timer_fired = 0;
}

int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{

@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
		break;
	case PAL_HALT_LIGHT:
	{
		vcpu->arch.timer_pending = 1;
		INIT_PAL_STATUS_SUCCESS(result);
		prepare_for_halt(vcpu);
		if (kvm_highest_pending_irq(vcpu) == -1)
			ret = kvm_emulate_halt(vcpu);

	}
		break;

+3 −0
Original line number Diff line number Diff line
@@ -364,6 +364,9 @@ struct kvm_arch{

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;

	unsigned long irq_sources_bitmap;
	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
};

struct kvm_vm_stat {
Loading