Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e9d90d47, authored by Avi Kivity, committed by Marcelo Tosatti
Browse files

KVM: Remove internal timer abstraction



kvm_timer_fn(), the sole inhabitant of timer.c, is only used by lapic.c. Move
it there to make it easier to hack on it.

struct kvm_timer is a thin wrapper around hrtimer, and only adds obfuscation.
Move it near its two users (under different names) to prepare for simplification.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 4a4541a4
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@ kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(addprefix ../../../virt/kvm/, async_pf.o)

kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
			   i8254.o timer.o cpuid.o pmu.o
			   i8254.o cpuid.o pmu.o
kvm-intel-y		+= vmx.o
kvm-amd-y		+= svm.o

+4 −4
Original line number Diff line number Diff line
@@ -272,14 +272,14 @@ static void destroy_pit_timer(struct kvm_pit *pit)
	flush_kthread_work(&pit->expired);
}

/*
 * Diff hunk: only the parameter type is renamed, from the shared
 * struct kvm_timer (old line) to the PIT-private struct kvm_pit_timer
 * (new line).
 *
 * kpit_is_periodic() - ops callback reporting whether the PIT timer
 * should re-arm after firing.  It recovers the enclosing
 * kvm_kpit_state via container_of() on the embedded pit_timer field
 * and returns its is_periodic flag.
 */
static bool kpit_is_periodic(struct kvm_timer *ktimer)
static bool kpit_is_periodic(struct kvm_pit_timer *ktimer)
{
	struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
						 pit_timer);
	return ps->is_periodic;
}

/*
 * Ops table handed to the PIT timer; renamed along with the struct
 * (old declaration line above, new one below).  Its single callback
 * tells the timer function whether to restart the hrtimer.
 */
static struct kvm_timer_ops kpit_ops = {
static struct kvm_pit_timer_ops kpit_ops = {
	.is_periodic = kpit_is_periodic,
};

@@ -322,7 +322,7 @@ static void pit_do_work(struct kthread_work *work)

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_pit_timer *ktimer = container_of(data, struct kvm_pit_timer, timer);
	struct kvm_pit *pt = ktimer->kvm->arch.vpit;

	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
@@ -340,7 +340,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
	struct kvm_timer *pt = &ps->pit_timer;
	struct kvm_pit_timer *pt = &ps->pit_timer;
	s64 interval;

	if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
+17 −1
Original line number Diff line number Diff line
@@ -21,10 +21,26 @@ struct kvm_kpit_channel_state {
	ktime_t count_load_time;
};

/*
 * Per-guest PIT timer: an hrtimer plus bookkeeping.  This is the
 * former shared struct kvm_timer, duplicated here under a PIT-specific
 * name so the i8254 code can be simplified independently of the lapic.
 */
struct kvm_pit_timer {
	struct hrtimer timer;
	s64 period; 				/* unit: ns */
	u32 timer_mode_mask;
	u64 tscdeadline;
	atomic_t pending;			/* accumulated triggered timers */
	bool reinject;				/* re-arm even if still pending */
	struct kvm_pit_timer_ops *t_ops;	/* is_periodic callback */
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
};

/* Callback table: lets the timer function ask whether to re-arm. */
struct kvm_pit_timer_ops {
	bool (*is_periodic)(struct kvm_pit_timer *);
};

struct kvm_kpit_state {
	struct kvm_kpit_channel_state channels[3];
	u32 flags;
	struct kvm_timer pit_timer;
	struct kvm_pit_timer pit_timer;
	bool is_periodic;
	u32    speaker_data_on;
	struct mutex lock;

arch/x86/kvm/kvm_timer.h

deleted, mode 100644 → 0
+0 −18
Original line number Diff line number Diff line

/*
 * Body of the deleted arch/x86/kvm/kvm_timer.h: the shared timer
 * abstraction this commit removes.  Shown here as the "before" side
 * of the diff; the PIT and lapic each keep a private copy instead.
 */
struct kvm_timer {
	struct hrtimer timer;
	s64 period; 				/* unit: ns */
	u32 timer_mode_mask;
	u64 tscdeadline;
	atomic_t pending;			/* accumulated triggered timers */
	bool reinject;
	struct kvm_timer_ops *t_ops;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
};

struct kvm_timer_ops {
	bool (*is_periodic)(struct kvm_timer *);
};

/* Old shared hrtimer callback; its body moves to lapic.c as apic_timer_fn. */
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);
+29 −1
Original line number Diff line number Diff line
@@ -1262,6 +1262,34 @@ static const struct kvm_io_device_ops apic_mmio_ops = {
	.write    = apic_mmio_write,
};

/*
 * hrtimer callback for the local APIC timer.  This is the former
 * kvm_timer_fn from the deleted timer.c, moved here (and renamed) so
 * lapic.c owns it.  Accounts the expiry, requests vcpu attention, and
 * re-arms itself when the timer is periodic.
 */
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_vcpu *vcpu = ktimer->vcpu;
	wait_queue_head_t *q = &vcpu->wq;

	/*
	 * There is a race window between reading and incrementing, but we do
	 * not care about potentially losing timer events in the !reinject
	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
	 * in vcpu_enter_guest.
	 */
	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
		atomic_inc(&ktimer->pending);
		/* FIXME: this code should not know anything about vcpus */
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	}

	/* Presumably wakes a vcpu blocked on its wait queue — waiter not
	 * visible from this hunk; confirm against kvm_vcpu_block(). */
	if (waitqueue_active(q))
		wake_up_interruptible(q);

	/* Periodic timers re-arm by pushing the expiry forward one period. */
	if (ktimer->t_ops->is_periodic(ktimer)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;
@@ -1285,7 +1313,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS);
	apic->lapic_timer.timer.function = kvm_timer_fn;
	apic->lapic_timer.timer.function = apic_timer_fn;
	apic->lapic_timer.t_ops = &lapic_timer_ops;
	apic->lapic_timer.kvm = vcpu->kvm;
	apic->lapic_timer.vcpu = vcpu;
Loading