Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 26caea76 authored by Pavel Fedin, committed by Christoffer Dall
Browse files

KVM: arm/arm64: Merge vgic_set_lr() and vgic_sync_lr_elrsr()



Now we see that vgic_set_lr() and vgic_sync_lr_elrsr() are always used
together. Merge them into one function, avoiding a second vgic_ops
dereference every time.

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 212c7654
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -112,7 +112,6 @@ struct vgic_vmcr {
struct vgic_ops {
	struct vgic_lr	(*get_lr)(const struct kvm_vcpu *, int);
	void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
+0 −5
Original line number Diff line number Diff line
@@ -79,11 +79,7 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
		lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
}

static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
				  struct vgic_lr lr_desc)
{
	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
	else
@@ -167,7 +163,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v2_ops = {
	.get_lr			= vgic_v2_get_lr,
	.set_lr			= vgic_v2_set_lr,
	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
	.get_elrsr		= vgic_v2_get_elrsr,
	.get_eisr		= vgic_v2_get_eisr,
	.clear_eisr		= vgic_v2_clear_eisr,
+0 −5
Original line number Diff line number Diff line
@@ -112,11 +112,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
	}

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
}

static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
				  struct vgic_lr lr_desc)
{
	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
	else
@@ -212,7 +208,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
static const struct vgic_ops vgic_v3_ops = {
	.get_lr			= vgic_v3_get_lr,
	.set_lr			= vgic_v3_set_lr,
	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
	.get_elrsr		= vgic_v3_get_elrsr,
	.get_eisr		= vgic_v3_get_eisr,
	.clear_eisr		= vgic_v3_clear_eisr,
+2 −12
Original line number Diff line number Diff line
@@ -1032,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
	vgic_ops->set_lr(vcpu, lr, vlr);
}

/*
 * Thin dispatch wrapper: forward the per-LR ELRSR sync to the active
 * vgic backend (GICv2 or GICv3) through the vgic_ops table.
 * NOTE(review): this commit removes the wrapper — its body is folded
 * into the backends' set_lr implementations so vgic_ops is only
 * dereferenced once per update.
 */
static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops->get_elrsr(vcpu);
@@ -1100,7 +1094,6 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}

/*
@@ -1162,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
	}

	vgic_set_lr(vcpu, lr_nr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}

/*
@@ -1340,8 +1332,6 @@ static int process_queued_irq(struct kvm_vcpu *vcpu,
	vlr.hwirq = 0;
	vgic_set_lr(vcpu, lr, vlr);

	vgic_sync_lr_elrsr(vcpu, lr, vlr);

	return pending;
}

@@ -1442,8 +1432,6 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);

	/* Deal with HW interrupts, and clear mappings for empty LRs */
	for (lr = 0; lr < vgic->nr_lr; lr++) {
@@ -1454,6 +1442,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
	}

	/* Check if we still have something up our sleeve... */
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
	if (level_pending || pending < vgic->nr_lr)
		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);