
Commit 006df0f3 authored by Christoffer Dall, committed by Christoffer Dall

KVM: arm/arm64: Support calling vgic_update_irq_pending from irq context



We are about to optimize our timer handling logic which involves
injecting irqs to the vgic directly from the irq handler.

Unfortunately, the injection path can take any AP list lock and irq lock,
and we must therefore make sure to use spin_lock_irqsave wherever
interrupts are enabled and we are taking any of those locks, to avoid
deadlocking between process context and the ISR.

This changes a lot of the VGIC code, but the good news is that the
changes are mostly mechanical.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
parent f39d16cb
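
The conversion below is easier to follow with the failure mode in mind. Here is a minimal sketch of the deadlock the commit message describes, using hypothetical names (demo_lock, demo_process_path, demo_irq_handler) rather than code from this commit:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Process context: interrupts are enabled here. */
static void demo_process_path(void)
{
	unsigned long flags;

	/*
	 * A plain spin_lock() would leave interrupts enabled. If the
	 * ISR below then fired on this CPU while demo_lock was held,
	 * it would spin on demo_lock forever: a self-deadlock. Saving
	 * and masking interrupts first is exactly the spin_lock ->
	 * spin_lock_irqsave conversion applied throughout this diff.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch state shared with the ISR ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* IRQ context: interrupts are already disabled here. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	spin_lock(&demo_lock);	/* safe: cannot be re-entered by an irq */
	/* ... touch the same shared state ... */
	spin_unlock(&demo_lock);

	return IRQ_HANDLED;
}

In the VGIC case the shared state is guarded by the per-irq irq_lock and the per-vcpu AP list lock, and the ISR is the timer irq handler that the message says will soon inject irqs into the vgic directly.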
virt/kvm/arm/vgic/vgic-its.c: +10 −7
@@ -278,6 +278,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
+	unsigned long flags;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);
@@ -285,15 +286,15 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
	if (ret)
		return ret;

-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

-		vgic_queue_irq_unlock(kvm, irq);
+		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

	return 0;
@@ -393,6 +394,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
+	unsigned long flags;

	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
	if (nr_irqs < 0)
@@ -420,9 +422,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

@@ -515,6 +517,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
+	unsigned long flags;

	if (!its->enabled)
		return -EBUSY;
@@ -530,9 +533,9 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

-	spin_lock(&ite->irq->irq_lock);
+	spin_lock_irqsave(&ite->irq->irq_lock, flags);
	ite->irq->pending_latch = true;
-	vgic_queue_irq_unlock(kvm, ite->irq);
+	vgic_queue_irq_unlock(kvm, ite->irq, flags);

	return 0;
}
virt/kvm/arm/vgic/vgic-mmio-v2.c: +13 −9
@@ -74,6 +74,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;
+	unsigned long flags;

	switch (mode) {
	case 0x0:		/* as specified by targets */
@@ -97,11 +98,11 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

-		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}
@@ -131,6 +132,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
+	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
@@ -140,13 +142,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
@@ -174,17 +176,18 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
{
	u32 intid = addr & 0x0f;
	int i;
+	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
@@ -195,19 +198,20 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
{
	u32 intid = addr & 0x0f;
	int i;
+	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq);
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
-			spin_unlock(&irq->irq_lock);
+			spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
virt/kvm/arm/vgic/vgic-mmio-v3.c: +10 −7
@@ -129,6 +129,7 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
+	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
@@ -139,13 +140,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
	if (!irq)
		return;

-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}

@@ -241,11 +242,12 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
+	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type
@@ -253,10 +255,10 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
			 * restore irq config before pending info.
			 */
			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq);
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			irq->pending_latch = false;
-			spin_unlock(&irq->irq_lock);
+			spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
@@ -799,6 +801,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
+	unsigned long flags;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
@@ -837,10 +840,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
virt/kvm/arm/vgic/vgic-mmio.c: +26 −18
@@ -69,13 +69,14 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
+	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
@@ -87,15 +88,16 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
+	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
@@ -126,14 +128,15 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
+	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
@@ -144,15 +147,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
+	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->pending_latch = false;

-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
@@ -181,7 +185,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool new_active_state)
{
	struct kvm_vcpu *requester_vcpu;
-	spin_lock(&irq->irq_lock);
+	unsigned long flags;
+	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * The vcpu parameter here can mean multiple things depending on how
@@ -216,9 +221,9 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,

	irq->active = new_active_state;
	if (new_active_state)
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
@@ -352,14 +357,15 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
+	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
@@ -390,6 +396,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
+	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;
@@ -404,14 +411,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
@@ -443,6 +450,7 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
@@ -459,12 +467,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
-			vgic_queue_irq_unlock(vcpu->kvm, irq);
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
-			spin_unlock(&irq->irq_lock);
+			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
virt/kvm/arm/vgic/vgic-v2.c: +3 −2
@@ -62,6 +62,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;
+	unsigned long flags;

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

@@ -77,7 +78,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

-		spin_lock(&irq->irq_lock);
+		spin_lock_irqsave(&irq->irq_lock, flags);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -104,7 +105,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
				irq->pending_latch = false;
		}

-		spin_unlock(&irq->irq_lock);
+		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
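
The page cuts off before the remaining hunks (vgic-v3.c, vgic.c, vgic.h), but the new flags parameter already shows their shape: vgic_queue_irq_unlock() is called with irq->irq_lock held, now taken via spin_lock_irqsave(), and drops that lock itself, so the caller's saved flags must travel with the call. A simplified sketch of the expected callee side, not the literal hunk:

/*
 * Simplified sketch only; the real vgic.c function also handles lock
 * ordering against the target vcpu's ap_list_lock with a retry loop.
 * The point is that the unlock moved inside the callee, so it restores
 * the interrupt state with the flags its caller saved.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu = vgic_target_oracle(irq);

	if (irq->vcpu || !vcpu) {
		/* Nothing to queue: drop the lock exactly as the caller took it. */
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		return false;
	}

	/* ... queue irq on the vcpu's AP list, then ... */
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	kvm_vcpu_kick(vcpu);

	return true;
}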
