Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b802b6ef authored by Juergen Gross, committed by Greg Kroah-Hartman
Browse files

xen/events: avoid handling the same event on two cpus at the same time



commit b6622798bc50b625a1e62f82c7190df40c1f5b21 upstream.

When changing the cpu affinity of an event it can happen today that
(with some unlucky timing) the same event will be handled on the old
and the new cpu at the same time.

Avoid that by adding an "event active" flag to the per-event data and
call the handler only if this flag isn't set.

Cc: stable@vger.kernel.org
Reported-by: Julien Grall <julien@xen.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
Link: https://lore.kernel.org/r/20210306161833.4552-4-jgross@suse.com


Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 92aefc62
Loading
Loading
Loading
Loading
+17 −8
Original line number Diff line number Diff line
@@ -694,6 +694,12 @@ static void xen_evtchn_close(unsigned int port)
 		BUG();
 }
 
+static void event_handler_exit(struct irq_info *info)
+{
+	smp_store_release(&info->is_active, 0);
+	clear_evtchn(info->evtchn);
+}
+
 static void pirq_query_unmask(int irq)
 {
 	struct physdev_irq_status_query irq_status;
@@ -724,13 +730,13 @@ static void eoi_pirq(struct irq_data *data)
 	    likely(!irqd_irq_disabled(data))) {
 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
 
-		clear_evtchn(evtchn);
+		event_handler_exit(info);
 
 		irq_move_masked_irq(data);
 
 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
 	} else
-		clear_evtchn(evtchn);
+		event_handler_exit(info);
 
 	if (pirq_needs_eoi(data->irq)) {
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -1566,6 +1572,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
 	}
 
 	info = info_for_irq(irq);
+	if (xchg_acquire(&info->is_active, 1))
+		return;
 
 	if (ctrl->defer_eoi) {
 		info->eoi_cpu = smp_processor_id();
@@ -1753,13 +1761,13 @@ static void ack_dynirq(struct irq_data *data)
 	    likely(!irqd_irq_disabled(data))) {
 		do_mask(info, EVT_MASK_REASON_TEMPORARY);
 
-		clear_evtchn(evtchn);
+		event_handler_exit(info);
 
 		irq_move_masked_irq(data);
 
 		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
 	} else
-		clear_evtchn(evtchn);
+		event_handler_exit(info);
 }
 
 static void mask_ack_dynirq(struct irq_data *data)
@@ -1775,7 +1783,7 @@ static void lateeoi_ack_dynirq(struct irq_data *data)
 
 	if (VALID_EVTCHN(evtchn)) {
 		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
-		clear_evtchn(evtchn);
+		event_handler_exit(info);
 	}
 }
 
@@ -1786,7 +1794,7 @@ static void lateeoi_mask_ack_dynirq(struct irq_data *data)
 
 	if (VALID_EVTCHN(evtchn)) {
 		do_mask(info, EVT_MASK_REASON_EXPLICIT);
-		clear_evtchn(evtchn);
+		event_handler_exit(info);
 	}
 }
 
@@ -1895,10 +1903,11 @@ static void restore_cpu_ipis(unsigned int cpu)
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq)
 {
-	int evtchn = evtchn_from_irq(irq);
+	struct irq_info *info = info_for_irq(irq);
+	evtchn_port_t evtchn = info ? info->evtchn : 0;
 
 	if (VALID_EVTCHN(evtchn))
-		clear_evtchn(evtchn);
+		event_handler_exit(info);
 }
 EXPORT_SYMBOL(xen_clear_irq_pending);
 void xen_set_irq_pending(int irq)
+1 −0
Original line number Diff line number Diff line
@@ -38,6 +38,7 @@ struct irq_info {
 #define EVT_MASK_REASON_EXPLICIT	0x01
 #define EVT_MASK_REASON_TEMPORARY	0x02
 #define EVT_MASK_REASON_EOI_PENDING	0x04
+	u8 is_active;		/* Is event just being handled? */
 	unsigned irq;
 	unsigned int evtchn;	/* event channel */
 	unsigned short cpu;	/* cpu bound */