Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2b3fede8 authored by Sean Anderson, committed by Greg Kroah-Hartman
Browse files

soc: fsl: qbman: Use raw spinlock for cgr_lock



[ Upstream commit fbec4e7fed89b579f2483041fabf9650fb0dd6bc ]

smp_call_function always runs its callback in hard IRQ context, even on
PREEMPT_RT, where spinlocks can sleep. So we need to use a raw spinlock
for cgr_lock to ensure we aren't waiting on a sleeping task.

Although this bug has existed for a while, it was not apparent until
commit ef2a8d5478b9 ("net: dpaa: Adjust queue depth on rate change")
which invokes smp_call_function_single via qman_update_cgr_safe every
time a link goes up or down.

Fixes: 96f413f4 ("soc/fsl/qbman: fix issue in qman_delete_cgr_safe()")
CC: stable@vger.kernel.org
Reported-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Closes: https://lore.kernel.org/all/20230323153935.nofnjucqjqnz34ej@skbuf/


Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Closes: https://lore.kernel.org/linux-arm-kernel/87wmsyvclu.fsf@pengutronix.de/


Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Reviewed-by: Camelia Groza <camelia.groza@nxp.com>
Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent e2bd2df4
Loading
Loading
Loading
Loading
+14 −11
Original line number Diff line number Diff line
@@ -976,7 +976,7 @@ struct qman_portal {
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	raw_spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
@@ -1194,7 +1194,7 @@ static int qman_create_portal(struct qman_portal *portal,
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	raw_spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
@@ -1369,11 +1369,14 @@ static void qm_congestion_task(struct work_struct *work)
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock_irq(&p->cgr_lock);
	/*
	 * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
	 */
	raw_spin_lock_irq(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock_irq(&p->cgr_lock);
		raw_spin_unlock_irq(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
		return;
@@ -1389,7 +1392,7 @@ static void qm_congestion_task(struct work_struct *work)
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock_irq(&p->cgr_lock);
	raw_spin_unlock_irq(&p->cgr_lock);
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}

@@ -2346,7 +2349,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
	preempt_enable();

	cgr->chan = p->config->channel;
	spin_lock_irq(&p->cgr_lock);
	raw_spin_lock_irq(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;
@@ -2383,7 +2386,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock_irq(&p->cgr_lock);
	raw_spin_unlock_irq(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
@@ -2418,7 +2421,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
		return -EINVAL;

	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
@@ -2443,7 +2446,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	put_affine_portal();
	return ret;
}
@@ -2483,9 +2486,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
	if (!p)
		return -EINVAL;

	spin_lock_irqsave(&p->cgr_lock, irqflags);
	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
	ret = qm_modify_cgr(cgr, 0, opts);
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	put_affine_portal();
	return ret;
}