Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ffd042f1, authored by Sharath Chandra Vurukala, committed via Gerrit - the friendly Code Review server
Browse files

dfc: reset tx queue



When a bearer is removed, calling qdisc reset on a tx queue
could have a race condition with qdisc dequeue for lockless
qdisc such as pfifo_fast.

This change uses a different mechanism not relying on qdisc
implementation to achieve packet purge on bearer remove.

Change-Id: I8f9201809853b07293896d6cb8e010e9e0904e46
Signed-off-by: Weiyi Chen <quic_weiyic@quicinc.com>
Signed-off-by: Sharath Chandra Vurukala <quic_sharathv@quicinc.com>
parent b6a0597c
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * RMNET Data virtual network driver
 *
@@ -57,6 +58,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
	int ip_type;
	u32 mark;
	unsigned int len;
	bool need_to_drop = false;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
@@ -65,6 +67,14 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
		mark = skb->mark;
		len = skb->len;
		trace_rmnet_xmit_skb(skb);

		qmi_rmnet_get_flow_state(dev, skb, &need_to_drop);
		if (unlikely(need_to_drop)) {
			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		rmnet_egress_handler(skb);
		qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
		qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev));
+37 −25
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <soc/qcom/qmi_rmnet.h>
@@ -214,21 +214,6 @@ int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
	return 0;
}

/* Purge all packets queued on TX queue @txq of @dev by resetting the
 * queue's qdisc under qdisc_lock().
 *
 * NOTE(review): per the commit message, for a lockless qdisc such as
 * pfifo_fast, qdisc_lock() does not serialize against the dequeue path,
 * so qdisc_reset() here can race with a concurrent dequeue — this is
 * the race this change removes the function to avoid.
 *
 * NOTE(review): rtnl_dereference() implies the caller is expected to
 * hold RTNL — confirm against the call sites.
 */
static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
{
	struct Qdisc *qdisc;

	/* Silently ignore out-of-range queue indices. */
	if (unlikely(txq >= dev->num_tx_queues))
		return;

	qdisc = rtnl_dereference(netdev_get_tx_queue(dev, txq)->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));
		qdisc_reset(qdisc);
		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

/**
 * qmi_rmnet_watchdog_fn - watchdog timer func
 */
@@ -354,17 +339,18 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
				continue;

			mq->bearer = NULL;
			if (reset) {
				qmi_rmnet_reset_txq(dev, i);
				qmi_rmnet_flow_control(dev, i, 1);
			mq->drop_on_remove = reset;
			/* Let other CPU's see this update so that packets are
			 * dropped, instead of further processing packets
			 */
			smp_mb();

			qmi_rmnet_flow_control(dev, i, 1);
			if (dfc_mode == DFC_MODE_SA) {
				j = i + ACK_MQ_OFFSET;
					qmi_rmnet_reset_txq(dev, j);
				qmi_rmnet_flow_control(dev, j, 1);
			}
		}
		}

		/* Remove from bearer map */
		list_del(&bearer->list);
@@ -386,6 +372,11 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
	mq = &qos_info->mq[itm->mq_idx];
	if (!mq->bearer) {
		mq->bearer = bearer;
		mq->drop_on_remove = false;
		/* Let other CPU's see this update so that packets are
		 * dropped, instead of further processing packets
		 */
		smp_mb();

		if (dfc_mode == DFC_MODE_SA) {
			bearer->mq_idx = itm->mq_idx;
@@ -862,6 +853,27 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);

#ifdef CONFIG_QCOM_QMI_DFC
/* qmi_rmnet_get_flow_state - report whether @skb must be dropped because
 * the bearer backing its TX queue has been removed.
 * @dev:  rmnet virtual device the skb is being transmitted on
 * @skb:  packet under transmission; skb->queue_mapping selects the mq slot
 * @drop: out parameter, set true when the packet should be dropped
 *
 * Returns false (leaving *drop untouched) when qos info is missing or the
 * derived queue index is out of range; returns true otherwise.
 *
 * NOTE(review): the writer side (__qmi_rmnet_bearer_put) updates
 * mq->bearer / mq->drop_on_remove and issues smp_mb(); this reader uses
 * two independent READ_ONCE() loads with no read barrier between them —
 * confirm the ordering is sufficient on weakly ordered architectures.
 *
 * NOTE(review): `txq > ACK_MQ_OFFSET` (strictly greater) means a
 * queue_mapping exactly equal to ACK_MQ_OFFSET is not folded back to its
 * base queue — verify this asymmetry is intentional.
 */
bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
			      bool *drop)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	int txq = skb->queue_mapping;

	/* Map an ACK queue back to the data queue it shadows. */
	if (txq > ACK_MQ_OFFSET)
		txq -= ACK_MQ_OFFSET;

	if (unlikely(!qos || txq >= MAX_MQ_NUM))
		return false;

	/* If the bearer is gone, packets may need to be dropped */
	*drop = (txq != DEFAULT_MQ_NUM && !READ_ONCE(qos->mq[txq].bearer) &&
		 READ_ONCE(qos->mq[txq].drop_on_remove));

	return true;
}
EXPORT_SYMBOL(qmi_rmnet_get_flow_state);


void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len)
{
+2 −1
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _RMNET_QMI_I_H
@@ -73,6 +73,7 @@ struct svc_info {

/* Per-queue entry mapping an mq index to its bearer. */
struct mq_map {
	struct rmnet_bearer_map *bearer; /* NULL once the bearer is removed */
	bool drop_on_remove; /* when set (and bearer is NULL), drop queued packets */
};

struct qos_info {
+10 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QMI_RMNET_H
@@ -48,6 +49,8 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev,
			 struct net_device *vnd_dev, u8 mux_id);
void qmi_rmnet_qos_exit_pre(void *qos);
void qmi_rmnet_qos_exit_post(void);
bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
			      bool *drop);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
@@ -67,6 +70,13 @@ static inline void qmi_rmnet_qos_exit_post(void)
{
}

/* Stub for builds without DFC support (presumably the
 * !CONFIG_QCOM_QMI_DFC branch — confirm against the surrounding #ifdef):
 * reports that no flow state is available and leaves *drop untouched.
 */
static inline bool qmi_rmnet_get_flow_state(struct net_device *dev,
					    struct sk_buff *skb,
					    bool *drop)
{
	return false;
}

static inline void
qmi_rmnet_burst_fc_check(struct net_device *dev,
			 int ip_type, u32 mark, unsigned int len)