Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 37d6a3be authored by Subash Abhinov Kasiviswanathan
Browse files

soc: qcom: Fix recursive spinlock in rmnet TX context



rtnl_lock() should not be called from TX context even if
rtnl_trylock() is used since TX path may operate in
softirq NET_TX context.

This patch fixes the recursive spinlock which could occur
with the following call stack. A new worker thread is
spawned if the grant is 0. The bearer is also passed
along to this work queue so that the exact queue operation
is performed depending on the instantaneous limits rather
than explicitly disabling the flow.

The workqueue is also converted to type high priority.

000|arch_counter_get_cntvct(inline)
-000|__delay(cycles = 19200)
-001|__const_udelay(?)
-002|msm_trigger_wdog_bite()
-003|spin_dump(inline)
-003|spin_bug(lock, msg)
-004|debug_spin_lock_before(inline)
-004|do_raw_spin_lock(lock)
-005|raw_spin_lock(?)
-006|__mutex_unlock_slowpath(?)
-007|mutex_unlock(lock)
-008|__rtnl_unlock()
-009|__read_once_size(inline)
-009|list_empty(inline)
-009|netdev_run_todo()
-010|rtnl_unlock()
-011|dfc_qmi_burst_check(dev, qos, skb)
-012|qmi_rmnet_burst_fc_check(dev, skb)
-013|rmnet_vnd_start_xmit(skb, dev)
-014|__netdev_start_xmit(inline)
-014|netdev_start_xmit(inline)
-014|xmit_one(inline)
-014|dev_hard_start_xmit(first, dev, txq)
-015|sch_direct_xmit(skb, q, dev, txq)
-016|__qdisc_run(q)
-017|__dev_xmit_skb(inline)
-017|__dev_queue_xmit(skb, ?)
-018|dev_queue_xmit(?)
-019|neigh_direct_output(?, ?)
-020|neigh_output(inline)
-020|ip6_finish_output2(net, ?, skb)
-021|ip6_finish_output(net, sk, skb)
-022|NF_HOOK_COND(inline)
-022|ip6_output(net, sk, skb)
-023|dst_output(inline)
-023|NF_HOOK(inline)
-023|mld_sendpack(skb)
-024|mld_ifc_timer_expire(data)
-025|__read_once_size(inline)
-025|static_key_count(inline)
-025|static_key_false(inline)
-025|trace_timer_expire_exit(inline)
-025|call_timer_fn()
-026|expire_timers(inline)
-026|__run_timers(inline)
-026|run_timer_softirq(?)
-027|__read_once_size(inline)
-027|static_key_count(inline)
-027|static_key_false(inline)
-027|trace_softirq_exit(inline)
-027|__softirqentry_text_start()
-028|do_softirq_own_stack(inline)
-028|invoke_softirq(inline)
-028|irq_exit()
-029|set_irq_regs(inline)
-029|__handle_domain_irq(domain, ?, lookup, regs)
-030|gic_handle_irq()
-031|el1_irq(asm)
-->|exception
-032|__mutex_unlock_slowpath(?)
-033|mutex_unlock(lock)
-034|__rtnl_unlock()
-035|__read_once_size(inline)
-035|list_empty(inline)
-035|netdev_run_todo()
-036|rtnl_unlock(inline)
-036|rtnetlink_rcv_msg(sk, ?, extack)
-037|netlink_rcv_skb(skb, cb)
-038|rtnetlink_rcv(?)
-039|netlink_unicast_kernel(inline)
-039|netlink_unicast(ssk, skb, portid, ?)
-040|netlink_sendmsg(?, msg, ?)
-041|sock_sendmsg_nosec(inline)
-041|sock_sendmsg(sock, msg)
-042|SYSC_sendto(inline)
-042|sys_sendto(?, buff, ?, flags, ?, ?)
-043|el0_svc_naked(asm)
-->|exception
-044|NUX:0x759BCC4AAC(asm)
---|end of frame

CRs-Fixed: 2292152
Change-Id: I340aa3989e085338cb768785f1f632c0a9ea11e0
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 3fd2e0f2
Loading
Loading
Loading
Loading
+97 −23
Original line number Original line Diff line number Diff line
@@ -40,6 +40,14 @@ struct dfc_svc_ind {
	void *dfc_info;
	void *dfc_info;
};
};


/* Deferred flow-control request, queued from the TX path and handled by
 * dfc_bearer_limit_work(). Allocated with GFP_ATOMIC in the xmit context
 * and freed by the work function on every exit path.
 */
struct dfc_burst_ind {
	struct work_struct work;	/* must stay first: the work callback
					 * casts work_struct * back to
					 * struct dfc_burst_ind *
					 */
	struct net_device *dev;		/* rmnet virtual device to throttle */
	struct qos_info *qos;		/* per-device QoS state (flow list) */
	struct rmnet_bearer_map *bearer; /* bearer whose grant was exhausted */
	struct dfc_qmi_data *data;	/* DFC client; checked for restart */
};

static void dfc_svc_init(struct work_struct *work);
static void dfc_svc_init(struct work_struct *work);
static void dfc_do_burst_flow_control(struct work_struct *work);
static void dfc_do_burst_flow_control(struct work_struct *work);


@@ -660,6 +668,57 @@ static void dfc_do_burst_flow_control(struct work_struct *work)
	local_bh_enable();
	local_bh_enable();
}
}


/* Worker that applies per-flow qdisc flow control for a bearer whose grant
 * hit zero. Runs from the high-priority dfc_wq instead of the TX softirq
 * context, so it may legally take the RTNL mutex (taking it in NET_TX
 * softirq caused the recursive-spinlock crash this change fixes).
 * Consumes (kfree's) the dfc_burst_ind on every return path.
 */
static void dfc_bearer_limit_work(struct work_struct *work)
{
	struct dfc_burst_ind *dfc_ind = (struct dfc_burst_ind *)work;
	struct rmnet_flow_map *itm;
	struct list_head *p;
	int qlen, fc;

	local_bh_disable();

	/* enable transmit on device so that the other
	 * flows which transmit proceed normally.
	 * do it here under bh disabled so that the TX softirq
	 * may not run here
	 */
	netif_start_queue(dfc_ind->dev);

	/* Spin-wait for RTNL; we cannot sleep in rtnl_lock() style here
	 * because BHs are disabled. Bail out entirely if the DFC client
	 * is being torn down (restart_state set).
	 */
	while (!rtnl_trylock()) {
		if (!dfc_ind->data->restart_state) {
			cond_resched_softirq();
		} else {
			kfree(dfc_ind);
			local_bh_enable();
			return;
		}
	}

	fc = dfc_ind->bearer->grant_size ? 1 : 0;
	/* if grant size is non zero here, we must have already
	 * got an updated grant. do nothing in that case
	 */
	if (fc)
		goto done;

	/* Grant is still zero: disable (fc == 0) the qdisc for every flow
	 * mapped to this bearer, based on the instantaneous limits rather
	 * than the state captured when the work was queued.
	 */
	list_for_each(p, &dfc_ind->qos->flow_head) {
		itm = list_entry(p, struct rmnet_flow_map, list);

		if (itm->bearer_id == dfc_ind->bearer->bearer_id) {
			qlen = tc_qdisc_flow_control(dfc_ind->dev,
						     itm->tcm_handle, fc);
			trace_dfc_qmi_tc_limit(itm->bearer_id, itm->flow_id,
					       dfc_ind->bearer->grant_size,
					       qlen, itm->tcm_handle, fc);
		}
	}

done:
	kfree(dfc_ind);
	rtnl_unlock();
	local_bh_enable();
}

static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
			    struct qmi_txn *txn, const void *data)
			    struct qmi_txn *txn, const void *data)
{
{
@@ -780,7 +839,7 @@ int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi)
	data->index = index;
	data->index = index;
	data->restart_state = 0;
	data->restart_state = 0;


	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
	data->dfc_wq = alloc_workqueue("dfc_wq", WQ_HIGHPRI, 1);
	if (!data->dfc_wq) {
	if (!data->dfc_wq) {
		pr_err("%s Could not create workqueue\n", __func__);
		pr_err("%s Could not create workqueue\n", __func__);
		goto err0;
		goto err0;
@@ -833,39 +892,54 @@ void dfc_qmi_client_exit(void *dfc_data)
}
}


void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 struct sk_buff *skb)
			 struct sk_buff *skb, struct qmi_info *qmi)
{
{
	struct rmnet_bearer_map *bearer;
	struct rmnet_bearer_map *bearer;
	struct dfc_burst_ind *dfc_ind;
	struct rmnet_flow_map *itm;
	struct rmnet_flow_map *itm;
	struct dfc_qmi_data *data;
	int ip_type;
	int ip_type;


	if (!qos)
	ip_type = (ip_hdr(skb)->version == IP_VER_6) ? AF_INET6 : AF_INET;

	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
	if (!itm)
		return;
		return;


	if (!rtnl_trylock())
	bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);
	if (unlikely(!bearer))
		return;
		return;


	ip_type = (ip_hdr(skb)->version == IP_VER_6) ? AF_INET6 : AF_INET;
	trace_dfc_flow_check(bearer->bearer_id, skb->len, bearer->grant_size);


	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
	if (!bearer->grant_size)
	if (itm) {
		return;
		bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);

		if (unlikely(!bearer)) {
	if (skb->len < bearer->grant_size) {
			rtnl_unlock();
		bearer->grant_size -= skb->len;
		return;
		return;
	}
	}


		trace_dfc_flow_check(bearer->bearer_id,
	data = (struct dfc_qmi_data *)qmi_rmnet_has_dfc_client(qmi);
				     skb->len, bearer->grant_size);
	if (!data)
		return;

	dfc_ind = kzalloc(sizeof(*dfc_ind), GFP_ATOMIC);
	if (!dfc_ind)
		return;

	INIT_WORK((struct work_struct *)dfc_ind, dfc_bearer_limit_work);

	dfc_ind->dev = dev;
	dfc_ind->qos = qos;
	dfc_ind->bearer = bearer;
	dfc_ind->data = data;


		if (skb->len >= bearer->grant_size) {
	bearer->grant_size = 0;
	bearer->grant_size = 0;
			dfc_bearer_flow_ctl(dev, qos, bearer->bearer_id,
					    bearer->grant_size, 0);
		} else {
			bearer->grant_size -= skb->len;
		}
	}


	rtnl_unlock();
	/* stop the flow in hope that the worker thread is
	 * immediately scheduled beyond this point of time
	 */
	netif_stop_queue(dev);
	queue_work(data->dfc_wq, (struct work_struct *)dfc_ind);
}
}
+6 −7
Original line number Original line Diff line number Diff line
@@ -75,20 +75,19 @@ static struct qmi_info *qmi_rmnet_qmi_init(void)
	return qmi_info;
	return qmi_info;
}
}


static inline int
void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
{
	int i;
	int i;


	if (!qmi || !(qmi->flag & FLAG_DFC_MASK))
	if (!qmi || !(qmi->flag & FLAG_DFC_MASK))
		return 0;
		return NULL;


	for (i = 0; i < MAX_CLIENT_NUM; i++) {
	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->fc_info[i].dfc_client)
		if (qmi->fc_info[i].dfc_client)
			return 1;
			return qmi->fc_info[i].dfc_client;
	}
	}


	return 0;
	return NULL;
}
}


static inline int
static inline int
@@ -97,7 +96,7 @@ qmi_rmnet_has_client(struct qmi_info *qmi)
	if (qmi->wda_client)
	if (qmi->wda_client)
		return 1;
		return 1;


	return qmi_rmnet_has_dfc_client(qmi);
	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
}
}


#ifdef CONFIG_QCOM_QMI_DFC
#ifdef CONFIG_QCOM_QMI_DFC
@@ -528,7 +527,7 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb)
	if (!qmi || !qos)
	if (!qmi || !qos)
		return;
		return;


	dfc_qmi_burst_check(dev, qos, skb);
	dfc_qmi_burst_check(dev, qos, skb, qmi);
}
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);


+13 −4
Original line number Original line Diff line number Diff line
@@ -106,7 +106,10 @@ int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);
void dfc_qmi_client_exit(void *dfc_data);
void dfc_qmi_client_exit(void *dfc_data);


void dfc_qmi_burst_check(struct net_device *dev,
void dfc_qmi_burst_check(struct net_device *dev,
			 struct qos_info *qos, struct sk_buff *skb);
			 struct qos_info *qos, struct sk_buff *skb,
			 struct qmi_info *qmi);

void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);
#else
#else
static inline struct rmnet_flow_map *
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -131,11 +134,17 @@ static inline void dfc_qmi_client_exit(void *dfc_data)
{
{
}
}


static inline void dfc_qmi_burst_check(struct net_device *dev,
static inline void
					struct qos_info *qos,
dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
					struct sk_buff *skb)
		    struct sk_buff *skb, struct qmi_info *qmi)
{
{
}
}

static inline
void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
	return NULL;
}
#endif
#endif


#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
+17 −1
Original line number Original line Diff line number Diff line
@@ -18,7 +18,7 @@


#include <linux/tracepoint.h>
#include <linux/tracepoint.h>


TRACE_EVENT(dfc_qmi_tc,
DECLARE_EVENT_CLASS(dfc_tc,


	TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen,
	TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen,
		 u32 tcm_handle, int enable),
		 u32 tcm_handle, int enable),
@@ -50,6 +50,22 @@ TRACE_EVENT(dfc_qmi_tc,
		__entry->enable ? "enable" : "disable")
		__entry->enable ? "enable" : "disable")
);
);


/* Original dfc_qmi_tc tracepoint, now an instance of the shared dfc_tc
 * event class (same prototype and format as before the refactor).
 */
DEFINE_EVENT(dfc_tc, dfc_qmi_tc,

	TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen,
		 u32 tcm_handle, int enable),

	TP_ARGS(bearer_id, flow_id, grant, qlen, tcm_handle, enable)
);

/* New tracepoint emitted from dfc_bearer_limit_work() when the deferred
 * worker toggles qdisc flow control; shares the dfc_tc event class.
 */
DEFINE_EVENT(dfc_tc, dfc_qmi_tc_limit,

	TP_PROTO(u8 bearer_id, u32 flow_id, u32 grant, int qlen,
		 u32 tcm_handle, int enable),

	TP_ARGS(bearer_id, flow_id, grant, qlen, tcm_handle, enable)
);

TRACE_EVENT(dfc_flow_ind,
TRACE_EVENT(dfc_flow_ind,


	TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant,
	TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant,