Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d4975cd0 authored by Subash Abhinov Kasiviswanathan
Browse files

soc: qcom: dfc: Enhance ndo_select_queue



Rather than relying on multiq and tc filter to set the queue
mapping via skb editing, reassign the queue using the bearer
information. This will help to avoid head-of-line blocking (HOLB) as much as possible.

CRs-fixed: 2305006
Change-Id: Ib104d74299b38eb49b4d4c76b699230465ab0046
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent d465dc11
Loading
Loading
Loading
Loading
+7 −1
Original line number Diff line number Diff line
@@ -166,7 +166,13 @@ static u16 rmnet_vnd_select_queue(struct net_device *dev,
				  void *accel_priv,
				  select_queue_fallback_t fallback)
{
	return 0;
	struct rmnet_priv *priv = netdev_priv(dev);
	int txq = 0;

	if (priv->real_dev)
		txq = qmi_rmnet_get_queue(dev, skb);

	return (txq < dev->real_num_tx_queues) ? txq : 0;
}

static const struct net_device_ops rmnet_vnd_ops = {
+2 −2
Original line number Diff line number Diff line
@@ -1016,7 +1016,7 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
	struct rmnet_flow_map *itm;
	u32 start_grant;

	spin_lock(&qos->qos_lock);
	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (unlikely(!itm))
@@ -1049,7 +1049,7 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
		dfc_bearer_flow_ctl(dev, bearer, qos);

out:
	spin_unlock(&qos->qos_lock);
	spin_unlock_bh(&qos->qos_lock);
}

void dfc_qmi_wq_flush(struct qmi_info *qmi)
+55 −0
Original line number Diff line number Diff line
@@ -21,6 +21,8 @@
#include <trace/events/dfc.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

#define NLMSG_FLOW_ACTIVATE 1
#define NLMSG_FLOW_DEACTIVATE 2
@@ -559,6 +561,59 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev,
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);

/* qmi_rmnet_get_queue - pick a tx queue index for @skb on rmnet device @dev
 *
 * Returns the tcm_handle of the dedicated bearer flow matched by
 * skb->mark, 1 for unmarked pure TCP ACKs (so ACKs bypass flow-controlled
 * queues), or 0 (the default queue) when there is no QoS state or no
 * matching flow.
 */
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	int txq = 0, ip_type = AF_INET;
	unsigned int len = skb->len;
	struct rmnet_flow_map *itm;
	u32 mark = skb->mark;

	/* No QoS state for this device - everything goes to queue 0 */
	if (!qos)
		return 0;

	switch (skb->protocol) {
	/* TCPv4 ACKs */
	case htons(ETH_P_IP):
		ip_type = AF_INET;
		/* Unmarked pure ACK: option-less 20B IPv4 header (ihl == 5)
		 * plus 20B TCP (len 40) or TCP with 12B of options, e.g.
		 * timestamps (len 52), and only the ACK flag set in the
		 * TCP flags byte. Steer these to queue 1.
		 */
		if ((!mark) &&
		    (ip_hdr(skb)->protocol == IPPROTO_TCP) &&
		    (len == 40 || len == 52) &&
		    (ip_hdr(skb)->ihl == 5) &&
		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
			return 1;
		break;

	/* TCPv6 ACKs */
	case htons(ETH_P_IPV6):
		ip_type = AF_INET6;
		/* Same pure-ACK test for IPv6: 40B header plus 20B TCP
		 * (len 60) or TCP with 12B of options (len 72).
		 * NOTE(review): unlike the v4 case there is no check that
		 * TCP immediately follows the IPv6 header (no extension
		 * headers) - presumably acceptable for this data path.
		 */
		if ((!mark) &&
		    (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
		    (len == 60 || len == 72) &&
		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
			return 1;
		/* Fall through */
	}

	/* Default flows: unmarked traffic stays on queue 0 */
	if (!mark)
		return 0;

	/* Dedicated flows: look up the bearer flow map by fwmark and
	 * use its tcm_handle as the queue index. Lock matches the _bh
	 * usage in dfc_qmi_burst_check.
	 */
	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (unlikely(!itm))
		goto done;

	txq = itm->tcm_handle;

done:
	spin_unlock_bh(&qos->qos_lock);
	return txq;
}

inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
{
	return grant / qmi_rmnet_scale_factor;
+7 −0
Original line number Diff line number Diff line
@@ -42,6 +42,7 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
#else
static inline void *
qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
@@ -58,6 +59,12 @@ qmi_rmnet_burst_fc_check(struct net_device *dev,
			 int ip_type, u32 mark, unsigned int len)
{
}

/* Stub for the #else (feature compiled out) branch: always select the
 * default tx queue 0.
 */
static inline int qmi_rmnet_get_queue(struct net_device *dev,
				       struct sk_buff *skb)
{
	return 0;
}
#endif

#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE