Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b2f06b2 authored by Subash Abhinov Kasiviswanathan, committed by Gerrit - the friendly Code Review server
Browse files

soc: qcom: dfc: Traffic control optimization



Optimized data structures to avoid frequent list lookup
and redundant flow control for dual IP calls. Supports
using skb mark for tx queue assignment.

Change-Id: I94d8ee6bf30778ad8e4eb0cfeacf7c18267d20da
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent f5dfbfac
Loading
Loading
Loading
Loading
+31 −28
Original line number Diff line number Diff line
@@ -18,8 +18,6 @@
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)

#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)

#define DFC_MAX_QOS_ID_V01 2

#define DFC_ACK_TYPE_DISABLE 1
@@ -988,26 +986,23 @@ int dfc_bearer_flow_ctl(struct net_device *dev,
			struct rmnet_bearer_map *bearer,
			struct qos_info *qos)
{
	struct rmnet_flow_map *itm;
	int rc = 0, qlen;
	int enable;
	int i;

	enable = bearer->grant_size ? 1 : 0;

	list_for_each_entry(itm, &qos->flow_head, list) {
		if (itm->bearer_id == bearer->bearer_id) {
			/*
			 * Do not flow disable ancillary q if ancillary is true
			 */
			if (bearer->tcp_bidir && enable == 0 &&
					DFC_IS_ANCILLARY(itm->ip_type))
	for (i = 0; i < MAX_MQ_NUM; i++) {
		if (qos->mq[i].bearer == bearer) {
			/* Do not flow disable ancillary q in tcp bidir */
			if (qos->mq[i].ancillary &&
			    bearer->tcp_bidir && !enable)
				continue;

			qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle,
						    enable);
			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
					 itm->flow_id, bearer->grant_size,
					 qlen, itm->tcm_handle, enable);
			qlen = qmi_rmnet_flow_control(dev, i, enable);
			trace_dfc_qmi_tc(dev->name, bearer->bearer_id,
					 bearer->grant_size,
					 qlen, i, enable);
			rc++;
		}
	}
@@ -1025,9 +1020,9 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
				struct dfc_flow_status_info_type_v01 *fc_info)
{
	struct rmnet_bearer_map *bearer_itm;
	struct rmnet_flow_map *flow_itm;
	int rc = 0, qlen;
	bool enable;
	int i;

	enable = fc_info->num_bytes > 0 ? 1 : 0;

@@ -1042,12 +1037,14 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
		bearer_itm->last_seq = fc_info->seq_num;
	}

	list_for_each_entry(flow_itm, &qos->flow_head, list) {
		qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle,
					      enable);
		trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id,
				 flow_itm->flow_id, fc_info->num_bytes,
				 qlen, flow_itm->tcm_handle, enable);
	for (i = 0; i < MAX_MQ_NUM; i++) {
		bearer_itm = qos->mq[i].bearer;
		if (!bearer_itm)
			continue;
		qlen = qmi_rmnet_flow_control(dev, i, enable);
		trace_dfc_qmi_tc(dev->name, bearer_itm->bearer_id,
				 fc_info->num_bytes,
				 qlen, i, enable);
		rc++;
	}

@@ -1510,22 +1507,28 @@ void dfc_qmi_client_exit(void *dfc_data)
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 int ip_type, u32 mark, unsigned int len)
{
	struct rmnet_bearer_map *bearer;
	struct rmnet_bearer_map *bearer = NULL;
	struct rmnet_flow_map *itm;
	u32 start_grant;

	spin_lock_bh(&qos->qos_lock);

	if (dfc_mode == DFC_MODE_MQ_NUM) {
		/* Mark is mq num */
		if (likely(mark < MAX_MQ_NUM))
			bearer = qos->mq[mark].bearer;
	} else {
		/* Mark is flow_id */
		itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (unlikely(!itm))
		goto out;
		if (likely(itm))
			bearer = itm->bearer;
	}

	bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);
	if (unlikely(!bearer))
		goto out;

	trace_dfc_flow_check(dev->name, bearer->bearer_id,
			     len, bearer->grant_size);
			     len, mark, bearer->grant_size);

	if (!bearer->grant_size)
		goto out;
+109 −65
Original line number Diff line number Diff line
@@ -23,7 +23,12 @@

#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define DFC_MODE_MULTIQ 2
#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
#define DFC_SUPPORTED_MODE(m) \
	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)

int dfc_mode;
#define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)

unsigned int rmnet_wq_frequency __read_mostly = 1000;

@@ -36,6 +41,10 @@ unsigned int rmnet_wq_frequency __read_mostly = 1000;
static unsigned int qmi_rmnet_scale_factor = 5;
#endif

static int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
		   struct qmi_info *qmi);

struct qmi_elem_info data_ep_id_type_v01_ei[] = {
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
@@ -73,7 +82,7 @@ void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
	int i;

	if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
	if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
		return NULL;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -128,6 +137,8 @@ qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
		list_del(&bearer->list);
		kfree(bearer);
	}

	memset(qos->mq, 0, sizeof(qos->mq));
}

struct rmnet_flow_map *
@@ -166,17 +177,17 @@ static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
	itm->bearer_id = new_map->bearer_id;
	itm->flow_id = new_map->flow_id;
	itm->ip_type = new_map->ip_type;
	itm->tcm_handle = new_map->tcm_handle;
	itm->mq_idx = new_map->mq_idx;
}

int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
{
	struct netdev_queue *q;

	if (unlikely(tcm_handle >= dev->num_tx_queues))
	if (unlikely(mq_idx >= dev->num_tx_queues))
		return 0;

	q = netdev_get_tx_queue(dev, tcm_handle);
	q = netdev_get_tx_queue(dev, mq_idx);
	if (unlikely(!q))
		return 0;

@@ -209,31 +220,44 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;
	struct rmnet_bearer_map *bearer;
	struct tcmsg tmp_tcm;
	struct mq_map *mq;
	u32 mq_idx;

	if (!qos_info)
	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
		return -EINVAL;

	ASSERT_RTNL();

	/* flow activate
	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle
	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - mq_idx
	 */

	new_map.bearer_id = tcm->tcm__pad1;
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	new_map.tcm_handle = tcm->tcm_handle;
	new_map.mq_idx = tcm->tcm_handle;
	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
			    new_map.ip_type, new_map.tcm_handle, 1);
			    new_map.ip_type, new_map.mq_idx, 1);

again:
	spin_lock_bh(&qos_info->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		qmi_rmnet_update_flow_map(itm, &new_map);
	} else {
		pr_debug("%s: stale flow found\n", __func__);
		tmp_tcm.tcm__pad1 = itm->bearer_id;
		tmp_tcm.tcm_parent = itm->flow_id;
		tmp_tcm.tcm_ifindex = itm->ip_type;
		tmp_tcm.tcm_handle = itm->mq_idx;
		spin_unlock_bh(&qos_info->qos_lock);
		qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
		goto again;
	}

	/* Create flow map */
	itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
	if (!itm) {
		spin_unlock_bh(&qos_info->qos_lock);
@@ -243,6 +267,7 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
	qmi_rmnet_update_flow_map(itm, &new_map);
	list_add(&itm->list, &qos_info->flow_head);

	/* Create or update bearer map */
	bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
	if (bearer) {
		bearer->flow_ref++;
@@ -256,22 +281,31 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
		bearer->bearer_id = new_map.bearer_id;
		bearer->flow_ref = 1;
		bearer->grant_size = qos_info->default_grant;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(bearer->grant_size);
		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
		qos_info->default_grant = DEFAULT_GRANT;
		list_add(&bearer->list, &qos_info->bearer_head);
	}
	itm->bearer = bearer;

		qmi_rmnet_flow_control(dev, itm->tcm_handle,
				bearer->grant_size > 0 ? 1 : 0);
	/* Update mq map */
	mq_idx = tcm->tcm_handle;
	mq = &qos_info->mq[mq_idx];
	if (!mq->bearer) {
		mq->bearer = bearer;
		mq->ancillary = IS_ANCILLARY(new_map.ip_type);

		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
				 bearer->grant_size, 0, itm->tcm_handle,
		qmi_rmnet_flow_control(dev, mq_idx,
				       bearer->grant_size > 0 ? 1 : 0);
		trace_dfc_qmi_tc(dev->name, itm->bearer_id,
				 bearer->grant_size, 0, mq_idx,
				 bearer->grant_size > 0 ? 1 : 0);

	} else if (mq->bearer->bearer_id != new_map.bearer_id) {
		pr_debug("%s: un-managered bearer %u\n",
				__func__, new_map.bearer_id);
	}

	spin_unlock_bh(&qos_info->qos_lock);

	return 0;
}

@@ -282,6 +316,8 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;
	struct rmnet_bearer_map *bearer;
	struct mq_map *mq;
	u32 mq_idx;

	if (!qos_info)
		return -EINVAL;
@@ -303,35 +339,38 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
	if (itm) {
		trace_dfc_flow_info(dev->name, new_map.bearer_id,
				    new_map.flow_id, new_map.ip_type,
				    itm->tcm_handle, 0);
		list_del(&itm->list);
				    itm->mq_idx, 0);

		/*clear bearer map*/
		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
		bearer = itm->bearer;
		if (bearer && --bearer->flow_ref == 0) {
			list_del(&bearer->list);
			kfree(bearer);
			/* Remove the bearer from mq map */
			for (mq_idx = 0; mq_idx < MAX_MQ_NUM; mq_idx++) {
				mq = &qos_info->mq[mq_idx];
				if (mq->bearer != bearer)
					continue;

			/* Purge pending packets for dedicated flow */
			if (itm->flow_id)
				qmi_rmnet_reset_txq(dev, itm->tcm_handle);
				mq->bearer = NULL;
				mq->ancillary = false;
				qmi_rmnet_reset_txq(dev, mq_idx);
				qmi_rmnet_flow_control(dev, mq_idx, 1);
				trace_dfc_qmi_tc(dev->name,
					new_map.bearer_id, 0, 0, mq_idx, 1);
			}

		/* Enable flow to allow new flow setup */
		qmi_rmnet_flow_control(dev, itm->tcm_handle, 1);
		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
				 0, 0, itm->tcm_handle, 1);
			/* Remove from bearer map */
			list_del(&bearer->list);
			kfree(bearer);
		}

		/* Remove from flow map */
		list_del(&itm->list);
		kfree(itm);
	}

	if (list_empty(&qos_info->flow_head)) {
	if (list_empty(&qos_info->flow_head))
		netif_tx_wake_all_queues(dev);
		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
	}

	spin_unlock_bh(&qos_info->qos_lock);

	return 0;
}

@@ -408,7 +447,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
	svc.ep_type = tcm->tcm_info;
	svc.iface_id = tcm->tcm_parent;

	if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
	if (DFC_SUPPORTED_MODE(FLAG_TO_MODE(tcm->tcm_ifindex)) &&
	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
		rc = dfc_qmi_client_init(port, idx, &svc, qmi);
		if (rc < 0)
@@ -485,20 +524,21 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)

	switch (tcm->tcm_family) {
	case NLMSG_FLOW_ACTIVATE:
		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) ||
		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)) ||
		    !qmi_rmnet_has_dfc_client(qmi))
			return;

		qmi_rmnet_add_flow(dev, tcm, qmi);
		break;
	case NLMSG_FLOW_DEACTIVATE:
		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
			return;

		qmi_rmnet_del_flow(dev, tcm, qmi);
		break;
	case NLMSG_CLIENT_SETUP:
		if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) &&
		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
			return;

@@ -651,6 +691,10 @@ int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
	if (!qos)
		return 0;

	/* If mark is mq num return it */
	if (dfc_mode == DFC_MODE_MQ_NUM)
		return mark;

	switch (skb->protocol) {
	/* TCPv4 ACKs */
	case htons(ETH_P_IP):
@@ -685,7 +729,7 @@ int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
	if (unlikely(!itm))
		goto done;

	txq = itm->tcm_handle;
	txq = itm->mq_idx;

done:
	spin_unlock_bh(&qos->qos_lock);
@@ -703,7 +747,7 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
{
	struct qos_info *qos;

	qos = kmalloc(sizeof(*qos), GFP_KERNEL);
	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return NULL;

@@ -930,7 +974,7 @@ void qmi_rmnet_work_init(void *port)
	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);

	rmnet_work = kmalloc(sizeof(*rmnet_work), GFP_ATOMIC);
	rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
	if (!rmnet_work) {
		destroy_workqueue(rmnet_ps_wq);
		rmnet_ps_wq = NULL;
+20 −13
Original line number Diff line number Diff line
@@ -12,18 +12,15 @@
#define IP_VER_4 4
#define IP_VER_6 6

#define MAX_MQ_NUM 10
#define MAX_CLIENT_NUM 2
#define MAX_FLOW_NUM 32
#define DEFAULT_GRANT 1
#define DFC_MAX_BEARERS_V01 16

struct rmnet_flow_map {
	struct list_head list;
	u8 bearer_id;
	u32 flow_id;
	int ip_type;
	u32 tcm_handle;
};
#define DFC_MODE_FLOW_ID 2
#define DFC_MODE_MQ_NUM 3
extern int dfc_mode;

struct rmnet_bearer_map {
	struct list_head list;
@@ -40,27 +37,37 @@ struct rmnet_bearer_map {
	bool tx_off;
};

struct rmnet_flow_map {
	struct list_head list;
	u8 bearer_id;
	u32 flow_id;
	int ip_type;
	u32 mq_idx;
	struct rmnet_bearer_map *bearer;
};

struct svc_info {
	u32 instance;
	u32 ep_type;
	u32 iface_id;
};

struct mq_map {
	struct rmnet_bearer_map *bearer;
	bool ancillary;
};

struct qos_info {
	u8 mux_id;
	struct net_device *real_dev;
	struct list_head flow_head;
	struct list_head bearer_head;
	struct mq_map mq[MAX_MQ_NUM];
	u32 default_grant;
	u32 tran_num;
	spinlock_t qos_lock;
};

struct flow_info {
	struct net_device *dev;
	struct rmnet_flow_map *itm;
};

struct qmi_info {
	int flag;
	void *wda_client;
@@ -111,7 +118,7 @@ void dfc_qmi_client_exit(void *dfc_data);
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
			 int ip_type, u32 mark, unsigned int len);

int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable);
int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable);

void dfc_qmi_query_flow(void *dfc_data);

+13 −12
Original line number Diff line number Diff line
@@ -12,15 +12,14 @@

TRACE_EVENT(dfc_qmi_tc,

	TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant,
	TP_PROTO(const char *name, u8 bearer_id, u32 grant,
		 int qlen, u32 tcm_handle, int enable),

	TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable),
	TP_ARGS(name, bearer_id, grant, qlen, tcm_handle, enable),

	TP_STRUCT__entry(
		__string(dev_name, name)
		__field(u8, bid)
		__field(u32, fid)
		__field(u32, grant)
		__field(int, qlen)
		__field(u32, tcm_handle)
@@ -30,16 +29,15 @@ TRACE_EVENT(dfc_qmi_tc,
	TP_fast_assign(
		__assign_str(dev_name, name);
		__entry->bid = bearer_id;
		__entry->fid = flow_id;
		__entry->grant = grant;
		__entry->qlen = qlen;
		__entry->tcm_handle = tcm_handle;
		__entry->enable = enable;
	),

	TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s",
	TP_printk("dev=%s bearer_id=%u grant=%u len=%d mq=%u %s",
		__get_str(dev_name),
		__entry->bid, __entry->grant, __entry->qlen, __entry->fid,
		__entry->bid, __entry->grant, __entry->qlen,
		__entry->tcm_handle,
		__entry->enable ? "enable" : "disable")
);
@@ -82,14 +80,16 @@ TRACE_EVENT(dfc_flow_ind,

TRACE_EVENT(dfc_flow_check,

	TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant),
	TP_PROTO(const char *name, u8 bearer_id, unsigned int len,
		 u32 mark, u32 grant),

	TP_ARGS(name, bearer_id, len, grant),
	TP_ARGS(name, bearer_id, len, mark, grant),

	TP_STRUCT__entry(
		__string(dev_name, name)
		__field(u8, bearer_id)
		__field(unsigned int, len)
		__field(u32, mark)
		__field(u32, grant)
	),

@@ -97,12 +97,13 @@ TRACE_EVENT(dfc_flow_check,
		__assign_str(dev_name, name)
		__entry->bearer_id = bearer_id;
		__entry->len = len;
		__entry->mark = mark;
		__entry->grant = grant;
	),

	TP_printk("dev=%s bearer_id=%u skb_len=%u current_grant=%u",
		__get_str(dev_name),
		__entry->bearer_id, __entry->len, __entry->grant)
	TP_printk("dev=%s bearer_id=%u skb_len=%u mark=%u current_grant=%u",
		__get_str(dev_name), __entry->bearer_id,
		__entry->len, __entry->mark, __entry->grant)
);

TRACE_EVENT(dfc_flow_info,
@@ -130,7 +131,7 @@ TRACE_EVENT(dfc_flow_info,
		__entry->action = add;
	),

	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d",
	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d mq=%d",
		__entry->action ? "add flow" : "delete flow",
		__get_str(dev_name),
		__entry->bid, __entry->fid, __entry->ip, __entry->handle)