
Commit cea929a5 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "soc: qcom: dfc: Enhance ndo_select_queue"

parents d0658a03 d4975cd0
+20 −2
@@ -221,6 +221,7 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
		hlist_del_init_rcu(&ep->hlnode);
		rmnet_unregister_bridge(dev, port);
		rmnet_vnd_dellink(mux_id, port, ep);
		synchronize_rcu();
		kfree(ep);
	}
	rmnet_unregister_real_device(real_dev, port);
@@ -244,7 +245,6 @@ static void rmnet_force_unassociate_device(struct net_device *dev)

	port = rmnet_get_port_rtnl(dev);

	rcu_read_lock();
	rmnet_unregister_bridge(dev, port);

	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
@@ -252,10 +252,10 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
		rmnet_vnd_dellink(ep->mux_id, port, ep);

		hlist_del_init_rcu(&ep->hlnode);
		synchronize_rcu();
		kfree(ep);
	}

	rcu_read_unlock();
	unregister_netdevice_many(&list);

	qmi_rmnet_qmi_exit(port->qmi_info, port);
@@ -587,6 +587,24 @@ void rmnet_clear_powersave_format(void *port)
	((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_clear_powersave_format);

void rmnet_enable_all_flows(void *port)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (unlikely(!port))
		return;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		qmi_rmnet_enable_all_flows(ep->egress_dev);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_enable_all_flows);

#endif

/* Startup/Shutdown */
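Reviewer note: the two hunks above move endpoint teardown to the standard RCU deletion ordering, and the new rmnet_enable_all_flows() walks the same muxed_ep hash under rcu_read_lock(). A minimal sketch of that ordering, assuming kernel context (not the driver code itself):

	/* Illustrative only: the unlink -> grace period -> free ordering. */
	static void ep_teardown_sketch(struct rmnet_endpoint *ep)
	{
		hlist_del_init_rcu(&ep->hlnode); /* new RCU readers can no longer find ep */
		synchronize_rcu();               /* wait for readers already inside the hash walk */
		kfree(ep);                       /* safe: no reader can still hold ep */
	}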
+4 −10
@@ -172,11 +172,8 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if ((port->data_format & RMNET_INGRESS_FORMAT_PS) &&
	    !qmi_rmnet_work_get_active(port)) {
		/* register for powersave indications*/
		qmi_rmnet_work_restart(port);
	}
	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	skb_trim(skb, len);
	rmnet_deliver_skb(skb, port);
@@ -254,11 +251,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
			return -ENOMEM;
	}

	if ((port->data_format & RMNET_INGRESS_FORMAT_PS) &&
	    !qmi_rmnet_work_get_active(port)) {
		/* register for powersave indications*/
		qmi_rmnet_work_restart(port);
	}
	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);
+7 −1
@@ -166,7 +166,13 @@ static u16 rmnet_vnd_select_queue(struct net_device *dev,
				  void *accel_priv,
				  select_queue_fallback_t fallback)
{
	return 0;
	struct rmnet_priv *priv = netdev_priv(dev);
	int txq = 0;

	if (priv->real_dev)
		txq = qmi_rmnet_get_queue(dev, skb);

	return (txq < dev->real_num_tx_queues) ? txq : 0;
}

static const struct net_device_ops rmnet_vnd_ops = {
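Reviewer note: rmnet_vnd_select_queue() now asks qmi_rmnet_get_queue() for a mark-based queue and clamps any out-of-range answer back to queue 0. A standalone restatement of the clamp, with clamp_txq() as a hypothetical stand-in:

	#include <stdio.h>

	/* Hypothetical stand-in for the clamp in rmnet_vnd_select_queue() above. */
	static unsigned short clamp_txq(int txq, unsigned int real_num_tx_queues)
	{
		return (txq >= 0 && (unsigned int)txq < real_num_tx_queues) ? txq : 0;
	}

	int main(void)
	{
		printf("%u\n", clamp_txq(3, 8)); /* 3: valid dedicated queue */
		printf("%u\n", clamp_txq(9, 8)); /* 0: out of range, fall back to default */
		return 0;
	}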
+153 −16
@@ -20,6 +20,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/dfc.h>

#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)

#define DFC_MAX_BEARERS_V01 16
#define DFC_MAX_QOS_ID_V01 2

@@ -79,7 +81,7 @@ static void dfc_do_burst_flow_control(struct work_struct *work);
#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7

#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 424
#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 540

struct dfc_bind_client_req_msg_v01 {
	u8 ep_id_valid;
@@ -234,12 +236,74 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
	},
};

struct dfc_ancillary_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 reserved;
};

static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.is_array	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct
					   dfc_ancillary_info_type_v01,
					   subs_id),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.is_array	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct
					   dfc_ancillary_info_type_v01,
					   mux_id),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.is_array	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct
					   dfc_ancillary_info_type_v01,
					   bearer_id),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.is_array	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
		.offset		= offsetof(struct
					   dfc_ancillary_info_type_v01,
					   reserved),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_EOTI,
		.is_array	= NO_ARRAY,
		.tlv_type	= QMI_COMMON_TLV_TYPE,
	},
};

struct dfc_flow_status_ind_msg_v01 {
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
	u8 eod_ack_reqd_valid;
	u8 eod_ack_reqd;
	u8 ancillary_info_valid;
	u8 ancillary_info_len;
	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};
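Reviewer note: the max-length bump from 424 to 540 is consistent with the new optional TLV, assuming the usual QMI TLV framing of a 1-byte type plus 2-byte length. A compile-time restatement of the arithmetic (illustrative, not driver code):

	#define DFC_MAX_BEARERS_V01	16
	#define ANCILLARY_ENTRY_WIRE	(1 + 1 + 1 + 4)	/* subs_id + mux_id + bearer_id + reserved */
	#define QMI_TLV_HDR		3		/* 1-byte type + 2-byte length (assumed framing) */

	/* 3 + 1 (len byte) + 16 * 7 = 116, and 424 + 116 = 540. */
	_Static_assert(QMI_TLV_HDR + 1 + DFC_MAX_BEARERS_V01 * ANCILLARY_ENTRY_WIRE == 540 - 424,
		       "ancillary TLV accounts for the grown max message length");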

struct dfc_svc_ind {
@@ -400,6 +464,40 @@ static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = {
					   eod_ack_reqd),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.is_array	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct
					   dfc_flow_status_ind_msg_v01,
					   ancillary_info_valid),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.is_array	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct
					   dfc_flow_status_ind_msg_v01,
					   ancillary_info_len),
		.ei_array	= NULL,
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= DFC_MAX_BEARERS_V01,
		.elem_size	= sizeof(struct
					 dfc_ancillary_info_type_v01),
		.is_array	= VAR_LEN_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct
					   dfc_flow_status_ind_msg_v01,
					   ancillary_info),
		.ei_array	= dfc_ancillary_info_type_v01_ei,
	},
	{
		.data_type	= QMI_EOTI,
		.is_array	= NO_ARRAY,
@@ -587,11 +685,18 @@ static int dfc_bearer_flow_ctl(struct net_device *dev,
		itm = list_entry(p, struct rmnet_flow_map, list);

		if (itm->bearer_id == bearer->bearer_id) {
			/*
			 * Do not flow disable ancillary q if ancillary is true
			 */
			if (bearer->ancillary && enable == 0 &&
					DFC_IS_ANCILLARY(itm->ip_type))
				continue;

			qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle,
						    enable);
			trace_dfc_qmi_tc(itm->bearer_id, itm->flow_id,
					 bearer->grant_size, qlen,
					 itm->tcm_handle, enable);
			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
					 itm->flow_id, bearer->grant_size,
					 qlen, itm->tcm_handle, enable);
			rc++;
		}
	}
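Reviewer note: DFC_IS_ANCILLARY() (added at the top of this file) treats any flow whose ip_type is neither AF_INET nor AF_INET6 as ancillary, and the loop above uses it to skip flow-disabling such queues when the bearer itself is marked ancillary. A quick standalone check of the macro:

	#include <stdio.h>
	#include <sys/socket.h>	/* AF_INET, AF_INET6, AF_UNSPEC */

	#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)

	int main(void)
	{
		printf("%d\n", DFC_IS_ANCILLARY(AF_INET));   /* 0: ordinary IPv4 flow */
		printf("%d\n", DFC_IS_ANCILLARY(AF_INET6));  /* 0: ordinary IPv6 flow */
		printf("%d\n", DFC_IS_ANCILLARY(AF_UNSPEC)); /* 1: counted as ancillary */
		return 0;
	}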
@@ -605,7 +710,7 @@ static int dfc_bearer_flow_ctl(struct net_device *dev,
}

static int dfc_all_bearer_flow_ctl(struct net_device *dev,
				struct qos_info *qos, u8 ack_req,
				struct qos_info *qos, u8 ack_req, u32 ancillary,
				struct dfc_flow_status_info_type_v01 *fc_info)
{
	struct list_head *p;
@@ -621,6 +726,7 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
			qmi_rmnet_grant_per(bearer_itm->grant_size);
		bearer_itm->seq = fc_info->seq_num;
		bearer_itm->ack_req = ack_req;
		bearer_itm->ancillary = ancillary;
	}

	enable = fc_info->num_bytes > 0 ? 1 : 0;
@@ -630,7 +736,7 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
	else
		netif_tx_stop_all_queues(dev);

	trace_dfc_qmi_tc(0xFF, 0, fc_info->num_bytes, 0, 0, enable);
	trace_dfc_qmi_tc(dev->name, 0xFF, 0, fc_info->num_bytes, 0, 0, enable);

	if (enable == 0 && ack_req)
		dfc_send_ack(dev, fc_info->bearer_id,
@@ -641,7 +747,7 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
}

static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
			     u8 ack_req,
			     u8 ack_req, u32 ancillary,
			     struct dfc_flow_status_info_type_v01 *fc_info)
{
	struct rmnet_bearer_map *itm = NULL;
@@ -659,6 +765,7 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
		itm->seq = fc_info->seq_num;
		itm->ack_req = ack_req;
		itm->ancillary = ancillary;

		if (action != -1)
			rc = dfc_bearer_flow_ctl(dev, itm, qos);
@@ -672,13 +779,14 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
static void dfc_do_burst_flow_control(struct work_struct *work)
{
	struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work;
	struct dfc_flow_status_ind_msg_v01 *ind =
		(struct dfc_flow_status_ind_msg_v01 *)&svc_ind->dfc_info;
	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info;
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_flow_status_info_type_v01 *flow_status;
	struct dfc_ancillary_info_type_v01 *ai;
	u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
	int i;
	u32 ancillary;
	int i, j;

	if (unlikely(svc_ind->data->restart_state)) {
		kfree(svc_ind);
@@ -689,12 +797,27 @@ static void dfc_do_burst_flow_control(struct work_struct *work)

	for (i = 0; i < ind->flow_status_len; i++) {
		flow_status = &ind->flow_status[i];

		ancillary = 0;
		if (ind->ancillary_info_valid) {
			for (j = 0; j < ind->ancillary_info_len; j++) {
				ai = &ind->ancillary_info[j];
				if (ai->mux_id == flow_status->mux_id &&
				    ai->bearer_id == flow_status->bearer_id) {
					ancillary = ai->reserved;
					break;
				}
			}
		}

		trace_dfc_flow_ind(svc_ind->data->index,
				   i, flow_status->mux_id,
				   flow_status->bearer_id,
				   flow_status->num_bytes,
				   flow_status->seq_num,
				   ack_req);
				   ack_req,
				   ancillary);

		dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port,
					  flow_status->mux_id);
		if (!dev)
@@ -708,9 +831,10 @@ static void dfc_do_burst_flow_control(struct work_struct *work)

		if (unlikely(flow_status->bearer_id == 0xFF))
			dfc_all_bearer_flow_ctl(
				dev, qos, ack_req, flow_status);
				dev, qos, ack_req, ancillary, flow_status);
		else
			dfc_update_fc_map(dev, qos, ack_req, flow_status);
			dfc_update_fc_map(
				dev, qos, ack_req, ancillary, flow_status);

		spin_unlock_bh(&qos->qos_lock);
	}
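Reviewer note: each flow-status entry is now joined against the optional ancillary array on (mux_id, bearer_id), and the matched entry's reserved word is forwarded as the ancillary value. The same linear scan in standalone form (types simplified from the diff):

	#include <stdint.h>
	#include <stddef.h>

	/* Only the fields the join uses; simplified from dfc_ancillary_info_type_v01. */
	struct ai { uint8_t mux_id, bearer_id; uint32_t reserved; };

	/* Return the ancillary word for (mux_id, bearer_id), or 0 if absent. */
	static uint32_t find_ancillary(const struct ai *tbl, size_t n,
				       uint8_t mux_id, uint8_t bearer_id)
	{
		size_t j;

		for (j = 0; j < n; j++)
			if (tbl[j].mux_id == mux_id && tbl[j].bearer_id == bearer_id)
				return tbl[j].reserved;
		return 0;
	}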
@@ -892,7 +1016,7 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
	struct rmnet_flow_map *itm;
	u32 start_grant;

	spin_lock(&qos->qos_lock);
	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (unlikely(!itm))
@@ -902,7 +1026,8 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
	if (unlikely(!bearer))
		goto out;

	trace_dfc_flow_check(bearer->bearer_id, len, bearer->grant_size);
	trace_dfc_flow_check(dev->name, bearer->bearer_id,
			     len, bearer->grant_size);

	if (!bearer->grant_size)
		goto out;
@@ -924,5 +1049,17 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
		dfc_bearer_flow_ctl(dev, bearer, qos);

out:
	spin_unlock(&qos->qos_lock);
	spin_unlock_bh(&qos->qos_lock);
}

void dfc_qmi_wq_flush(struct qmi_info *qmi)
{
	struct dfc_qmi_data *dfc_data;
	int i;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		dfc_data = (struct dfc_qmi_data *)(qmi->fc_info[i].dfc_client);
		if (dfc_data)
			flush_workqueue(dfc_data->dfc_wq);
	}
}
+146 −125
@@ -21,6 +21,8 @@
#include <trace/events/dfc.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

#define NLMSG_FLOW_ACTIVATE 1
#define NLMSG_FLOW_DEACTIVATE 2
@@ -35,6 +37,7 @@ unsigned int rmnet_wq_frequency __read_mostly = 4;
module_param(rmnet_wq_frequency, uint, 0644);
MODULE_PARM_DESC(rmnet_wq_frequency, "Frequency of PS check");

#define PS_WORK_ACTIVE_BIT 0
#define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ)
#define NO_DELAY (0x0000 * HZ)

@@ -100,38 +103,6 @@ qmi_rmnet_has_client(struct qmi_info *qmi)
}

#ifdef CONFIG_QCOM_QMI_DFC
static void
qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
			   struct rmnet_flow_map *itm, int add_flow)
{
	int i;

	if (add_flow) {
		if (qmi->flow_cnt == MAX_FLOW_NUM - 1) {
			pr_err("%s() No more space for new flow\n", __func__);
			return;
		}

		qmi->flow[qmi->flow_cnt].dev = dev;
		qmi->flow[qmi->flow_cnt].itm = itm;
		qmi->flow_cnt++;
	} else {
		for (i = 0; i < qmi->flow_cnt; i++) {
			if ((qmi->flow[i].dev == dev) &&
			    (qmi->flow[i].itm == itm)) {
				qmi->flow[i].dev =
					qmi->flow[qmi->flow_cnt-1].dev;
				qmi->flow[i].itm =
					qmi->flow[qmi->flow_cnt-1].itm;
				qmi->flow[qmi->flow_cnt-1].dev = NULL;
				qmi->flow[qmi->flow_cnt-1].itm = NULL;
				qmi->flow_cnt--;
				break;
			}
		}
	}
}

static void
qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
			  struct qos_info *qos)
@@ -142,7 +113,6 @@ qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
	ASSERT_RTNL();

	list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) {
		qmi_rmnet_update_flow_link(qmi, dev, itm, 0);
		list_del(&itm->list);
		kfree(itm);
	}
@@ -232,7 +202,7 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
	new_map.flow_id = tcm->tcm_parent;
	new_map.ip_type = tcm->tcm_ifindex;
	new_map.tcm_handle = tcm->tcm_handle;
	trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id,
	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
			    new_map.ip_type, new_map.tcm_handle, 1);

	spin_lock_bh(&qos_info->qos_lock);
@@ -248,7 +218,6 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
			return -ENOMEM;
		}

		qmi_rmnet_update_flow_link(qmi, dev, itm, 1);
		qmi_rmnet_update_flow_map(itm, &new_map);
		list_add(&itm->list, &qos_info->flow_head);

@@ -273,6 +242,9 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,

		qmi_rmnet_flow_control(dev, itm->tcm_handle,
				bearer->grant_size > 0 ? 1 : 0);

		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
				 bearer->grant_size, 0, itm->tcm_handle, 1);
	}

	spin_unlock_bh(&qos_info->qos_lock);
@@ -287,7 +259,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
	struct rmnet_flow_map new_map, *itm;
	struct rmnet_bearer_map *bearer;
	int bearer_removed = 0;

	if (!qos_info)
		return -EINVAL;
@@ -307,64 +278,32 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
				     new_map.ip_type);
	if (itm) {
		trace_dfc_flow_info(new_map.bearer_id, new_map.flow_id,
				    new_map.ip_type, itm->tcm_handle, 0);
		qmi_rmnet_update_flow_link(qmi, dev, itm, 0);
		trace_dfc_flow_info(dev->name, new_map.bearer_id,
				    new_map.flow_id, new_map.ip_type,
				    itm->tcm_handle, 0);
		list_del(&itm->list);

		/* Enable flow to allow new call setup */
		qmi_rmnet_flow_control(dev, itm->tcm_handle, 1);
		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
				 0, 0, itm->tcm_handle, 1);

		/*clear bearer map*/
		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
		if (bearer && --bearer->flow_ref == 0) {
			list_del(&bearer->list);
			bearer_removed = 1;
			kfree(bearer);
		}

		kfree(itm);
		if (bearer_removed)
			kfree(bearer);
	}

	spin_unlock_bh(&qos_info->qos_lock);

	return 0;
	if (list_empty(&qos_info->flow_head)) {
		netif_tx_wake_all_queues(dev);
		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
	}

static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi)
{
	int i;
	struct qos_info *qos;
	struct rmnet_flow_map *m;
	struct rmnet_bearer_map *bearer;
	int qlen;

	if (!qmi_rmnet_has_dfc_client(qmi) || (qmi->flow_cnt == 0))
		return 0;

	ASSERT_RTNL();

	for (i = 0; i < qmi->flow_cnt; i++) {
		qos = (struct qos_info *)rmnet_get_qos_pt(qmi->flow[i].dev);
		m = qmi->flow[i].itm;

		spin_lock_bh(&qos->qos_lock);

		bearer = qmi_rmnet_get_bearer_map(qos, m->bearer_id);
		if (bearer) {
			bearer->grant_size = DEFAULT_GRANT;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(DEFAULT_GRANT);
			bearer->seq = 0;
			bearer->ack_req = 0;
		}

		qlen = qmi_rmnet_flow_control(qmi->flow[i].dev,
					     m->tcm_handle, 1);
		trace_dfc_qmi_tc(m->bearer_id, m->flow_id,
				 DEFAULT_GRANT, qlen,
				 m->tcm_handle, 1);

		spin_unlock_bh(&qos->qos_lock);
	}
	spin_unlock_bh(&qos_info->qos_lock);

	return 0;
}
@@ -419,11 +358,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
{
	return -EINVAL;
}

static inline int qmi_rmnet_enable_all_flows(struct qmi_info *qmi)
{
	return 0;
}
#endif

static int
@@ -568,13 +502,13 @@ void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)

	ASSERT_RTNL();

	qmi_rmnet_work_exit(port);

	if (qmi->wda_client) {
		wda_qmi_client_exit(qmi->wda_client);
		qmi->wda_client = NULL;
	}

	qmi_rmnet_work_exit(port);

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (!__qmi_rmnet_delete_client(port, qmi, i))
			return;
@@ -582,6 +516,38 @@ void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
}
EXPORT_SYMBOL(qmi_rmnet_qmi_exit);

void qmi_rmnet_enable_all_flows(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	int do_wake = 0;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	spin_lock_bh(&qos->qos_lock);

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		bearer->grant_before_ps = bearer->grant_size;
		bearer->seq_before_ps = bearer->seq;
		bearer->grant_size = DEFAULT_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		bearer->seq = 0;
		bearer->ack_req = 0;
		bearer->ancillary = 0;
		do_wake = 1;
	}

	if (do_wake) {
		netif_tx_wake_all_queues(dev);
		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
	}

	spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);
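Reviewer note: qmi_rmnet_enable_all_flows() snapshots each bearer's grant and sequence into grant_before_ps/seq_before_ps before resetting to DEFAULT_GRANT and waking the queues; only the save side appears in this commit, so any restore path is an assumption. A compact sketch of the save-then-reset step with simplified types:

	/* Illustrative, simplified from the hunk above; a matching restore
	 * path is not part of this commit. */
	struct bearer_sketch {
		unsigned int grant_size, grant_before_ps;
		unsigned int seq, seq_before_ps;
		unsigned int grant_thresh, ack_req, ancillary;
	};

	static void bearer_reset_on_enable(struct bearer_sketch *b,
					   unsigned int default_grant,
					   unsigned int scale_factor)
	{
		b->grant_before_ps = b->grant_size;	/* remember pre-reset grant */
		b->seq_before_ps = b->seq;		/* ...and sequence number */
		b->grant_size = default_grant;
		b->grant_thresh = default_grant / scale_factor; /* qmi_rmnet_grant_per() */
		b->seq = 0;
		b->ack_req = 0;
		b->ancillary = 0;
	}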

#ifdef CONFIG_QCOM_QMI_DFC
void qmi_rmnet_burst_fc_check(struct net_device *dev,
			      int ip_type, u32 mark, unsigned int len)
@@ -595,6 +561,59 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev,
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);

int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct qos_info *qos = rmnet_get_qos_pt(dev);
	int txq = 0, ip_type = AF_INET;
	unsigned int len = skb->len;
	struct rmnet_flow_map *itm;
	u32 mark = skb->mark;

	if (!qos)
		return 0;

	switch (skb->protocol) {
	/* TCPv4 ACKs */
	case htons(ETH_P_IP):
		ip_type = AF_INET;
		if ((!mark) &&
		    (ip_hdr(skb)->protocol == IPPROTO_TCP) &&
		    (len == 40 || len == 52) &&
		    (ip_hdr(skb)->ihl == 5) &&
		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
			return 1;
		break;

	/* TCPv6 ACKs */
	case htons(ETH_P_IPV6):
		ip_type = AF_INET6;
		if ((!mark) &&
		    (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
		    (len == 60 || len == 72) &&
		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
			return 1;
		/* Fall through */
	}

	/* Default flows */
	if (!mark)
		return 0;

	/* Dedicated flows */
	spin_lock_bh(&qos->qos_lock);

	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
	if (unlikely(!itm))
		goto done;

	txq = itm->tcm_handle;

done:
	spin_unlock_bh(&qos->qos_lock);
	return txq;
}
EXPORT_SYMBOL(qmi_rmnet_get_queue);
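Reviewer note: the pure-ACK tests above are purely length based — 40/52 bytes for IPv4 (20-byte header with ihl == 5, plus a bare or timestamp-optioned TCP header) and 60/72 for IPv6 (fixed 40-byte header). A compile-time restatement of that arithmetic (illustrative):

	#define IPV4_HDR	20	/* ihl == 5, no options */
	#define IPV6_HDR	40	/* fixed header, no extension headers */
	#define TCP_HDR		20	/* bare TCP header */
	#define TCP_TS_OPT	12	/* timestamp option, the common extra */

	_Static_assert(IPV4_HDR + TCP_HDR == 40 && IPV4_HDR + TCP_HDR + TCP_TS_OPT == 52,
		       "IPv4 pure-ACK candidate lengths");
	_Static_assert(IPV6_HDR + TCP_HDR == 60 && IPV6_HDR + TCP_HDR + TCP_TS_OPT == 72,
		       "IPv6 pure-ACK candidate lengths");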

inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
{
	return grant / qmi_rmnet_scale_factor;
@@ -661,8 +680,11 @@ int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
			__func__, enable, rc);
		return rc;
	}
	if (enable)
		qmi_rmnet_enable_all_flows(qmi);

	if (enable) {
		dfc_qmi_wq_flush(qmi);
		rmnet_enable_all_flows(port);
	}

	return 0;
}
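Reviewer note: when powersave is enabled the modem stops sending grant indications, so the hunk above first drains any queued DFC indication work and then force-enables every flow; the order suggests it is meant to keep a stale flow-disable from landing after the reset, though the commit does not state this. Sketch of the sequence (names from the diff; the rationale is an inference):

	/* Illustrative ordering only. */
	static void enter_powersave_sketch(struct qmi_info *qmi, void *port)
	{
		/* 1. Drain in-flight DFC indications so no stale disable runs later. */
		dfc_qmi_wq_flush(qmi);
		/* 2. Reset all bearers to DEFAULT_GRANT and wake every TX queue. */
		rmnet_enable_all_flows(port);
	}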
@@ -679,9 +701,9 @@ EXPORT_SYMBOL(qmi_rmnet_work_restart);
static void qmi_rmnet_check_stats(struct work_struct *work)
{
	struct rmnet_powersave_work *real_work;
	struct qmi_info *qmi;
	u64 rxd, txd;
	u64 rx, tx;
	unsigned long lock_delay;

	real_work = container_of(to_delayed_work(work),
				 struct rmnet_powersave_work, work);
@@ -689,28 +711,22 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
	if (unlikely(!real_work || !real_work->port))
		return;

	/* Min Delay for retry errors */
	lock_delay = qmi_rmnet_work_get_active(real_work->port) ?
			PS_INTERVAL : (HZ / 50);

	if (!rtnl_trylock()) {
		queue_delayed_work(rmnet_ps_wq, &real_work->work, lock_delay);
	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
	if (unlikely(!qmi))
		return;
	}
	if (!qmi_rmnet_work_get_active(real_work->port)) {
		qmi_rmnet_work_set_active(real_work->port, 1);

	if (qmi->ps_enabled) {
		/* Retry after small delay if qmi error
		 * This resumes UL grants by disabling
		 * powersave mode if successful.
		 */
		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
			qmi_rmnet_work_set_active(real_work->port, 0);
			queue_delayed_work(rmnet_ps_wq,
					   &real_work->work, lock_delay);
			rtnl_unlock();
					   &real_work->work, HZ / 50);
			return;

		}
		qmi->ps_enabled = 0;
		goto end;
	}

@@ -721,26 +737,34 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
	real_work->old_tx_pkts = tx;

	if (!rxd && !txd) {
		qmi_rmnet_work_set_active(real_work->port, 0);
		/* Retry after lock delay if enabling powersave fails.
		 * This will cause UL grants to continue being sent
		 * suboptimally. Keeps wq active until successful.
		 */
		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
			qmi_rmnet_work_set_active(real_work->port, 1);
			queue_delayed_work(rmnet_ps_wq,
					   &real_work->work, PS_INTERVAL);

			return;
		}
		qmi->ps_enabled = 1;
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);

		rtnl_unlock();
		return;
	}
end:
	rtnl_unlock();
	queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL);
}

static void qmi_rmnet_work_set_active(void *port, int status)
{
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;

	if (status)
		set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
	else
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
}

void qmi_rmnet_work_init(void *port)
{
	if (rmnet_ps_wq)
@@ -766,29 +790,26 @@ void qmi_rmnet_work_init(void *port)
}
EXPORT_SYMBOL(qmi_rmnet_work_init);

void qmi_rmnet_work_set_active(void *port, int status)
void qmi_rmnet_work_maybe_restart(void *port)
{
	if (!port)
	struct qmi_info *qmi;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
	if (unlikely(!qmi))
		return;
	((struct qmi_info *)rmnet_get_qmi_pt(port))->active = status;
}
EXPORT_SYMBOL(qmi_rmnet_work_set_active);

int qmi_rmnet_work_get_active(void *port)
{
	if (!port)
		return 0;
	return ((struct qmi_info *)rmnet_get_qmi_pt(port))->active;
	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active))
		qmi_rmnet_work_restart(port);
}
EXPORT_SYMBOL(qmi_rmnet_work_get_active);
EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);
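Reviewer note: qmi_rmnet_work_maybe_restart() folds the old get_active()/set_active() pair into one atomic test_and_set_bit(), so when many packets race on the data path only the first actually restarts the powersave work. The idiom in standalone form, with atomic_flag standing in for the kernel bitop:

	#include <stdio.h>
	#include <stdatomic.h>

	/* Userspace stand-in for test_and_set_bit(PS_WORK_ACTIVE_BIT, ...):
	 * returns the previous value, so only the first caller sees "was clear". */
	static atomic_flag ps_work_active = ATOMIC_FLAG_INIT;

	static void work_maybe_restart(void)
	{
		if (!atomic_flag_test_and_set(&ps_work_active))
			puts("restart powersave work");	/* first caller only */
	}

	int main(void)
	{
		work_maybe_restart();	/* prints */
		work_maybe_restart();	/* silent: already active */
		return 0;
	}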

void qmi_rmnet_work_exit(void *port)
{
	qmi_rmnet_work_set_active(port, 0);
	if (!rmnet_ps_wq || !rmnet_work)
		return;
	cancel_delayed_work_sync(&rmnet_work->work);
	destroy_workqueue(rmnet_ps_wq);
	qmi_rmnet_work_set_active(port, 0);
	rmnet_ps_wq = NULL;
	kfree(rmnet_work);
	rmnet_work = NULL;