
Commit 4207270b authored by Sean Tranchetti

soc: qcom: dfc: Enable QMAP DFC commands



Enable QMAP DFC commands, which either request a new grant when the
current grant drops to its threshold or acknowledge a flow disable
message.

CRs-fixed: 2280803
Change-Id: I46fb37ceaabfc8d25c877e1ef232176f63a0e76c
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
parent da1bc76d
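
The heart of the change is the grant accounting added to dfc_qmi_burst_check() in the second file below: each transmitted packet is debited against the bearer's byte grant, and a THRESHOLD ack is sent only on the packet that crosses the threshold derived by qmi_rmnet_grant_per(). The following standalone C sketch is an illustration of that arithmetic, not code from the patch; the packet sizes and grant value are made up, and the scale factor of 5 matches qmi_rmnet_scale_factor's default.

/*
 * Illustrative userspace sketch of the grant/threshold accounting
 * this commit adds to dfc_qmi_burst_check().
 */
#include <stdio.h>

static const unsigned int scale_factor = 5;	/* qmi_rmnet_scale_factor default */

static unsigned int grant_per(unsigned int grant)
{
	return grant / scale_factor;	/* mirrors qmi_rmnet_grant_per() */
}

int main(void)
{
	unsigned int grant_size = 1000;	/* bytes granted by the modem */
	unsigned int grant_thresh = grant_per(grant_size);
	unsigned int pkts[] = { 300, 400, 200, 150 };	/* hypothetical TX sizes */
	unsigned int i;

	for (i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++) {
		unsigned int start_grant = grant_size;

		/* debit the packet, clamping at zero as the patch does */
		grant_size = pkts[i] >= grant_size ? 0 : grant_size - pkts[i];

		/* ack only on the crossing, so at most one per grant cycle */
		if (start_grant > grant_thresh && grant_size <= grant_thresh)
			printf("pkt %u: send DFC_ACK_TYPE_THRESHOLD (%u -> %u, thresh %u)\n",
			       i, start_grant, grant_size, grant_thresh);

		if (!grant_size) {
			printf("pkt %u: grant exhausted, flow disabled\n", i);
			break;
		}
	}
	return 0;
}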
+30 −4
@@ -533,10 +533,8 @@ static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
 	}
 
 	spin_unlock_irqrestore(&port->agg_lock, flags);
-	if (skb) {
-		skb->protocol = htons(ETH_P_MAP);
+	if (skb)
 		dev_queue_xmit(skb);
-	}
 
 	kfree(work);
 }
@@ -598,6 +596,7 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 			dev_queue_xmit(skb);
 			return;
 		}
+		port->agg_skb->protocol = htons(ETH_P_MAP);
 		port->agg_count = 1;
 		getnstimeofday(&port->agg_time);
 		dev_kfree_skb_any(skb);
@@ -616,7 +615,6 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
 		port->agg_state = 0;
 		spin_unlock_irqrestore(&port->agg_lock, flags);
 		hrtimer_cancel(&port->hrtimer);
-		agg_skb->protocol = htons(ETH_P_MAP);
 		dev_queue_xmit(agg_skb);
 		goto new_packet;
 	}
@@ -663,3 +661,31 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
 
 	spin_unlock_irqrestore(&port->agg_lock, flags);
 }
+
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb)
+{
+	struct rmnet_port *port;
+	struct sk_buff *agg_skb;
+	unsigned long flags;
+
+	port = rmnet_get_port(qmap_skb->dev);
+
+	if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+		spin_lock_irqsave(&port->agg_lock, flags);
+		if (port->agg_skb) {
+			agg_skb = port->agg_skb;
+			port->agg_skb = 0;
+			port->agg_count = 0;
+			memset(&port->agg_time, 0, sizeof(struct timespec));
+			port->agg_state = 0;
+			spin_unlock_irqrestore(&port->agg_lock, flags);
+			hrtimer_cancel(&port->hrtimer);
+			dev_queue_xmit(agg_skb);
+		} else {
+			spin_unlock_irqrestore(&port->agg_lock, flags);
+		}
+	}
+
+	dev_queue_xmit(qmap_skb);
+}
+EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);
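
Worth noting: rmnet_map_tx_qmap_cmd() transmits any partially built aggregation frame before the command skb itself, presumably so the QMAP control command can neither be appended to a pending data aggregate nor reordered around it on the same port.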
+114 −22
@@ -24,6 +24,33 @@
 #define DFC_MAX_BEARERS_V01 16
 #define DFC_MAX_QOS_ID_V01 2
 
+#define DFC_ACK_TYPE_DISABLE 1
+#define DFC_ACK_TYPE_THRESHOLD 2
+
+struct dfc_qmap_header {
+	u8  pad_len:6;
+	u8  reserved_bit:1;
+	u8  cd_bit:1;
+	u8  mux_id;
+	__be16   pkt_len;
+} __aligned(1);
+
+struct dfc_ack_cmd {
+	struct dfc_qmap_header header;
+	u8  command_name;
+	u8  cmd_type:2;
+	u8  reserved:6;
+	u16 reserved2;
+	u32 transaction_id;
+	u8  ver:2;
+	u8  reserved3:6;
+	u8  type:2;
+	u8  reserved4:6;
+	u16 dfc_seq;
+	u8  reserved5[3];
+	u8  bearer_id;
+} __aligned(1);
+
 struct dfc_qmi_data {
 	void *rmnet_port;
 	struct workqueue_struct *dfc_wq;
@@ -514,25 +541,75 @@ static int dfc_init_service(struct dfc_qmi_data *data, struct qmi_info *qmi)
 	return dfc_indication_register_req(&data->handle, &data->ssctl, 1);
 }
 
-static int dfc_bearer_flow_ctl(struct net_device *dev, struct qos_info *qos,
-			       u8 bearer_id, u32 grant_size, int enable)
+static void
+dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+	struct sk_buff *skb;
+	struct dfc_ack_cmd *msg;
+	int data_size = sizeof(struct dfc_ack_cmd);
+	int header_size = sizeof(struct dfc_qmap_header);
+
+	if (!qos)
+		return;
+
+	skb = alloc_skb(data_size, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	msg = (struct dfc_ack_cmd *)skb_put(skb, data_size);
+	memset(msg, 0, data_size);
+
+	msg->header.cd_bit = 1;
+	msg->header.mux_id = mux_id;
+	msg->header.pkt_len = htons(data_size - header_size);
+
+	msg->bearer_id = bearer_id;
+	msg->command_name = 4;
+	msg->cmd_type = 0;
+	msg->dfc_seq = htons(seq);
+	msg->type = type;
+	msg->ver = 2;
+	msg->transaction_id = htonl(qos->tran_num);
+
+	skb->dev = qos->real_dev;
+	skb->protocol = htons(ETH_P_MAP);
+
+	trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
+	qos->tran_num++;
+
+	rmnet_map_tx_qmap_cmd(skb);
+}
+
+static int dfc_bearer_flow_ctl(struct net_device *dev,
+			       struct rmnet_bearer_map *bearer,
+			       struct qos_info *qos)
 {
 	struct list_head *p;
 	struct rmnet_flow_map *itm;
 	int rc = 0, qlen;
+	int enable;
+
+	enable = bearer->grant_size ? 1 : 0;
 
 	list_for_each(p, &qos->flow_head) {
 		itm = list_entry(p, struct rmnet_flow_map, list);
 
-		if (itm->bearer_id == bearer_id) {
+		if (itm->bearer_id == bearer->bearer_id) {
 			qlen = tc_qdisc_flow_control(dev, itm->tcm_handle,
 						    enable);
 			trace_dfc_qmi_tc(itm->bearer_id, itm->flow_id,
-					 grant_size, qlen, itm->tcm_handle,
-					 enable);
+					 bearer->grant_size, qlen,
+					 itm->tcm_handle, enable);
 			rc++;
 		}
 	}
+
+	if (enable == 0 && bearer->ack_req)
+		dfc_send_ack(dev, bearer->bearer_id,
+			     bearer->seq, qos->mux_id,
+			     DFC_ACK_TYPE_DISABLE);
+
 	return rc;
 }
@@ -550,6 +627,8 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 		bearer_itm = list_entry(p, struct rmnet_bearer_map, list);
 
 		bearer_itm->grant_size = fc_info->num_bytes;
+		bearer_itm->grant_thresh =
+			qmi_rmnet_grant_per(bearer_itm->grant_size);
 		bearer_itm->seq = fc_info->seq_num;
 		bearer_itm->ack_req = ack_req;
 	}
@@ -565,6 +644,12 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
 				 flow_itm->tcm_handle, enable);
 		rc++;
 	}
+
+	if (enable == 0 && ack_req)
+		dfc_send_ack(dev, fc_info->bearer_id,
+			     fc_info->seq_num, fc_info->mux_id,
+			     DFC_ACK_TYPE_DISABLE);
+
 	return rc;
 }
@@ -584,12 +669,12 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
 			action = 0;
 
 		itm->grant_size = fc_info->num_bytes;
+		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
 		itm->seq = fc_info->seq_num;
 		itm->ack_req = ack_req;
 
 		if (action != -1)
-			rc = dfc_bearer_flow_ctl(dev, qos, fc_info->bearer_id,
-						itm->grant_size, action);
+			rc = dfc_bearer_flow_ctl(dev, itm, qos);
 	} else {
 		pr_debug("grant %u before flow activate", fc_info->num_bytes);
 		qos->default_grant = fc_info->num_bytes;
@@ -614,18 +699,10 @@ static void dfc_do_burst_flow_control(struct work_struct *work)
 		return;
 	}
 
-get_lock:
 	local_bh_disable();
-	/* This will drop some messages but that is
-	 * unavoidable for now since the notifier callback is
-	 * protected by rtnl_lock() and destroy_workqueue()
-	 * will dead lock with this.
-	 */
-	if (!rtnl_trylock()) {
+	while (!rtnl_trylock()) {
 		if (!svc_ind->data->restart_state) {
-			local_bh_enable();
-			msleep(20);
-			goto get_lock;
+			cond_resched_softirq();
 		} else {
 			kfree(ind);
 			kfree(svc_ind);
@@ -713,6 +790,11 @@ static void dfc_bearer_limit_work(struct work_struct *work)
 		}
 	}
 
+	if (dfc_ind->bearer->ack_req)
+		dfc_send_ack(dfc_ind->dev, dfc_ind->bearer->bearer_id,
+			     dfc_ind->bearer->seq, dfc_ind->qos->mux_id,
+			     DFC_ACK_TYPE_DISABLE);
+
 done:
 	kfree(dfc_ind);
 	rtnl_unlock();
@@ -899,11 +981,12 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 	struct rmnet_flow_map *itm;
 	struct dfc_qmi_data *data;
 	int ip_type;
+	u32 start_grant;
 
 	ip_type = (ip_hdr(skb)->version == IP_VER_6) ? AF_INET6 : AF_INET;
 
 	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
-	if (!itm)
+	if (unlikely(!itm))
 		return;
 
 	bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);
@@ -915,11 +998,22 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 	if (!bearer->grant_size)
 		return;
 
-	if (skb->len < bearer->grant_size) {
+	start_grant = bearer->grant_size;
+	if (skb->len >= bearer->grant_size)
+		bearer->grant_size = 0;
+	else
 		bearer->grant_size -= skb->len;
-		return;
+
+	if (start_grant > bearer->grant_thresh &&
+	    bearer->grant_size <= bearer->grant_thresh) {
+		dfc_send_ack(dev, bearer->bearer_id,
+			     bearer->seq, qos->mux_id,
+			     DFC_ACK_TYPE_THRESHOLD);
 	}
 
+	if (bearer->grant_size)
+		return;
+
 	data = (struct dfc_qmi_data *)qmi_rmnet_has_dfc_client(qmi);
 	if (!data)
 		return;
@@ -935,8 +1029,6 @@ void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 	dfc_ind->bearer = bearer;
 	dfc_ind->data = data;
 
-	bearer->grant_size = 0;
-
 	/* stop the flow in hope that the worker thread is
 	 * immediately scheduled beyond this point of time
 	 */
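
dfc_send_ack() builds the ack entirely from the two new structs at the top of this file. The sketch below is a throwaway userspace copy of those structs using stdint types and __attribute__((packed)) in place of the kernel's u8/u16/u32 and __aligned(1); bitfield ordering is compiler-defined, so it only illustrates the sizes and the pkt_len arithmetic, not the exact wire layout.

#include <stdio.h>
#include <stdint.h>

struct qmap_header {
	uint8_t  pad_len:6;
	uint8_t  reserved_bit:1;
	uint8_t  cd_bit:1;		/* set to 1: command, not data */
	uint8_t  mux_id;
	uint16_t pkt_len;		/* big-endian on the wire */
} __attribute__((packed));

struct ack_cmd {
	struct qmap_header header;
	uint8_t  command_name;		/* 4 in dfc_send_ack() */
	uint8_t  cmd_type:2;
	uint8_t  reserved:6;
	uint16_t reserved2;
	uint32_t transaction_id;
	uint8_t  ver:2;
	uint8_t  reserved3:6;
	uint8_t  type:2;		/* DISABLE or THRESHOLD ack */
	uint8_t  reserved4:6;
	uint16_t dfc_seq;
	uint8_t  reserved5[3];
	uint8_t  bearer_id;
} __attribute__((packed));

int main(void)
{
	unsigned int data_size = sizeof(struct ack_cmd);
	unsigned int header_size = sizeof(struct qmap_header);

	/* pkt_len counts only the bytes after the QMAP header */
	printf("cmd %u bytes, header %u, pkt_len %u\n",
	       data_size, header_size, data_size - header_size);
	return 0;
}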
+63 −22
@@ -19,6 +19,7 @@
 #include <net/pkt_sched.h>
 #include "qmi_rmnet_i.h"
 #include <trace/events/dfc.h>
+#include <linux/moduleparam.h>
 
 #define NLMSG_FLOW_ACTIVATE 1
 #define NLMSG_FLOW_DEACTIVATE 2
@@ -31,6 +32,10 @@
 #define PS_INTERVAL (0x0004 * HZ)
 #define NO_DELAY (0x0000 * HZ)
 
+#ifdef CONFIG_QCOM_QMI_DFC
+static unsigned int qmi_rmnet_scale_factor = 5;
+#endif
+
 struct qmi_elem_info data_ep_id_type_v01_ei[] = {
 	{
 		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
@@ -228,7 +233,6 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 		qmi_rmnet_update_flow_link(qmi, dev, itm, 1);
 		qmi_rmnet_update_flow_map(itm, &new_map);
 		list_add(&itm->list, &qos_info->flow_head);
-	}
 
 		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
 		if (bearer) {
@@ -241,9 +245,16 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
 			bearer->bearer_id = new_map.bearer_id;
 			bearer->flow_ref = 1;
 			bearer->grant_size = qos_info->default_grant;
+			bearer->grant_thresh =
+				qmi_rmnet_grant_per(bearer->grant_size);
+			qos_info->default_grant = DEFAULT_GRANT;
 			list_add(&bearer->list, &qos_info->bearer_head);
 		}
 
+		tc_qdisc_flow_control(dev, itm->tcm_handle,
+				bearer->grant_size > 0 ? 1 : 0);
+	}
+
 	return 0;
 }
@@ -276,7 +287,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 				    new_map.ip_type, itm->tcm_handle, 0);
 		qmi_rmnet_update_flow_link(qmi, dev, itm, 0);
 		list_del(&itm->list);
-	}
 
 		/*clear bearer map*/
 		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
@@ -288,6 +298,8 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
 		kfree(itm);
 		if (bearer_removed)
 			kfree(bearer);
+	}
+
 	return 0;
 }
@@ -311,7 +323,9 @@ static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi)
 		if (bearer) {
 			if (bearer->grant_size == 0)
 				need_enable = 1;
-			bearer->grant_size = qos->default_grant;
+			bearer->grant_size = DEFAULT_GRANT;
+			bearer->grant_thresh =
+				qmi_rmnet_grant_per(DEFAULT_GRANT);
 			if (need_enable) {
 				qlen = tc_qdisc_flow_control(qmi->flow[i].dev,
 							     m->tcm_handle, 1);
@@ -324,6 +338,27 @@ static int qmi_rmnet_enable_all_flows(struct qmi_info *qmi)
 
 	return 0;
 }
+
+static int qmi_rmnet_set_scale_factor(const char *val,
+				      const struct kernel_param *kp)
+{
+	int ret;
+	unsigned int num = 0;
+
+	ret = kstrtouint(val, 10, &num);
+	if (ret != 0 || num == 0)
+		return -EINVAL;
+
+	return param_set_uint(val, kp);
+}
+
+static const struct kernel_param_ops qmi_rmnet_scale_ops = {
+	.set	= qmi_rmnet_set_scale_factor,
+	.get	= param_get_uint,
+};
+
+module_param_cb(qmi_rmnet_scale_factor, &qmi_rmnet_scale_ops,
+		&qmi_rmnet_scale_factor, 0664);
 #else
 static inline void
 qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
@@ -531,6 +566,12 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
 
+inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
+{
+	return grant / qmi_rmnet_scale_factor;
+}
+EXPORT_SYMBOL(qmi_rmnet_grant_per);
+
 void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
 {
 	struct qos_info *qos;
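
With CONFIG_QCOM_QMI_DFC enabled, the scale factor above also becomes a runtime-writable module parameter (mode 0664). The custom setter exists so that a write of 0 is rejected, which protects the division in qmi_rmnet_grant_per() from a divide-by-zero. A userspace sketch of that validation follows; it is my approximation using strtoul, whereas the kernel path goes through kstrtouint and param_set_uint.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int set_scale_factor(const char *val, unsigned int *out)
{
	char *end;
	unsigned long num;

	errno = 0;
	num = strtoul(val, &end, 10);
	/* reject non-numeric input, zero, and overflow, as the setter does */
	if (errno || *end != '\0' || num == 0 || num > UINT_MAX)
		return -EINVAL;

	*out = (unsigned int)num;
	return 0;
}

int main(void)
{
	unsigned int scale = 5;	/* default from the patch */

	printf("set \"10\" -> %d (scale %u)\n", set_scale_factor("10", &scale), scale);
	printf("set \"0\"  -> %d (scale %u)\n", set_scale_factor("0", &scale), scale);
	return 0;
}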
+5 −8
@@ -37,6 +37,7 @@ struct rmnet_bearer_map {
 	u8 bearer_id;
 	int flow_ref;
 	u32 grant_size;
+	u32 grant_thresh;
 	u16 seq;
 	u8  ack_req;
 };
@@ -93,6 +94,8 @@ struct data_ep_id_type_v01 {
 
 extern struct qmi_elem_info data_ep_id_type_v01_ei[];
 
+void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);
+
 #ifdef CONFIG_QCOM_QMI_DFC
 struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -101,6 +104,8 @@ qmi_rmnet_get_flow_map(struct qos_info *qos_info,
 struct rmnet_bearer_map *
 qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id);
 
+unsigned int qmi_rmnet_grant_per(unsigned int grant);
+
 int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);
 
 void dfc_qmi_client_exit(void *dfc_data);
@@ -108,8 +113,6 @@ void dfc_qmi_client_exit(void *dfc_data);
 void dfc_qmi_burst_check(struct net_device *dev,
 			 struct qos_info *qos, struct sk_buff *skb,
 			 struct qmi_info *qmi);
-
-void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);
 #else
 static inline struct rmnet_flow_map *
 qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -139,12 +142,6 @@ dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
 		    struct sk_buff *skb, struct qmi_info *qmi)
 {
 }
-
-static inline
-void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
-{
-	return NULL;
-}
 #endif
 
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
+4 −0
@@ -15,6 +15,10 @@
 #define _RMNET_QMI_H
 
 #include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb);
+
 #ifdef CONFIG_QCOM_QMI_RMNET
 void *rmnet_get_qmi_pt(void *port);
 void *rmnet_get_qos_pt(struct net_device *dev);