Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ca5592a4 authored by Subash Abhinov Kasiviswanathan
Browse files

soc: qcom: dfc: Improve QMI message handling



QMI DFC indications are queued onto a single list so that
back-to-back indications are processed together in one work item.
Additionally, all-bearer flow control now flow-controls only the
known (registered) flows.

CRs-fixed: 2328710
Change-Id: I3a149c6c2442899c3cbd6998cc9c4fbcf0458382
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent ce7fbe81
Loading
Loading
Loading
Loading
+63 −36
Original line number Diff line number Diff line
@@ -59,12 +59,14 @@ struct dfc_qmi_data {
	struct qmi_handle handle;
	struct sockaddr_qrtr ssctl;
	struct svc_info svc;
	struct work_struct qmi_ind_work;
	struct list_head qmi_ind_q;
	spinlock_t qmi_ind_lock;
	int index;
	int restart_state;
};

static void dfc_svc_init(struct work_struct *work);
static void dfc_do_burst_flow_control(struct work_struct *work);

/* **************************************************** */
#define DFC_SERVICE_ID_V01 0x4E
@@ -308,8 +310,7 @@ struct dfc_flow_status_ind_msg_v01 {
};

/*
 * A single received QMI DFC flow-status indication, held until the
 * dfc_qmi_ind_work handler processes it.
 *
 * NOTE(review): this is a rendered diff — the 'work'/'data' members are
 * the removed (pre-change) fields; 'list' replaces them so indications
 * queue onto dfc_qmi_data.qmi_ind_q instead of each being its own work
 * item.
 */
struct dfc_svc_ind {
	struct work_struct work;	/* removed: per-indication work item */
	struct dfc_qmi_data *data;	/* removed: back-pointer to client */
	struct list_head list;		/* link in dfc_qmi_data.qmi_ind_q */
	struct dfc_flow_status_ind_msg_v01 dfc_info;	/* copied indication payload */
};

@@ -674,16 +675,13 @@ static int dfc_bearer_flow_ctl(struct net_device *dev,
			       struct rmnet_bearer_map *bearer,
			       struct qos_info *qos)
{
	struct list_head *p;
	struct rmnet_flow_map *itm;
	int rc = 0, qlen;
	int enable;

	enable = bearer->grant_size ? 1 : 0;

	list_for_each(p, &qos->flow_head) {
		itm = list_entry(p, struct rmnet_flow_map, list);

	list_for_each_entry(itm, &qos->flow_head, list) {
		if (itm->bearer_id == bearer->bearer_id) {
			/*
			 * Do not flow disable ancillary q if ancillary is true
@@ -713,14 +711,14 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
				struct qos_info *qos, u8 ack_req, u32 ancillary,
				struct dfc_flow_status_info_type_v01 *fc_info)
{
	struct list_head *p;
	struct rmnet_bearer_map *bearer_itm = NULL;
	int enable;
	int rc = 0;
	struct rmnet_bearer_map *bearer_itm;
	struct rmnet_flow_map *flow_itm;
	int rc = 0, qlen;
	bool enable;

	list_for_each(p, &qos->bearer_head) {
		bearer_itm = list_entry(p, struct rmnet_bearer_map, list);
	enable = fc_info->num_bytes > 0 ? 1 : 0;

	list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
		bearer_itm->grant_size = fc_info->num_bytes;
		bearer_itm->grant_thresh =
			qmi_rmnet_grant_per(bearer_itm->grant_size);
@@ -729,14 +727,14 @@ static int dfc_all_bearer_flow_ctl(struct net_device *dev,
		bearer_itm->ancillary = ancillary;
	}

	enable = fc_info->num_bytes > 0 ? 1 : 0;

	if (enable)
		netif_tx_wake_all_queues(dev);
	else
		netif_tx_stop_all_queues(dev);

	trace_dfc_qmi_tc(dev->name, 0xFF, 0, fc_info->num_bytes, 0, 0, enable);
	list_for_each_entry(flow_itm, &qos->flow_head, list) {
		qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle,
					      enable);
		trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id,
				 flow_itm->flow_id, fc_info->num_bytes,
				 qlen, flow_itm->tcm_handle, enable);
		rc++;
	}

	if (enable == 0 && ack_req)
		dfc_send_ack(dev, fc_info->bearer_id,
@@ -776,9 +774,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
	return rc;
}

static void dfc_do_burst_flow_control(struct work_struct *work)
static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
				      struct dfc_svc_ind *svc_ind)
{
	struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work;
	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info;
	struct net_device *dev;
	struct qos_info *qos;
@@ -788,11 +786,6 @@ static void dfc_do_burst_flow_control(struct work_struct *work)
	u32 ancillary;
	int i, j;

	if (unlikely(svc_ind->data->restart_state)) {
		kfree(svc_ind);
		return;
	}

	rcu_read_lock();

	for (i = 0; i < ind->flow_status_len; i++) {
@@ -810,7 +803,7 @@ static void dfc_do_burst_flow_control(struct work_struct *work)
			}
		}

		trace_dfc_flow_ind(svc_ind->data->index,
		trace_dfc_flow_ind(dfc->index,
				   i, flow_status->mux_id,
				   flow_status->bearer_id,
				   flow_status->num_bytes,
@@ -818,7 +811,7 @@ static void dfc_do_burst_flow_control(struct work_struct *work)
				   ack_req,
				   ancillary);

		dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port,
		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
					  flow_status->mux_id);
		if (!dev)
			goto clean_out;
@@ -841,8 +834,37 @@ static void dfc_do_burst_flow_control(struct work_struct *work)

clean_out:
	rcu_read_unlock();
}

/*
 * dfc_qmi_ind_work - drain all queued QMI DFC indications.
 * @work: embedded work_struct inside struct dfc_qmi_data.
 *
 * Indications are queued onto dfc->qmi_ind_q by dfc_clnt_ind_cb() and
 * popped here one at a time under qmi_ind_lock, so back-to-back
 * indications are processed in arrival order within one work
 * invocation.  During restart (dfc->restart_state set) queued entries
 * are discarded without processing but still freed.
 *
 * Bottom halves are disabled for the duration of the drain —
 * presumably to keep the flow-control updates atomic with respect to
 * the softirq data path; confirm against the rmnet TX path.
 */
static void dfc_qmi_ind_work(struct work_struct *work)
{
	struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
						qmi_ind_work);
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	/*
	 * No NULL check on dfc: container_of() on the non-NULL work
	 * pointer supplied by the workqueue cannot produce NULL, so the
	 * former "if (!dfc) return;" guard was unreachable.
	 */
	local_bh_disable();

	do {
		/* Pop the oldest queued indication, if any. */
		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
						   struct dfc_svc_ind, list);
		if (svc_ind)
			list_del(&svc_ind->list);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		if (svc_ind) {
			/* Drop (but still free) entries during restart. */
			if (!dfc->restart_state)
				dfc_do_burst_flow_control(dfc, svc_ind);
			kfree(svc_ind);
		}
	} while (svc_ind != NULL);

	local_bh_enable();
}

static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
			    struct qmi_txn *txn, const void *data)
@@ -851,6 +873,7 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
						handle);
	struct dfc_flow_status_ind_msg_v01 *ind_msg;
	struct dfc_svc_ind *svc_ind;
	unsigned long flags;

	if (qmi != &dfc->handle)
		return;
@@ -867,13 +890,13 @@ static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
		if (!svc_ind)
			return;

		INIT_WORK((struct work_struct *)svc_ind,
			  dfc_do_burst_flow_control);

		memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
		svc_ind->data = dfc;

		queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind);
		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);

		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
	}
}

@@ -965,6 +988,10 @@ int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc)
	data->restart_state = 0;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
	INIT_LIST_HEAD(&data->qmi_ind_q);
	spin_lock_init(&data->qmi_ind_lock);

	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
	if (!data->dfc_wq) {
		pr_err("%s Could not create workqueue\n", __func__);