Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2b16b0cf authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "dfc: QMAP DFC flow control"

parents d07fa2d9 87e7297e
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -830,6 +830,7 @@ endif # MSM_PM

source "drivers/soc/qcom/memshare/Kconfig"
source "drivers/soc/qcom/hab/Kconfig"
source "drivers/soc/qcom/rmnet_ctl/Kconfig"

config MSM_PERFORMANCE
	tristate "msm performance driver to support userspace fmin/fmax request"
+2 −1
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@ obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o
qmi_helpers-y += qmi_interface.o
obj-$(CONFIG_QCOM_QMI_RMNET)	+= qmi_rmnet.o
obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o
obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o dfc_qmap.o
obj-$(CONFIG_QCOM_QMI_POWER_COLLAPSE) += wda_qmi.o
obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) +=	smem.o
@@ -102,3 +102,4 @@ obj-$(CONFIG_QCOM_AOP_DDR_MESSAGING) += aop_ddr_msgs.o
obj-$(CONFIG_MSM_HAB) += hab/
obj-$(CONFIG_QCOM_HYP_CORE_CTL) += hyp_core_ctl.o
obj-$(CONFIG_QCOM_AOP_DDRSS_COMMANDS) += aop_ddrss_cmds.o
obj-$(CONFIG_RMNET_CTL) += rmnet_ctl/
+101 −0
Original line number Diff line number Diff line
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Shared definitions for the DFC (flow control) clients. The QMI message
 * structures below are used both by the QMI transport (dfc_qmi.c) and by
 * the QMAP transport (dfc_qmap.c), which translates QMAP commands into
 * these indication structures before handing them to the common handlers.
 */

#ifndef _DFC_DEFS_H
#define _DFC_DEFS_H

#include <linux/soc/qcom/qmi.h>
#include "qmi_rmnet_i.h"

/* Bit masks carried in dfc_ancillary_info_type_v01.reserved */
#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)

/* Max entries in dfc_flow_status_info_type_v01.qos_ids */
#define DFC_MAX_QOS_ID_V01 2

/* Per-client state for one DFC service connection */
struct dfc_qmi_data {
	void *rmnet_port;
	struct workqueue_struct *dfc_wq;
	struct work_struct svc_arrive;
	struct qmi_handle handle;
	struct sockaddr_qrtr ssctl;
	struct svc_info svc;
	struct work_struct qmi_ind_work;
	struct list_head qmi_ind_q;		/* pending indications */
	spinlock_t qmi_ind_lock;		/* protects qmi_ind_q */
	int index;
	int restart_state;			/* nonzero while tearing down */
};

enum dfc_ip_type_enum_v01 {
	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
	DFC_IPV4_TYPE_V01 = 0x4,
	DFC_IPV6_TYPE_V01 = 0x6,
	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};

struct dfc_qos_id_type_v01 {
	u32 qos_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

/* Flow-control grant for one bearer */
struct dfc_flow_status_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 num_bytes;		/* grant size; 0 disables the flow */
	u16 seq_num;
	u8 qos_ids_len;
	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
};

struct dfc_ancillary_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 reserved;		/* DFC_MASK_* flag bits */
};

/* Flow status indication, consumed by dfc_do_burst_flow_control() */
struct dfc_flow_status_ind_msg_v01 {
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
	u8 eod_ack_reqd_valid;
	u8 eod_ack_reqd;
	u8 ancillary_info_valid;
	u8 ancillary_info_len;
	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};

struct dfc_bearer_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

/* TX link status indication, consumed by dfc_handle_tx_link_status_ind() */
struct dfc_tx_link_status_ind_msg_v01 {
	u8 tx_status;
	u8 bearer_info_valid;
	u8 bearer_info_len;
	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
};

void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind);

void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
				   struct dfc_tx_link_status_ind_msg_v01 *ind);

#endif /* _DFC_DEFS_H */
+485 −0
Original line number Diff line number Diff line
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <net/pkt_sched.h>
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>
#include <trace/events/dfc.h>
#include <soc/qcom/rmnet_ctl.h>
#include "dfc_defs.h"


#define QMAP_DFC_VER		1

/* QMAP command types (qmap_cmd_hdr.cmd_type) */
#define QMAP_CMD_REQUEST	0
#define QMAP_CMD_ACK		1
#define QMAP_CMD_UNSUPPORTED	2
#define QMAP_CMD_INVALID	3

/* QMAP DFC command names (qmap_cmd_hdr.cmd_name) */
#define QMAP_DFC_CONFIG		10
#define QMAP_DFC_IND		11
#define QMAP_DFC_QUERY		12
#define QMAP_DFC_QUERY_RESP	13
#define QMAP_DFC_END_MARKER_REQ	14
#define QMAP_DFC_END_MARKER_CNF	15
#define QMAP_DFC_POWER_SAVE	16

/*
 * Wire-format structures below. Multi-byte fields are big-endian (__be*).
 *
 * NOTE(review): __aligned(1) only lowers the minimum alignment; unlike
 * __packed it does not forbid interior padding. The field layouts here
 * appear naturally aligned so this presumably matches the wire format on
 * supported compilers — confirm __packed is not required.
 */
struct qmap_hdr {
	u8	cd_pad;
	u8	mux_id;
	__be16	pkt_len;	/* payload length, excludes this header */
} __aligned(1);

#define QMAP_HDR_LEN sizeof(struct qmap_hdr)

/* Common header for all QMAP control commands */
struct qmap_cmd_hdr {
	u8	pad_len:6;
	u8	reserved_bit:1;
	u8	cd_bit:1;	/* set for command (vs data) frames */
	u8	mux_id;
	__be16	pkt_len;
	u8	cmd_name;	/* QMAP_DFC_* */
	u8	cmd_type:2;	/* QMAP_CMD_* */
	u8	reserved:6;
	u16	reserved2;
	__be32	tx_id;		/* transaction id from qmap_txid */
} __aligned(1);

/* QMAP_DFC_CONFIG: enables DFC indications for this endpoint */
struct qmap_dfc_config {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			cmd_id;
	u8			reserved;
	u8			tx_info:1;
	u8			reserved2:7;
	__be32			ep_type;
	__be32			iface_id;
	u32			reserved3;
} __aligned(1);

/* QMAP_DFC_IND: flow grant or TX link status indication */
struct qmap_dfc_ind {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	__be16			seq_num;
	u8			reserved2;
	u8			tx_info_valid:1;
	u8			tx_info:1;
	u8			reserved3:6;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			bearer_status:3;
	u8			reserved4:4;
	__be32			grant;
	u32			reserved5;
	u32			reserved6;
} __aligned(1);

/* QMAP_DFC_QUERY: request current grant for one bearer */
struct qmap_dfc_query {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u32			reserved3;
} __aligned(1);

/* QMAP_DFC_QUERY_RESP: reply to a query */
struct qmap_dfc_query_resp {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			reserved:7;
	u8			reserved2;
	__be32			grant;
	u32			reserved3;
	u32			reserved4;
} __aligned(1);

/* QMAP_DFC_END_MARKER_REQ: modem asks for an end-marker confirmation */
struct qmap_dfc_end_marker_req {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);

/* QMAP_DFC_END_MARKER_CNF: in-band confirmation sent on the data path */
struct qmap_dfc_end_marker_cnf {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);

struct qmap_dfc_power_save {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			reserved2;
	u8			mode:1;
	u8			reserved3:7;
	__be32			ep_type;
	__be32			iface_id;
	u32			reserved4;
} __aligned(1);

/*
 * Scratch indication buffers reused for every translated QMAP command.
 * NOTE(review): these are unlocked file-scope statics — presumably all
 * writers run on the single rmnet_ctl downlink path; confirm no
 * concurrent callers exist.
 */
static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;	/* active client */
static atomic_t qmap_txid;				/* tx_id generator */
static void *rmnet_ctl_handle;

/* Hand a QMAP command skb to the rmnet control channel. Ownership of
 * the skb passes to this function: it is freed here on send failure,
 * otherwise consumed by the control client.
 */
static void dfc_qmap_send_cmd(struct sk_buff *skb)
{
	int err;

	trace_dfc_qmap(skb->data, skb->len, false);

	err = rmnet_ctl_send_client(rmnet_ctl_handle, skb);
	if (!err)
		return;

	pr_err("Failed to send to rmnet ctl\n");
	kfree_skb(skb);
}

/* Translate a QMAP_DFC_IND command into either a TX link status
 * indication or a flow status indication and feed it to the common
 * DFC handlers. Returns the QMAP command type to ack with.
 */
static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
			       struct sk_buff *skb)
{
	struct qmap_dfc_ind *ind;

	if (skb->len < sizeof(struct qmap_dfc_ind))
		return QMAP_CMD_INVALID;

	ind = (struct qmap_dfc_ind *)skb->data;

	if (ind->tx_info_valid) {
		/* TX on/off notification; grant is always 0 here */
		memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
		qmap_tx_ind.tx_status = ind->tx_info;
		qmap_tx_ind.bearer_info_valid = 1;
		qmap_tx_ind.bearer_info_len = 1;
		qmap_tx_ind.bearer_info[0].mux_id = ind->hdr.mux_id;
		qmap_tx_ind.bearer_info[0].bearer_id = ind->bearer_id;

		dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);

		return QMAP_CMD_ACK;
	}

	/* Flow grant update for a single bearer */
	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;
	qmap_flow_ind.flow_status[0].mux_id = ind->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = ind->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(ind->grant);
	qmap_flow_ind.flow_status[0].seq_num = ntohs(ind->seq_num);

	if (ind->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = ind->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = ind->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);

	return QMAP_CMD_ACK;
}

static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
				      struct sk_buff *skb)
{
	struct qmap_dfc_query_resp *cmd;

	if (skb->len < sizeof(struct qmap_dfc_query_resp))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_query_resp *)skb->data;

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;

	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;

	if (cmd->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);

	return QMAP_CMD_ACK;
}

/* Handle an end-marker request for one bearer. If the bearer still has
 * a grant and its last seen sequence number matches, defer the
 * confirmation (set ack_req so the data path sends it later);
 * otherwise confirm immediately.
 */
static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
				    u8 bearer_id, u16 seq_num)
{
	struct rmnet_bearer_map *bearer;
	struct net_device *dev;
	struct qos_info *qos;
	bool defer_cnf;

	dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
	if (!dev)
		return;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	spin_lock_bh(&qos->qos_lock);

	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
	defer_cnf = bearer && bearer->last_seq == seq_num &&
		    bearer->grant_size;

	if (defer_cnf)
		bearer->ack_req = 1;
	else
		dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num);

	spin_unlock_bh(&qos->qos_lock);
}

/* Validate and dispatch a QMAP_DFC_END_MARKER_REQ command */
static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
					  struct sk_buff *skb)
{
	struct qmap_dfc_end_marker_req *req;

	if (skb->len < sizeof(*req))
		return QMAP_CMD_INVALID;

	req = (struct qmap_dfc_end_marker_req *)skb->data;
	dfc_qmap_set_end_marker(dfc, req->hdr.mux_id, req->bearer_id,
				ntohs(req->seq_num));

	return QMAP_CMD_ACK;
}

/* Downlink handler for QMAP control commands delivered by rmnet_ctl.
 * Validates the command header, dispatches to the per-command handlers,
 * then converts the request skb in place into an ack (only cmd_type is
 * rewritten) and sends it back. The skb is consumed on all paths.
 */
static void dfc_qmap_cmd_handler(struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;
	struct dfc_qmi_data *dfc;
	int rc = QMAP_CMD_ACK;

	if (!skb)
		return;

	trace_dfc_qmap(skb->data, skb->len, true);

	if (skb->len < sizeof(struct qmap_cmd_hdr))
		goto free_skb;

	cmd = (struct qmap_cmd_hdr *)skb->data;
	/* Only command-bit requests whose declared payload length matches
	 * the skb length are processed.
	 */
	if (!cmd->cd_bit || cmd->cmd_type != QMAP_CMD_REQUEST ||
	    skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
		goto free_skb;

	rcu_read_lock();

	/* Drop commands racing with client teardown/restart */
	dfc = rcu_dereference(qmap_dfc_data);
	if (!dfc || READ_ONCE(dfc->restart_state)) {
		rcu_read_unlock();
		goto free_skb;
	}

	switch (cmd->cmd_name) {
	case QMAP_DFC_IND:
		rc = dfc_qmap_handle_ind(dfc, skb);
		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
		break;

	case QMAP_DFC_QUERY_RESP:
		rc = dfc_qmap_handle_query_resp(dfc, skb);
		break;

	case QMAP_DFC_END_MARKER_REQ:
		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
		break;

	default:
		rc = QMAP_CMD_UNSUPPORTED;
	}

	rcu_read_unlock();

	/* Send ack */
	cmd->cmd_type = rc;
	dfc_qmap_send_cmd(skb);

	return;

free_skb:
	kfree_skb(skb);
}

/* Send a QMAP_DFC_CONFIG request registering this endpoint for
 * QMAP_DFC_IND indications with TX status reporting enabled.
 */
static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
	struct qmap_dfc_config *req;
	unsigned int len = sizeof(*req);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);

	req = (struct qmap_dfc_config *)skb_put(skb, len);
	memset(req, 0, len);

	req->hdr.cd_bit = 1;
	req->hdr.mux_id = 0;
	req->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	req->hdr.cmd_name = QMAP_DFC_CONFIG;
	req->hdr.cmd_type = QMAP_CMD_REQUEST;
	req->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	req->cmd_ver = QMAP_DFC_VER;
	req->cmd_id = QMAP_DFC_IND;
	req->tx_info = 1;
	req->ep_type = htonl(data->svc.ep_type);
	req->iface_id = htonl(data->svc.iface_id);

	dfc_qmap_send_cmd(skb);
}

/* Send a QMAP_DFC_QUERY asking for the current grant on one bearer */
void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
	struct qmap_dfc_query *query;
	unsigned int len = sizeof(*query);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);

	query = (struct qmap_dfc_query *)skb_put(skb, len);
	memset(query, 0, len);

	query->hdr.cd_bit = 1;
	query->hdr.mux_id = mux_id;
	query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	query->hdr.cmd_name = QMAP_DFC_QUERY;
	query->hdr.cmd_type = QMAP_CMD_REQUEST;
	query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	query->cmd_ver = QMAP_DFC_VER;
	query->bearer_id = bearer_id;

	dfc_qmap_send_cmd(skb);
}

/* Build and transmit an end-marker confirmation. Unlike the other
 * commands this one must travel in-band on the data path, so it is
 * handed to rmnet_map_tx_qmap_cmd() rather than the control channel.
 */
void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, u8 bearer_id, u16 seq)
{
	struct qmap_dfc_end_marker_cnf *cnf;
	unsigned int len = sizeof(*cnf);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
	memset(cnf, 0, len);

	cnf->hdr.cd_bit = 1;
	cnf->hdr.mux_id = qos->mux_id;
	cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	cnf->hdr.cmd_name = QMAP_DFC_END_MARKER_CNF;
	cnf->hdr.cmd_type = QMAP_CMD_REQUEST;
	cnf->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	cnf->cmd_ver = QMAP_DFC_VER;
	cnf->bearer_id = bearer_id;
	cnf->seq_num = htons(seq);

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = qos->real_dev;

	/* This cmd needs to be sent in-band */
	trace_dfc_qmap(skb->data, skb->len, false);
	rmnet_map_tx_qmap_cmd(skb);
}

/* rmnet_ctl hooks: downlink QMAP control packets are delivered here */
static struct rmnet_ctl_client_hooks cb = {
	.ctl_dl_client_hook = dfc_qmap_cmd_handler,
};

int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
			 struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;

	if (!port || !qmi)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	qmi->dfc_clients[index] = (void *)data;
	rcu_assign_pointer(qmap_dfc_data, data);

	atomic_set(&qmap_txid, 0);

	rmnet_ctl_handle = rmnet_ctl_register_client(&cb);
	if (!rmnet_ctl_handle)
		pr_err("Failed to register with rmnet ctl\n");

	trace_dfc_client_state_up(data->index, data->svc.instance,
				  data->svc.ep_type, data->svc.iface_id);

	pr_info("DFC QMAP init\n");

	dfc_qmap_send_config(data);

	return 0;
}

/* Tear down the QMAP DFC client: stop downlink command delivery, mark
 * restart state so in-flight handlers bail out, unpublish the client
 * under RCU, then free it.
 *
 * Fix: rmnet_ctl_handle was left stale after unregistering (and could
 * be NULL if registration had failed at init); guard the unregister
 * and clear the handle to prevent use of a dangling client handle.
 */
void dfc_qmap_client_exit(void *dfc_data)
{
	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

	if (!data) {
		pr_err("%s() data is null\n", __func__);
		return;
	}

	trace_dfc_client_state_down(data->index, 0);

	/* Stop downlink delivery before tearing down state */
	if (rmnet_ctl_handle) {
		rmnet_ctl_unregister_client(rmnet_ctl_handle);
		rmnet_ctl_handle = NULL;
	}

	WRITE_ONCE(data->restart_state, 1);
	RCU_INIT_POINTER(qmap_dfc_data, NULL);
	synchronize_rcu();

	kfree(data);

	pr_info("DFC QMAP exit\n");
}
+28 −88
Original line number Diff line number Diff line
@@ -11,23 +11,14 @@
 * GNU General Public License for more details.
 */

#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include <linux/soc/qcom/qmi.h>
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>
#include "dfc_defs.h"

#include "qmi_rmnet_i.h"
#define CREATE_TRACE_POINTS
#include <trace/events/dfc.h>

#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)

#define DFC_MAX_QOS_ID_V01 2

#define DFC_ACK_TYPE_DISABLE 1
#define DFC_ACK_TYPE_THRESHOLD 2

@@ -55,20 +46,6 @@ struct dfc_ack_cmd {
	u8  bearer_id;
} __aligned(1);

struct dfc_qmi_data {
	void *rmnet_port;
	struct workqueue_struct *dfc_wq;
	struct work_struct svc_arrive;
	struct qmi_handle handle;
	struct sockaddr_qrtr ssctl;
	struct svc_info svc;
	struct work_struct qmi_ind_work;
	struct list_head qmi_ind_q;
	spinlock_t qmi_ind_lock;
	int index;
	int restart_state;
};

static void dfc_svc_init(struct work_struct *work);

/* **************************************************** */
@@ -114,28 +91,6 @@ struct dfc_indication_register_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};

enum dfc_ip_type_enum_v01 {
	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
	DFC_IPV4_TYPE_V01 = 0x4,
	DFC_IPV6_TYPE_V01 = 0x6,
	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};

struct dfc_qos_id_type_v01 {
	u32 qos_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_flow_status_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 num_bytes;
	u16 seq_num;
	u8 qos_ids_len;
	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
};

static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
@@ -249,13 +204,6 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
	},
};

struct dfc_ancillary_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 reserved;
};

static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
@@ -308,31 +256,6 @@ static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	},
};

struct dfc_flow_status_ind_msg_v01 {
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
	u8 eod_ack_reqd_valid;
	u8 eod_ack_reqd;
	u8 ancillary_info_valid;
	u8 ancillary_info_len;
	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};

struct dfc_bearer_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_tx_link_status_ind_msg_v01 {
	u8 tx_status;
	u8 bearer_info_valid;
	u8 bearer_info_len;
	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
};

struct dfc_get_flow_status_req_msg_v01 {
	u8 bearer_id_list_valid;
	u8 bearer_id_list_len;
@@ -962,6 +885,14 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
	if (!qos)
		return;

	if (dfc_qmap) {
		if (type == DFC_ACK_TYPE_DISABLE)
			dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq);
		else if (type == DFC_ACK_TYPE_THRESHOLD)
			dfc_qmap_send_query(mux_id, bearer_id);
		return;
	}

	skb = alloc_skb(data_size, GFP_ATOMIC);
	if (!skb)
		return;
@@ -1091,6 +1022,11 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
			action = true;

		/* This is needed by qmap */
		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
			dfc_qmap_send_end_marker_cnf(
				qos, itm->bearer_id, itm->seq);

		itm->grant_size = fc_info->num_bytes;
		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
		itm->seq = fc_info->seq_num;
@@ -1108,10 +1044,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
	return rc;
}

static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
				      struct dfc_svc_ind *svc_ind)
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind)
{
	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info;
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_flow_status_info_type_v01 *flow_status;
@@ -1185,13 +1120,17 @@ static void dfc_update_tx_link_status(struct net_device *dev,
	if (!itm)
		return;

	/* If no change in tx status, ignore */
	if (itm->tx_off == !tx_status)
		return;

	if (itm->grant_size && !tx_status) {
		itm->grant_size = 0;
		itm->tcp_bidir = false;
		dfc_bearer_flow_ctl(dev, itm, qos);
	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
		itm->grant_size = DEFAULT_GRANT;
		itm->grant_thresh = DEFAULT_GRANT;
		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		itm->seq = 0;
		itm->ack_req = 0;
		dfc_bearer_flow_ctl(dev, itm, qos);
@@ -1200,10 +1139,9 @@ static void dfc_update_tx_link_status(struct net_device *dev,
	itm->tx_off = !tx_status;
}

static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
					  struct dfc_svc_ind *svc_ind)
void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
				   struct dfc_tx_link_status_ind_msg_v01 *ind)
{
	struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status;
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_bearer_info_type_v01 *bearer_info;
@@ -1265,10 +1203,12 @@ static void dfc_qmi_ind_work(struct work_struct *work)

		if (!dfc->restart_state) {
			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
				dfc_do_burst_flow_control(dfc, svc_ind);
				dfc_do_burst_flow_control(
						dfc, &svc_ind->d.dfc_info);
			else if (svc_ind->msg_id ==
					QMI_DFC_TX_LINK_STATUS_IND_V01)
				dfc_handle_tx_link_status_ind(dfc, svc_ind);
				dfc_handle_tx_link_status_ind(
						dfc, &svc_ind->d.tx_status);
		}
		kfree(svc_ind);
	} while (1);
@@ -1592,7 +1532,7 @@ void dfc_qmi_query_flow(void *dfc_data)
	svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
	memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
		sizeof(resp->flow_status[0]) * resp->flow_status_len);
	dfc_do_burst_flow_control(data, svc_ind);
	dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info);

done:
	kfree(svc_ind);
Loading