
Commit 8f5e0fc8 authored by Subash Abhinov Kasiviswanathan

dfc: QMAP DFC flow control



Added QMAP-based flow control. Registers with the rmnet_ctl driver
for receiving, handling, and transmitting QMAP DFC commands.

Change-Id: Ic0da19dca340fa80acc1646f033796e84afa4e4a
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 8c3faed5
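
For orientation, a minimal standalone sketch (not part of the patch) of the QMAP control-frame length check that dfc_qmap_cmd_handler() in dfc_qmap.c below applies to every command received from rmnet_ctl. The field offsets follow struct qmap_hdr / qmap_cmd_hdr in the patch; the userspace framing and the helper name qmap_cmd_len_ok are illustrative assumptions only.

/* Illustrative sketch, not part of the patch: the framing invariant that
 * dfc_qmap_cmd_handler() enforces on received QMAP DFC commands. pkt_len
 * (bytes 2..3 of the QMAP header, big endian) counts everything after the
 * 4-byte QMAP header, so a well-formed frame satisfies
 * total_len == ntohs(pkt_len) + QMAP_HDR_LEN.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define QMAP_HDR_LEN 4	/* sizeof(struct qmap_hdr): cd_pad, mux_id, pkt_len */

static int qmap_cmd_len_ok(const uint8_t *buf, size_t len)
{
	uint16_t pkt_len;

	if (len < QMAP_HDR_LEN)
		return 0;
	memcpy(&pkt_len, buf + 2, sizeof(pkt_len));	/* bytes 2..3, network order */
	return len == (size_t)ntohs(pkt_len) + QMAP_HDR_LEN;
}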
drivers/soc/qcom/Makefile  +1 −1
@@ -8,7 +8,7 @@ obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS)	+= qmi_helpers.o
qmi_helpers-y	+= qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_QMI_RMNET)	+= qmi_rmnet.o
obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o
obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o dfc_qmap.o
obj-$(CONFIG_QCOM_QMI_POWER_COLLAPSE) += wda_qmi.o
obj-$(CONFIG_QCOM_RMTFS_MEM)	+= rmtfs_mem.o
obj-$(CONFIG_QCOM_RPMH)		+= qcom_rpmh.o
drivers/soc/qcom/dfc_defs.h  +93 −0
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#ifndef _DFC_DEFS_H
#define _DFC_DEFS_H

#include <linux/soc/qcom/qmi.h>
#include "qmi_rmnet_i.h"

#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)

#define DFC_MAX_QOS_ID_V01 2

struct dfc_qmi_data {
	void *rmnet_port;
	struct workqueue_struct *dfc_wq;
	struct work_struct svc_arrive;
	struct qmi_handle handle;
	struct sockaddr_qrtr ssctl;
	struct svc_info svc;
	struct work_struct qmi_ind_work;
	struct list_head qmi_ind_q;
	spinlock_t qmi_ind_lock;
	int index;
	int restart_state;
};

enum dfc_ip_type_enum_v01 {
	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
	DFC_IPV4_TYPE_V01 = 0x4,
	DFC_IPV6_TYPE_V01 = 0x6,
	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};

struct dfc_qos_id_type_v01 {
	u32 qos_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_flow_status_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 num_bytes;
	u16 seq_num;
	u8 qos_ids_len;
	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
};

struct dfc_ancillary_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 reserved;
};

struct dfc_flow_status_ind_msg_v01 {
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
	u8 eod_ack_reqd_valid;
	u8 eod_ack_reqd;
	u8 ancillary_info_valid;
	u8 ancillary_info_len;
	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};

struct dfc_bearer_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_tx_link_status_ind_msg_v01 {
	u8 tx_status;
	u8 bearer_info_valid;
	u8 bearer_info_len;
	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
};

void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind);

void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
				   struct dfc_tx_link_status_ind_msg_v01 *ind);

#endif /* _DFC_DEFS_H */
drivers/soc/qcom/dfc_qmap.c  +477 −0
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

#include <net/pkt_sched.h>
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>
#include <trace/events/dfc.h>
#include <soc/qcom/rmnet_ctl.h>
#include "dfc_defs.h"


#define QMAP_DFC_VER		1

#define QMAP_CMD_REQUEST	0
#define QMAP_CMD_ACK		1
#define QMAP_CMD_UNSUPPORTED	2
#define QMAP_CMD_INVALID	3

#define QMAP_DFC_CONFIG		10
#define QMAP_DFC_IND		11
#define QMAP_DFC_QUERY		12
#define QMAP_DFC_QUERY_RESP	13
#define QMAP_DFC_END_MARKER_REQ	14
#define QMAP_DFC_END_MARKER_CNF	15
#define QMAP_DFC_POWER_SAVE	16

struct qmap_hdr {
	u8	cd_pad;
	u8	mux_id;
	__be16	pkt_len;
} __aligned(1);

#define QMAP_HDR_LEN sizeof(struct qmap_hdr)

struct qmap_cmd_hdr {
	u8	pad_len:6;
	u8	reserved_bit:1;
	u8	cd_bit:1;
	u8	mux_id;
	__be16	pkt_len;
	u8	cmd_name;
	u8	cmd_type:2;
	u8	reserved:6;
	u16	reserved2;
	__be32	tx_id;
} __aligned(1);

struct qmap_dfc_config {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			cmd_id;
	u8			reserved;
	u8			tx_info:1;
	u8			reserved2:7;
	__be32			ep_type;
	__be32			iface_id;
	u32			reserved3;
} __aligned(1);

struct qmap_dfc_ind {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	__be16			seq_num;
	u8			reserved2;
	u8			tx_info_valid:1;
	u8			tx_info:1;
	u8			reserved3:6;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			bearer_status:3;
	u8			reserved4:4;
	__be32			grant;
	u32			reserved5;
	u32			reserved6;
} __aligned(1);

struct qmap_dfc_query {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u32			reserved3;
} __aligned(1);

struct qmap_dfc_query_resp {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			reserved:7;
	u8			reserved2;
	__be32			grant;
	u32			reserved3;
	u32			reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_req {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);

struct qmap_dfc_end_marker_cnf {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			bearer_id;
	u8			reserved2;
	u16			reserved3;
	__be16			seq_num;
	u32			reserved4;
} __aligned(1);

struct qmap_dfc_power_save {
	struct qmap_cmd_hdr	hdr;
	u8			cmd_ver;
	u8			reserved;
	u8			reserved2;
	u8			mode:1;
	u8			reserved3:7;
	__be32			ep_type;
	__be32			iface_id;
	u32			reserved4;
} __aligned(1);

static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
static atomic_t qmap_txid;
static void *rmnet_ctl_handle;

static void dfc_qmap_send_cmd(struct sk_buff *skb)
{
	trace_dfc_qmap(skb->data, skb->len, false);

	if (rmnet_ctl_send_client(rmnet_ctl_handle, skb)) {
		pr_err("Failed to send to rmnet ctl\n");
		kfree_skb(skb);
	}
}

static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
			       struct sk_buff *skb)
{
	struct qmap_dfc_ind *cmd;

	if (skb->len < sizeof(struct qmap_dfc_ind))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_ind *)skb->data;

	if (cmd->tx_info_valid) {
		memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
		qmap_tx_ind.tx_status = cmd->tx_info;
		qmap_tx_ind.bearer_info_valid = 1;
		qmap_tx_ind.bearer_info_len = 1;
		qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
		qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;

		dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);

		/* Ignore grant since it is always 0 */
		goto done;
	}

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;
	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
	qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);

	if (cmd->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);

done:
	return QMAP_CMD_ACK;
}

static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
				      struct sk_buff *skb)
{
	struct qmap_dfc_query_resp *cmd;

	if (skb->len < sizeof(struct qmap_dfc_query_resp))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_query_resp *)skb->data;

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;

	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;

	if (cmd->tcp_bidir) {
		qmap_flow_ind.ancillary_info_valid = 1;
		qmap_flow_ind.ancillary_info_len = 1;
		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
	}

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);

	return QMAP_CMD_ACK;
}

static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
				    u8 bearer_id, u16 seq_num)
{
	struct net_device *dev;
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;

	dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
	if (!dev)
		return;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	spin_lock_bh(&qos->qos_lock);

	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);

	if (bearer && bearer->last_seq == seq_num && bearer->grant_size)
		bearer->ack_req = 1;
	else
		dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num);

	spin_unlock_bh(&qos->qos_lock);
}

static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
					  struct sk_buff *skb)
{
	struct qmap_dfc_end_marker_req *cmd;

	if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
		return QMAP_CMD_INVALID;

	cmd = (struct qmap_dfc_end_marker_req *)skb->data;

	dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id,
				cmd->bearer_id, ntohs(cmd->seq_num));

	return QMAP_CMD_ACK;
}

static void dfc_qmap_cmd_handler(struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;
	struct dfc_qmi_data *dfc;
	int rc = QMAP_CMD_ACK;

	if (!skb)
		return;

	trace_dfc_qmap(skb->data, skb->len, true);

	if (skb->len < sizeof(struct qmap_cmd_hdr))
		goto free_skb;

	cmd = (struct qmap_cmd_hdr *)skb->data;
	if (!cmd->cd_bit || cmd->cmd_type != QMAP_CMD_REQUEST ||
	    skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
		goto free_skb;

	rcu_read_lock();

	dfc = rcu_dereference(qmap_dfc_data);
	if (!dfc || READ_ONCE(dfc->restart_state)) {
		rcu_read_unlock();
		goto free_skb;
	}

	switch (cmd->cmd_name) {
	case QMAP_DFC_IND:
		rc = dfc_qmap_handle_ind(dfc, skb);
		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
		break;

	case QMAP_DFC_QUERY_RESP:
		rc = dfc_qmap_handle_query_resp(dfc, skb);
		break;

	case QMAP_DFC_END_MARKER_REQ:
		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
		break;

	default:
		rc = QMAP_CMD_UNSUPPORTED;
	}

	rcu_read_unlock();

	/* Send ack */
	cmd->cmd_type = rc;
	dfc_qmap_send_cmd(skb);

	return;

free_skb:
	kfree_skb(skb);
}

static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
	struct sk_buff *skb;
	struct qmap_dfc_config *dfc_config;
	unsigned int len = sizeof(struct qmap_dfc_config);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
	memset(dfc_config, 0, len);

	dfc_config->hdr.cd_bit = 1;
	dfc_config->hdr.mux_id = 0;
	dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
	dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	dfc_config->cmd_ver = QMAP_DFC_VER;
	dfc_config->cmd_id = QMAP_DFC_IND;
	dfc_config->tx_info = 1;
	dfc_config->ep_type = htonl(data->svc.ep_type);
	dfc_config->iface_id = htonl(data->svc.iface_id);

	dfc_qmap_send_cmd(skb);
}

void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_query *dfc_query;
	unsigned int len = sizeof(struct qmap_dfc_query);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_MAP);
	dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
	memset(dfc_query, 0, len);

	dfc_query->hdr.cd_bit = 1;
	dfc_query->hdr.mux_id = mux_id;
	dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
	dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	dfc_query->cmd_ver = QMAP_DFC_VER;
	dfc_query->bearer_id = bearer_id;

	dfc_qmap_send_cmd(skb);
}

void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, u8 bearer_id, u16 seq)
{
	struct sk_buff *skb;
	struct qmap_dfc_end_marker_cnf *em_cnf;
	unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
	memset(em_cnf, 0, len);

	em_cnf->hdr.cd_bit = 1;
	em_cnf->hdr.mux_id = qos->mux_id;
	em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER_CNF;
	em_cnf->hdr.cmd_type = QMAP_CMD_REQUEST;
	em_cnf->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	em_cnf->cmd_ver = QMAP_DFC_VER;
	em_cnf->bearer_id = bearer_id;
	em_cnf->seq_num = htons(seq);

	skb->protocol = htons(ETH_P_MAP);
	skb->dev = qos->real_dev;

	/* This cmd needs to be sent in-band */
	trace_dfc_qmap(skb->data, skb->len, false);
	rmnet_map_tx_qmap_cmd(skb);
}

static struct rmnet_ctl_client_hooks cb = {
	.ctl_dl_client_hook = dfc_qmap_cmd_handler,
};

int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
			 struct qmi_info *qmi)
{
	struct dfc_qmi_data *data;

	if (!port || !qmi)
		return -EINVAL;

	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->rmnet_port = port;
	data->index = index;
	memcpy(&data->svc, psvc, sizeof(data->svc));

	qmi->dfc_clients[index] = (void *)data;
	rcu_assign_pointer(qmap_dfc_data, data);

	atomic_set(&qmap_txid, 0);

	rmnet_ctl_handle = rmnet_ctl_register_client(&cb);
	if (!rmnet_ctl_handle)
		pr_err("Failed to register with rmnet ctl\n");

	trace_dfc_client_state_up(data->index, data->svc.instance,
				  data->svc.ep_type, data->svc.iface_id);

	pr_info("DFC QMAP init\n");

	dfc_qmap_send_config(data);

	return 0;
}

void dfc_qmap_client_exit(void *dfc_data)
{
	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;

	if (!data) {
		pr_err("%s() data is null\n", __func__);
		return;
	}

	trace_dfc_client_state_down(data->index, 0);

	rmnet_ctl_unregister_client(rmnet_ctl_handle);

	WRITE_ONCE(data->restart_state, 1);
	RCU_INIT_POINTER(qmap_dfc_data, NULL);
	synchronize_rcu();

	kfree(data);

	pr_info("DFC QMAP exit\n");
}
drivers/soc/qcom/dfc_qmi.c  +28 −88
@@ -3,23 +3,14 @@
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include <linux/soc/qcom/qmi.h>
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>
#include "dfc_defs.h"

#include "qmi_rmnet_i.h"
#define CREATE_TRACE_POINTS
#include <trace/events/dfc.h>

#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)

#define DFC_MAX_QOS_ID_V01 2

#define DFC_ACK_TYPE_DISABLE 1
#define DFC_ACK_TYPE_THRESHOLD 2

@@ -47,20 +38,6 @@ struct dfc_ack_cmd {
	u8  bearer_id;
} __aligned(1);

struct dfc_qmi_data {
	void *rmnet_port;
	struct workqueue_struct *dfc_wq;
	struct work_struct svc_arrive;
	struct qmi_handle handle;
	struct sockaddr_qrtr ssctl;
	struct svc_info svc;
	struct work_struct qmi_ind_work;
	struct list_head qmi_ind_q;
	spinlock_t qmi_ind_lock;
	int index;
	int restart_state;
};

static void dfc_svc_init(struct work_struct *work);

/* **************************************************** */
@@ -106,28 +83,6 @@ struct dfc_indication_register_resp_msg_v01 {
	struct qmi_response_type_v01 resp;
};

enum dfc_ip_type_enum_v01 {
	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
	DFC_IPV4_TYPE_V01 = 0x4,
	DFC_IPV6_TYPE_V01 = 0x6,
	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};

struct dfc_qos_id_type_v01 {
	u32 qos_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_flow_status_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 num_bytes;
	u16 seq_num;
	u8 qos_ids_len;
	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
};

static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
@@ -241,13 +196,6 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
	},
};

struct dfc_ancillary_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	u32 reserved;
};

static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
@@ -300,31 +248,6 @@ static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
	},
};

struct dfc_flow_status_ind_msg_v01 {
	u8 flow_status_valid;
	u8 flow_status_len;
	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
	u8 eod_ack_reqd_valid;
	u8 eod_ack_reqd;
	u8 ancillary_info_valid;
	u8 ancillary_info_len;
	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};

struct dfc_bearer_info_type_v01 {
	u8 subs_id;
	u8 mux_id;
	u8 bearer_id;
	enum dfc_ip_type_enum_v01 ip_type;
};

struct dfc_tx_link_status_ind_msg_v01 {
	u8 tx_status;
	u8 bearer_info_valid;
	u8 bearer_info_len;
	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
};

struct dfc_get_flow_status_req_msg_v01 {
	u8 bearer_id_list_valid;
	u8 bearer_id_list_len;
@@ -954,6 +877,14 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
	if (!qos)
		return;

	if (dfc_qmap) {
		if (type == DFC_ACK_TYPE_DISABLE)
			dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq);
		else if (type == DFC_ACK_TYPE_THRESHOLD)
			dfc_qmap_send_query(mux_id, bearer_id);
		return;
	}

	skb = alloc_skb(data_size, GFP_ATOMIC);
	if (!skb)
		return;
@@ -1083,6 +1014,11 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
		    (itm->grant_size > 0 && fc_info->num_bytes == 0))
			action = true;

		/* This is needed by qmap */
		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
			dfc_qmap_send_end_marker_cnf(
				qos, itm->bearer_id, itm->seq);

		itm->grant_size = fc_info->num_bytes;
		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
		itm->seq = fc_info->seq_num;
@@ -1099,10 +1035,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
	return rc;
}

static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
				      struct dfc_svc_ind *svc_ind)
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
			       struct dfc_flow_status_ind_msg_v01 *ind)
{
	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info;
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_flow_status_info_type_v01 *flow_status;
@@ -1176,13 +1111,17 @@ static void dfc_update_tx_link_status(struct net_device *dev,
	if (!itm)
		return;

	/* If no change in tx status, ignore */
	if (itm->tx_off == !tx_status)
		return;

	if (itm->grant_size && !tx_status) {
		itm->grant_size = 0;
		itm->tcp_bidir = false;
		dfc_bearer_flow_ctl(dev, itm, qos);
	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
		itm->grant_size = DEFAULT_GRANT;
		itm->grant_thresh = DEFAULT_GRANT;
		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		itm->seq = 0;
		itm->ack_req = 0;
		dfc_bearer_flow_ctl(dev, itm, qos);
@@ -1191,10 +1130,9 @@ static void dfc_update_tx_link_status(struct net_device *dev,
	itm->tx_off = !tx_status;
}

static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
					  struct dfc_svc_ind *svc_ind)
void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
				   struct dfc_tx_link_status_ind_msg_v01 *ind)
{
	struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status;
	struct net_device *dev;
	struct qos_info *qos;
	struct dfc_bearer_info_type_v01 *bearer_info;
@@ -1256,10 +1194,12 @@ static void dfc_qmi_ind_work(struct work_struct *work)

		if (!dfc->restart_state) {
			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
				dfc_do_burst_flow_control(dfc, svc_ind);
				dfc_do_burst_flow_control(
						dfc, &svc_ind->d.dfc_info);
			else if (svc_ind->msg_id ==
					QMI_DFC_TX_LINK_STATUS_IND_V01)
				dfc_handle_tx_link_status_ind(dfc, svc_ind);
				dfc_handle_tx_link_status_ind(
						dfc, &svc_ind->d.tx_status);
		}
		kfree(svc_ind);
	} while (1);
@@ -1583,7 +1523,7 @@ void dfc_qmi_query_flow(void *dfc_data)
	svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
	memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
		sizeof(resp->flow_status[0]) * resp->flow_status_len);
	dfc_do_burst_flow_control(data, svc_ind);
	dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info);

done:
	kfree(svc_ind);
drivers/soc/qcom/qmi_rmnet.c  +22 −20
@@ -23,11 +23,15 @@

#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define FLAG_QMAP_MASK 0x0020

#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
#define DFC_SUPPORTED_MODE(m) \
	((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)

int dfc_mode;
int dfc_qmap;
#define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)

unsigned int rmnet_wq_frequency __read_mostly = 1000;
@@ -82,7 +86,7 @@ void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
	int i;

	if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
	if (!qmi)
		return NULL;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -379,18 +383,12 @@ static void qmi_rmnet_query_flows(struct qmi_info *qmi)
	int i;

	for (i = 0; i < MAX_CLIENT_NUM; i++) {
		if (qmi->dfc_clients[i])
		if (qmi->dfc_clients[i] && !dfc_qmap)
			dfc_qmi_query_flow(qmi->dfc_clients[i]);
	}
}

#else
static inline void
qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
			   struct rmnet_flow_map *itm, int add_flow)
{
}

static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
}
@@ -423,7 +421,7 @@ static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
static int
qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
	int idx, rc, err = 0;
	int idx, err = 0;
	struct svc_info svc;

	ASSERT_RTNL();
@@ -447,18 +445,17 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
	svc.ep_type = tcm->tcm_info;
	svc.iface_id = tcm->tcm_parent;

	if (DFC_SUPPORTED_MODE(FLAG_TO_MODE(tcm->tcm_ifindex)) &&
	if (DFC_SUPPORTED_MODE(dfc_mode) &&
	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
		rc = dfc_qmi_client_init(port, idx, &svc, qmi);
		if (rc < 0)
			err = rc;
		if (dfc_qmap)
			err = dfc_qmap_client_init(port, idx, &svc, qmi);
		else
			err = dfc_qmi_client_init(port, idx, &svc, qmi);
	}

	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
		rc = wda_qmi_client_init(port, &svc, qmi);
		if (rc < 0)
			err = rc;
		err = wda_qmi_client_init(port, &svc, qmi);
	}

	return err;
@@ -477,6 +474,9 @@ __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
		data = qmi->dfc_pending[idx];

	if (data) {
		if (dfc_qmap)
			dfc_qmap_client_exit(data);
		else
			dfc_qmi_client_exit(data);
		qmi->dfc_clients[idx] = NULL;
		qmi->dfc_pending[idx] = NULL;
@@ -524,20 +524,22 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)

	switch (tcm->tcm_family) {
	case NLMSG_FLOW_ACTIVATE:
		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)) ||
		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
		    !qmi_rmnet_has_dfc_client(qmi))
			return;

		qmi_rmnet_add_flow(dev, tcm, qmi);
		break;
	case NLMSG_FLOW_DEACTIVATE:
		if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
			return;

		qmi_rmnet_del_flow(dev, tcm, qmi);
		break;
	case NLMSG_CLIENT_SETUP:
		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);

		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
			return;
@@ -628,7 +630,7 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
			continue;
		do_wake = !bearer->grant_size;
		bearer->grant_size = DEFAULT_GRANT;
		bearer->grant_thresh = DEFAULT_GRANT;
		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
		bearer->seq = 0;
		bearer->ack_req = 0;
		bearer->tcp_bidir = false;