Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 52d89e71 authored by Subash Abhinov Kasiviswanathan
Browse files

dfc: QMAP DFC phase 2



Update to phase 2 QMAP definitions. Remove extra QMAP acks for
commands that already have response defined. DFC notify acks are
now sent over data EP.

Change-Id: I64eec4185025e9fca2d1fc8ff0a20e48c880d4d6
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 8f5e0fc8
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -699,6 +699,16 @@ int rmnet_get_powersave_notif(void *port)
	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
}
EXPORT_SYMBOL(rmnet_get_powersave_notif);

/* Return the real (physical) net_device backing the given rmnet port,
 * or NULL when no port is supplied.  Exported so that the DFC modules
 * can transmit acks directly over the data endpoint.
 */
struct net_device *rmnet_get_real_dev(void *port)
{
	struct rmnet_port *rmnet_port = port;

	return rmnet_port ? rmnet_port->dev : NULL;
}
EXPORT_SYMBOL(rmnet_get_real_dev);

#endif

/* Startup/Shutdown */
+3 −0
Original line number Diff line number Diff line
@@ -9,6 +9,9 @@
#include <linux/soc/qcom/qmi.h>
#include "qmi_rmnet_i.h"

#define DFC_ACK_TYPE_DISABLE 1
#define DFC_ACK_TYPE_THRESHOLD 2

#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+78 −42
Original line number Diff line number Diff line
@@ -10,9 +10,10 @@
#include <soc/qcom/rmnet_ctl.h>
#include "dfc_defs.h"


#define QMAP_DFC_VER		1

#define QMAP_CMD_DONE		-1

#define QMAP_CMD_REQUEST	0
#define QMAP_CMD_ACK		1
#define QMAP_CMD_UNSUPPORTED	2
@@ -21,10 +22,7 @@
#define QMAP_DFC_CONFIG		10
#define QMAP_DFC_IND		11
#define QMAP_DFC_QUERY		12
#define QMAP_DFC_QUERY_RESP	13
#define QMAP_DFC_END_MARKER_REQ	14
#define QMAP_DFC_END_MARKER_CNF	15
#define QMAP_DFC_POWER_SAVE	16
#define QMAP_DFC_END_MARKER	13

struct qmap_hdr {
	u8	cd_pad;
@@ -92,7 +90,8 @@ struct qmap_dfc_query_resp {
	u8			bearer_id;
	u8			tcp_bidir:1;
	u8			reserved:7;
	u8			reserved2;
	u8			invalid:1;
	u8			reserved2:7;
	__be32			grant;
	u32			reserved3;
	u32			reserved4;
@@ -120,24 +119,15 @@ struct qmap_dfc_end_marker_cnf {
	u32			reserved4;
} __aligned(1);

/* On-the-wire layout of the QMAP DFC power-save command.
 * Byte-packed (__aligned(1)); multi-byte fields are big-endian (__be32).
 * NOTE(review): ep_type/iface_id presumably identify the data endpoint
 * the power-save mode applies to — confirm against the QMAP spec.
 */
struct qmap_dfc_power_save {
	struct qmap_cmd_hdr	hdr;		/* common QMAP command header */
	u8			cmd_ver;	/* command version (QMAP_DFC_VER) */
	u8			reserved;
	u8			reserved2;
	u8			mode:1;		/* power-save mode on/off flag */
	u8			reserved3:7;
	__be32			ep_type;
	__be32			iface_id;
	u32			reserved4;
} __aligned(1);

/* Scratch indication messages reused for each translated QMAP event. */
static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
/* Single active DFC client instance; readers use RCU, hence __rcu. */
static struct dfc_qmi_data __rcu *qmap_dfc_data;
/* Monotonic transaction id for locally originated QMAP commands. */
static atomic_t qmap_txid;
/* Opaque handle returned by rmnet_ctl client registration. */
static void *rmnet_ctl_handle;

/* Forward declaration: sends the end-marker confirmation, echoing the
 * requester's tx_id so the peer can match request and ack. */
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
					 u8 bearer_id, u16 seq, u32 tx_id);

static void dfc_qmap_send_cmd(struct sk_buff *skb)
{
	trace_dfc_qmap(skb->data, skb->len, false);
@@ -148,6 +138,20 @@ static void dfc_qmap_send_cmd(struct sk_buff *skb)
	}
}

/* Send an ack for a received QMAP command over the data endpoint rather
 * than the control channel (phase 2: DFC notify acks go over the data EP).
 *
 * The incoming skb is reused as the ack in place — the caller has already
 * rewritten cmd_type in the header — so this only retargets the skb at
 * the real device and transmits it.  dev_queue_xmit() consumes the skb;
 * the caller must not free it afterwards.
 *
 * Removed the previous local `cmd` pointer: it was assigned from
 * skb->data but never used.
 */
static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
				     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_MAP);
	skb->dev = rmnet_get_real_dev(dfc->rmnet_port);

	trace_dfc_qmap(skb->data, skb->len, false);
	dev_queue_xmit(skb);
}

static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
			       struct sk_buff *skb)
{
@@ -200,10 +204,13 @@ static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
	struct qmap_dfc_query_resp *cmd;

	if (skb->len < sizeof(struct qmap_dfc_query_resp))
		return QMAP_CMD_INVALID;
		return QMAP_CMD_DONE;

	cmd = (struct qmap_dfc_query_resp *)skb->data;

	if (cmd->invalid)
		return QMAP_CMD_DONE;

	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
	qmap_flow_ind.flow_status_valid = 1;
	qmap_flow_ind.flow_status_len = 1;
@@ -223,11 +230,11 @@ static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,

	dfc_do_burst_flow_control(dfc, &qmap_flow_ind);

	return QMAP_CMD_ACK;
	return QMAP_CMD_DONE;
}

static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
				    u8 bearer_id, u16 seq_num)
				    u8 bearer_id, u16 seq_num, u32 tx_id)
{
	struct net_device *dev;
	struct qos_info *qos;
@@ -245,10 +252,12 @@ static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,

	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);

	if (bearer && bearer->last_seq == seq_num && bearer->grant_size)
	if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
		bearer->ack_req = 1;
	else
		dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num);
		bearer->ack_txid = tx_id;
	} else {
		dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
	}

	spin_unlock_bh(&qos->qos_lock);
}
@@ -263,17 +272,17 @@ static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,

	cmd = (struct qmap_dfc_end_marker_req *)skb->data;

	dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id,
				cmd->bearer_id, ntohs(cmd->seq_num));
	dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
				ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));

	return QMAP_CMD_ACK;
	return QMAP_CMD_DONE;
}

static void dfc_qmap_cmd_handler(struct sk_buff *skb)
{
	struct qmap_cmd_hdr *cmd;
	struct dfc_qmi_data *dfc;
	int rc = QMAP_CMD_ACK;
	int rc = QMAP_CMD_DONE;

	if (!skb)
		return;
@@ -284,10 +293,16 @@ static void dfc_qmap_cmd_handler(struct sk_buff *skb)
		goto free_skb;

	cmd = (struct qmap_cmd_hdr *)skb->data;
	if (!cmd->cd_bit || cmd->cmd_type != QMAP_CMD_REQUEST ||
	    skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
	if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
		goto free_skb;

	if (cmd->cmd_name == QMAP_DFC_QUERY) {
		if (cmd->cmd_type != QMAP_CMD_ACK)
			goto free_skb;
	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
		goto free_skb;
	}

	rcu_read_lock();

	dfc = rcu_dereference(qmap_dfc_data);
@@ -302,11 +317,11 @@ static void dfc_qmap_cmd_handler(struct sk_buff *skb)
		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
		break;

	case QMAP_DFC_QUERY_RESP:
	case QMAP_DFC_QUERY:
		rc = dfc_qmap_handle_query_resp(dfc, skb);
		break;

	case QMAP_DFC_END_MARKER_REQ:
	case QMAP_DFC_END_MARKER:
		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
		break;

@@ -314,13 +329,19 @@ static void dfc_qmap_cmd_handler(struct sk_buff *skb)
		rc = QMAP_CMD_UNSUPPORTED;
	}

	rcu_read_unlock();

	/* Send ack */
	if (rc != QMAP_CMD_DONE) {
		cmd->cmd_type = rc;
		if (cmd->cmd_name == QMAP_DFC_IND)
			dfc_qmap_send_inband_ack(dfc, skb);
		else
			dfc_qmap_send_cmd(skb);

		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

free_skb:
	kfree_skb(skb);
@@ -356,7 +377,7 @@ static void dfc_qmap_send_config(struct dfc_qmi_data *data)
	dfc_qmap_send_cmd(skb);
}

void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_query *dfc_query;
@@ -383,7 +404,8 @@ void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
	dfc_qmap_send_cmd(skb);
}

void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, u8 bearer_id, u16 seq)
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
					 u8 bearer_id, u16 seq, u32 tx_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_end_marker_cnf *em_cnf;
@@ -399,9 +421,9 @@ void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, u8 bearer_id, u16 seq)
	em_cnf->hdr.cd_bit = 1;
	em_cnf->hdr.mux_id = qos->mux_id;
	em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER_CNF;
	em_cnf->hdr.cmd_type = QMAP_CMD_REQUEST;
	em_cnf->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
	em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
	em_cnf->hdr.tx_id = htonl(tx_id);

	em_cnf->cmd_ver = QMAP_DFC_VER;
	em_cnf->bearer_id = bearer_id;
@@ -415,6 +437,20 @@ void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, u8 bearer_id, u16 seq)
	rmnet_map_tx_qmap_cmd(skb);
}

/* Dispatch a DFC ack of the given type for one bearer.
 *
 * DFC_ACK_TYPE_DISABLE: confirm the end marker, echoing the tx_id that
 * was stashed on the bearer when the request arrived; silently skipped
 * when the bearer is unknown.  DFC_ACK_TYPE_THRESHOLD: query the peer
 * for the current grant instead.  Any other type is ignored.
 */
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
	struct rmnet_bearer_map *bearer;

	switch (type) {
	case DFC_ACK_TYPE_DISABLE:
		bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
		if (bearer)
			dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq,
						     bearer->ack_txid);
		break;
	case DFC_ACK_TYPE_THRESHOLD:
		dfc_qmap_send_query(qos->mux_id, bearer_id);
		break;
	default:
		break;
	}
}

/* rmnet_ctl client hook table: routes downlink control packets to
 * dfc_qmap_cmd_handler (registration itself happens in client init,
 * not visible here). */
static struct rmnet_ctl_client_hooks cb = {
	.ctl_dl_client_hook = dfc_qmap_cmd_handler,
};
+3 −9
Original line number Diff line number Diff line
@@ -11,9 +11,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/dfc.h>

#define DFC_ACK_TYPE_DISABLE 1
#define DFC_ACK_TYPE_THRESHOLD 2

struct dfc_qmap_header {
	u8  pad_len:6;
	u8  reserved_bit:1;
@@ -878,10 +875,7 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
		return;

	if (dfc_qmap) {
		if (type == DFC_ACK_TYPE_DISABLE)
			dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq);
		else if (type == DFC_ACK_TYPE_THRESHOLD)
			dfc_qmap_send_query(mux_id, bearer_id);
		dfc_qmap_send_ack(qos, bearer_id, seq, type);
		return;
	}

@@ -1016,8 +1010,8 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,

		/* This is needed by qmap */
		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
			dfc_qmap_send_end_marker_cnf(
				qos, itm->bearer_id, itm->seq);
			dfc_qmap_send_ack(qos, itm->bearer_id,
					  itm->seq, DFC_ACK_TYPE_DISABLE);

		itm->grant_size = fc_info->num_bytes;
		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
+2 −2
Original line number Diff line number Diff line
@@ -33,6 +33,7 @@ struct rmnet_bearer_map {
	bool tcp_bidir;
	bool rat_switch;
	bool tx_off;
	u32 ack_txid;
};

struct rmnet_flow_map {
@@ -129,8 +130,7 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,

void dfc_qmap_client_exit(void *dfc_data);

void dfc_qmap_send_query(u8 mux_id, u8 bearer_id);
void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, u8 bearer_id, u16 seq);
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
Loading