Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 58260cbc authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "net: qualcomm: rmnet: Allow uplink aggregation configuration"

parents f4fd332c e33692b9
Loading
Loading
Loading
Loading
+63 −9
Original line number Diff line number Diff line
@@ -38,10 +38,25 @@

/* Local Definitions and Declarations */

static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 2] = {
	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
	[IFLA_VLAN_EGRESS_QOS]	= { .len = sizeof(struct tcmsg) },
/* Vendor extensions to the upstream IFLA_RMNET_* netlink attribute space.
 * Numbering continues from __IFLA_RMNET_MAX so the extended IDs never
 * collide with the upstream attributes; __IFLA_RMNET_EXT_MAX sizes
 * rmnet_policy[] and is used as rtnl_link_ops.maxtype below.
 */
enum {
	IFLA_RMNET_DFC_QOS = __IFLA_RMNET_MAX,	/* struct tcmsg, QoS change via qmi_rmnet_change_link() */
	IFLA_RMNET_UL_AGG_PARAMS,		/* struct rmnet_egress_agg_params, uplink aggregation tuning */
	__IFLA_RMNET_EXT_MAX,			/* sentinel, not a real attribute */
};

/* Netlink attribute validation policy for rmnet link configuration.
 * Fixed-size attributes use .len so the core rejects undersized
 * payloads before the newlink/changelink handlers copy them out.
 */
static const struct nla_policy rmnet_policy[__IFLA_RMNET_EXT_MAX] = {
	[IFLA_RMNET_MUX_ID]		= { .type = NLA_U16 },
	[IFLA_RMNET_FLAGS]		= { .len = sizeof(struct ifla_rmnet_flags) },
	[IFLA_RMNET_DFC_QOS]		= { .len = sizeof(struct tcmsg) },
	[IFLA_RMNET_UL_AGG_PARAMS]	= { .len =
					    sizeof(struct rmnet_egress_agg_params) },
};

int rmnet_is_real_dev_registered(const struct net_device *real_dev)
@@ -182,6 +197,17 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
	port->data_format = data_format;

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		void *agg_params;
		unsigned long irq_flags;

		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		spin_lock_irqsave(&port->agg_lock, irq_flags);
		memcpy(&port->egress_agg_params, agg_params,
		       sizeof(port->egress_agg_params));
		spin_unlock_irqrestore(&port->agg_lock, irq_flags);
	}

	return 0;

err1:
@@ -286,6 +312,7 @@ static struct notifier_block rmnet_dev_notifier __read_mostly = {
static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	struct rmnet_egress_agg_params *agg_params;
	u16 mux_id;

	if (!data) {
@@ -296,6 +323,12 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
				return -ERANGE;
		}

		if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
			agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
			if (agg_params->agg_time < 3000000)
				return -EINVAL;
		}
	}

	return 0;
@@ -339,13 +372,24 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
		port->data_format = flags->flags & flags->mask;
	}

	if (data[IFLA_VLAN_EGRESS_QOS]) {
	if (data[IFLA_RMNET_DFC_QOS]) {
		struct tcmsg *tcm;

		tcm = nla_data(data[IFLA_VLAN_EGRESS_QOS]);
		tcm = nla_data(data[IFLA_RMNET_DFC_QOS]);
		qmi_rmnet_change_link(dev, port, tcm);
	}

	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
		void *agg_params;
		unsigned long irq_flags;

		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
		spin_lock_irqsave(&port->agg_lock, irq_flags);
		memcpy(&port->egress_agg_params, agg_params,
		       sizeof(port->egress_agg_params));
		spin_unlock_irqrestore(&port->agg_lock, irq_flags);
	}

	return 0;
}

@@ -356,7 +400,10 @@ static size_t rmnet_get_size(const struct net_device *dev)
		nla_total_size(2) +
		/* IFLA_RMNET_FLAGS */
		nla_total_size(sizeof(struct ifla_rmnet_flags)) +
		nla_total_size(sizeof(struct tcmsg));
		/* IFLA_RMNET_DFC_QOS */
		nla_total_size(sizeof(struct tcmsg)) +
		/* IFLA_RMNET_UL_AGG_PARAMS */
		nla_total_size(sizeof(struct rmnet_egress_agg_params));
}

static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -364,7 +411,7 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
	struct rmnet_priv *priv = netdev_priv(dev);
	struct net_device *real_dev;
	struct ifla_rmnet_flags f;
	struct rmnet_port *port;
	struct rmnet_port *port = NULL;

	real_dev = priv->real_dev;

@@ -383,6 +430,13 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
		goto nla_put_failure;

	if (port) {
		if (nla_put(skb, IFLA_RMNET_UL_AGG_PARAMS,
			    sizeof(port->egress_agg_params),
			    &port->egress_agg_params))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
@@ -391,7 +445,7 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)

struct rtnl_link_ops rmnet_link_ops __read_mostly = {
	.kind		= "rmnet",
	.maxtype	= __IFLA_RMNET_MAX,
	.maxtype	= __IFLA_RMNET_EXT_MAX,
	.priv_size	= sizeof(struct rmnet_priv),
	.setup		= rmnet_vnd_setup,
	.validate	= rmnet_rtnl_validate,
+7 −2
Original line number Diff line number Diff line
@@ -32,6 +32,12 @@ struct rmnet_port_priv_stats {
	u64 dl_trl_count;
};

/* Uplink (egress) packet-aggregation tuning, carried verbatim in the
 * IFLA_RMNET_UL_AGG_PARAMS netlink attribute and memcpy'd into
 * rmnet_port under port->agg_lock.  Field layout is therefore part of
 * the userspace wire format — do not reorder or resize members.
 */
struct rmnet_egress_agg_params {
	u16 agg_size;	/* max aggregated frame size in bytes (compared against skb->len) */
	u16 agg_count;	/* max number of packets folded into one aggregate */
	u32 agg_time;	/* aggregation flush timeout in ns (fed to ns_to_ktime for the hrtimer) */
};

/* One instance of this structure is instantiated for each real_dev associated
 * with rmnet.
 */
@@ -44,8 +50,7 @@ struct rmnet_port {
	struct net_device *bridge_ep;
	void *rmnet_perf;

	u16 egress_agg_size;
	u16 egress_agg_count;
	struct rmnet_egress_agg_params egress_agg_params;

	/* Protect aggregation related elements */
	spinlock_t agg_lock;
+9 −6
Original line number Diff line number Diff line
@@ -1128,7 +1128,7 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
		 * sparse, don't aggregate. We will need to tune this later
		 */
		diff = timespec_sub(port->agg_last, last);
		size = port->egress_agg_size - skb->len;
		size = port->egress_agg_params.agg_size - skb->len;

		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
		    size <= 0) {
@@ -1155,9 +1155,10 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
		goto schedule;
	}
	diff = timespec_sub(port->agg_last, port->agg_time);
	size = port->egress_agg_params.agg_size - port->agg_skb->len;

	if (skb->len > (port->egress_agg_size - port->agg_skb->len) ||
	    port->agg_count >= port->egress_agg_count ||
	if (skb->len > size ||
	    port->agg_count >= port->egress_agg_params.agg_count ||
	    diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
		agg_skb = port->agg_skb;
		agg_count = port->agg_count;
@@ -1179,7 +1180,8 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
schedule:
	if (port->agg_state != -EINPROGRESS) {
		port->agg_state = -EINPROGRESS;
		hrtimer_start(&port->hrtimer, ns_to_ktime(3000000),
		hrtimer_start(&port->hrtimer,
			      ns_to_ktime(port->egress_agg_params.agg_time),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&port->agg_lock, flags);
@@ -1189,8 +1191,9 @@ void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
	port->egress_agg_size = 8192;
	port->egress_agg_count = 20;
	port->egress_agg_params.agg_size = 8192;
	port->egress_agg_params.agg_count = 20;
	port->egress_agg_params.agg_time = 3000000;
	spin_lock_init(&port->agg_lock);

	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);