
Commit 20f73a0d authored by Subash Abhinov Kasiviswanathan, committed by Gerrit - the friendly Code Review server

drivers: rmnet: shs: add segmentation levels for slow start flows

Add various levels of segmentation for flows in TCP slow start.
Instead of segmentation breaking every packet down to 1500 bytes,
control how finely larger packets are broken up by passing
segs_per_skb, which indicates how many MTU-sized segments each
newly segmented SKB should carry, e.g. segs_per_skb = 2 means up
to 2*MTU of data can be passed in each segmented skb.

Change-Id: I422a794f3b1d3f2e313ce8f89695a536984cd947
Acked-by: Ryan Chapman <rchapman@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent b74079bc
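To make the segs_per_skb arithmetic above concrete, here is a minimal, self-contained C sketch of the size math. It is an illustration only, not part of the commit; the 1500-byte MTU and the five-segment flow are assumed sample values rather than anything taken from the driver.

#include <stdio.h>

/* Round-up division, as the kernel's DIV_ROUND_UP macro computes it. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int mtu = 1500;       /* assumed per-segment payload */
	unsigned int gso_segs = 5;     /* assumed segments in the large skb */
	unsigned int segs_per_skb = 2; /* the new segmentation level */

	/* Each resulting skb may carry up to segs_per_skb * MTU of data,
	 * so the flow is delivered in fewer, larger chunks than full
	 * per-MTU segmentation would produce.
	 */
	printf("%u skbs of up to %u bytes each\n",
	       DIV_ROUND_UP(gso_segs, segs_per_skb), segs_per_skb * mtu);
	return 0;
}

With these sample values the sketch prints "3 skbs of up to 3000 bytes each", versus 5 separate 1500-byte deliveries under full segmentation.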
+51 −8
@@ -388,8 +388,49 @@ static void rmnet_shs_deliver_skb_wq(struct sk_buff *skb)
 	gro_cells_receive(&priv->gro_cells, skb);
 }
 
+static struct sk_buff *rmnet_shs_skb_partial_segment(struct sk_buff *skb,
+						     u16 segments_per_skb)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	struct sk_buff *segments, *tmp;
+	u16 gso_size = shinfo->gso_size;
+	u16 gso_segs = shinfo->gso_segs;
+
+	if (segments_per_skb >= gso_segs) {
+		return NULL;
+	}
+
+	/* Update the numbers for the main skb */
+	shinfo->gso_segs = DIV_ROUND_UP(gso_segs, segments_per_skb);
+	shinfo->gso_size = gso_size * segments_per_skb;
+	segments = __skb_gso_segment(skb, NETIF_F_SG, false);
+	if (unlikely(IS_ERR_OR_NULL(segments))) {
+		/* return to the original state */
+		shinfo->gso_size = gso_size;
+		shinfo->gso_segs = gso_segs;
+		return NULL;
+	}
+
+	/* Mark correct number of segments and correct size in the new skbs */
+	for (tmp = segments; tmp; tmp = tmp->next) {
+		struct skb_shared_info *new_shinfo = skb_shinfo(tmp);
+
+		new_shinfo->gso_size = gso_size;
+		if (gso_segs >= segments_per_skb)
+			new_shinfo->gso_segs = segments_per_skb;
+		else
+			new_shinfo->gso_segs = gso_segs;
+
+		gso_segs -= segments_per_skb;
+	}
+
+	return segments;
+}
+
 /* Delivers skbs after segmenting, directly to network stack */
-static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
+static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb,
+					    u8 ctext,
+					    u16 segs_per_skb)
 {
 	struct sk_buff *skb = NULL;
 	struct sk_buff *nxt_skb = NULL;
@@ -399,8 +440,9 @@ static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
 	SHS_TRACE_LOW(RMNET_SHS_DELIVER_SKB, RMNET_SHS_DELIVER_SKB_START,
 			    0x1, 0xDEF, 0xDEF, 0xDEF, in_skb, NULL);
 
-	segs = __skb_gso_segment(in_skb, NETIF_F_SG, false);
-	if (unlikely(IS_ERR_OR_NULL(segs))) {
+	segs = rmnet_shs_skb_partial_segment(in_skb, segs_per_skb);
+
+	if (segs == NULL) {
 		if (ctext == RMNET_RX_CTXT)
 			netif_receive_skb(in_skb);
 		else
@@ -409,7 +451,7 @@ static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
 		return;
 	}
 
-	/* Send segmeneted skb */
+	/* Send segmented skb */
 	for ((skb = segs); skb != NULL; skb = nxt_skb) {
 		nxt_skb = skb->next;
 
@@ -926,7 +968,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
 	u32 skb_bytes_delivered = 0;
 	u32 hash2stamp = 0; /* the default value of skb->hash*/
 	u8 map = 0, maplen = 0;
-	u8 segment_enable = 0;
+	u16 segs_per_skb = 0;
 
 	if (!node->skb_list.head)
 		return;
@@ -948,7 +990,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
 			     node->skb_list.num_parked_bytes,
 			     node, node->skb_list.head);
 
-	segment_enable = node->hstats->segment_enable;
+	segs_per_skb = (u16) node->hstats->segs_per_skb;
 
 	for ((skb = node->skb_list.head); skb != NULL; skb = nxt_skb) {
 
@@ -960,8 +1002,9 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
 		skbs_delivered += 1;
 		skb_bytes_delivered += skb->len;
 
-		if (segment_enable) {
-			rmnet_shs_deliver_skb_segmented(skb, ctext);
+		if (segs_per_skb > 0) {
+			rmnet_shs_deliver_skb_segmented(skb, ctext,
+							segs_per_skb);
 		} else {
 			if (ctext == RMNET_RX_CTXT)
 				rmnet_shs_deliver_skb(skb);
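A note on the bookkeeping in rmnet_shs_skb_partial_segment() above: the main skb's gso_segs is rounded up, so a flow whose segment count is not a multiple of segments_per_skb ends with a short tail skb. The sketch below replays that tail handling outside the kernel; the sample values are assumptions for illustration, not taken from the driver.

#include <stdio.h>

int main(void)
{
	int gso_segs = 5;         /* assumed segments in the original skb */
	int segments_per_skb = 2; /* assumed segmentation level */
	int i = 0;

	/* Mirrors the loop that stamps each new skb: full groups of
	 * segments_per_skb first, then whatever remains in the tail.
	 */
	while (gso_segs > 0) {
		int in_this_skb = gso_segs >= segments_per_skb ?
				  segments_per_skb : gso_segs;

		printf("skb %d carries %d segment(s)\n", i++, in_this_skb);
		gso_segs -= segments_per_skb;
	}
	return 0;
}

With 5 segments and a level of 2 this yields three skbs carrying 2, 2, and 1 segments, matching DIV_ROUND_UP(5, 2) = 3.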
+12 −10
@@ -42,6 +42,8 @@ MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals

 #define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)
 
+#define RMNET_SHS_SEGS_PER_SKB_DEFAULT (2)
+
 unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
 module_param(rmnet_shs_wq_interval_ms, uint, 0644);
 MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
@@ -411,7 +413,7 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
 		/* Start TCP flows with segmentation if userspace connected */
 		if (rmnet_shs_userspace_connected &&
 		    node_p->hstats->skb_tport_proto == IPPROTO_TCP)
-			node_p->hstats->segment_enable = 1;
+			node_p->hstats->segs_per_skb = RMNET_SHS_SEGS_PER_SKB_DEFAULT;
 
 		node_p->hstats->node = node_p;
 		node_p->hstats->c_epoch = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) +
@@ -1291,7 +1293,7 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
 }
 
 /* Change flow segmentation, return 1 if set, 0 otherwise */
-int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb)
 {
 	struct rmnet_shs_skbn_s *node_p;
 	struct rmnet_shs_wq_hstat_s *hstat_p;
@@ -1311,22 +1313,22 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
 		if (hstat_p->hash != hash_to_set)
 			continue;
 
-		rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u",
-		       hash_to_set, seg_enable);
+		rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u",
+		       hash_to_set, segs_per_skb);
 
 		trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
 				RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
-				hstat_p->hash, seg_enable,
+				hstat_p->hash, segs_per_skb,
 				0xDEF, 0xDEF, hstat_p, NULL);
 
-		node_p->hstats->segment_enable = seg_enable;
+		node_p->hstats->segs_per_skb = segs_per_skb;
 		spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 		return 1;
 	}
 	spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
 
-	rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
-	       hash_to_set, seg_enable);
+	rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u not set - hash not found",
+	       hash_to_set, segs_per_skb);
 	return 0;
 }

@@ -1966,7 +1968,7 @@ void rmnet_shs_wq_filter(void)
 			continue;
 		}
 
-		if (hnode->node->hstats->segment_enable) {
+		if (hnode->node->hstats->segs_per_skb > 0) {
 			rmnet_shs_cpu_node_tbl[cur_cpu].seg++;
 		}
 	}
@@ -2003,7 +2005,7 @@ void rmnet_shs_wq_update_stats(void)
 				}
 			} else {
 				/* Disable segmentation if userspace gets disconnected */
-				hnode->node->hstats->segment_enable = 0;
+				hnode->node->hstats->segs_per_skb = 0;
 			}
 		}
 	}
+2 −2
@@ -100,7 +100,7 @@ struct rmnet_shs_wq_hstat_s {
 	u8 in_use;
 	u8 is_perm;
 	u8 is_new_flow;
-	u8 segment_enable; /* segment coalesces packets */
+	u8 segs_per_skb; /* segments per skb */
 };
 
 struct rmnet_shs_wq_cpu_rx_pkt_q_s {
@@ -288,7 +288,7 @@ void rmnet_shs_wq_refresh_new_flow_list(void);
 int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
 				  u32 sugg_type);
 
-int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable);
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb);
 
 void rmnet_shs_wq_ep_lock_bh(void);
 
+5 −5
@@ -209,24 +209,24 @@ int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info
 	if (na) {
 		if (nla_memcpy(&seg_info, na, sizeof(seg_info)) > 0) {
 			rm_err("SHS_GNL: recv segmentation req "
-			       "hash_to_set = 0x%x segment_enable = %u",
+			       "hash_to_set = 0x%x segs_per_skb = %u",
 			       seg_info.hash_to_set,
-			       seg_info.segment_enable);
+			       seg_info.segs_per_skb);
 
 			rc = rmnet_shs_wq_set_flow_segmentation(seg_info.hash_to_set,
-								seg_info.segment_enable);
+								seg_info.segs_per_skb);
 
 			if (rc == 1) {
 				rmnet_shs_genl_send_int_to_userspace(info, 0);
 				trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
 					RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
-					seg_info.hash_to_set, seg_info.segment_enable,
+					seg_info.hash_to_set, seg_info.segs_per_skb,
 					0xDEF, 0xDEF, NULL, NULL);
 			} else {
 				rmnet_shs_genl_send_int_to_userspace(info, -1);
 				trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
 					RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
-					seg_info.hash_to_set, seg_info.segment_enable,
+					seg_info.hash_to_set, seg_info.segs_per_skb,
 					0xDEF, 0xDEF, NULL, NULL);
 				return 0;
 			}
+1 −1
@@ -55,7 +55,7 @@ struct rmnet_shs_wq_sugg_info {

 struct rmnet_shs_wq_seg_info {
 	uint32_t hash_to_set;
-	uint32_t segment_enable;
+	uint32_t segs_per_skb;
 };
 
 /* Function Prototypes */
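For context on how this struct is used: the commit only renames the field, and the generic netlink plumbing that carries the struct to rmnet_shs_genl_set_flow_segmentation() lies outside this diff. Below is a hedged userspace-side sketch; the fill helper is hypothetical, and the send path is deliberately omitted.

#include <stdint.h>
#include <string.h>

/* Mirrors the struct above; shared with the kernel over generic netlink. */
struct rmnet_shs_wq_seg_info {
	uint32_t hash_to_set;
	uint32_t segs_per_skb;
};

/* Hypothetical helper a userspace policy daemon might use to build the
 * request payload; the actual netlink send path is not part of this diff.
 */
static void fill_seg_request(struct rmnet_shs_wq_seg_info *req,
			     uint32_t flow_hash, uint32_t level)
{
	memset(req, 0, sizeof(*req));
	req->hash_to_set = flow_hash; /* hash of the flow to reconfigure */
	req->segs_per_skb = level;    /* 0 disables segmentation entirely */
}

Passing level = 0 matches the kernel-side checks above (segs_per_skb > 0 gates segmentation), so the same field now both selects the segmentation level and disables the feature.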