Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5f149b99, authored by qctecmdr and committed by Gerrit (the friendly Code Review server)
Browse files

Merge "drivers: rmnet_perf: Fix pre-segmented descriptor handling"

parents e5b651f7 87d3b304
Loading
Loading
Loading
Loading
+9 −2
Original line number Diff line number Diff line
@@ -731,9 +731,16 @@ bool rmnet_perf_core_dissect_desc(struct rmnet_frag_descriptor *frag_desc,
				  struct rmnet_perf_pkt_info *pkt_info,
				  int offset, u16 pkt_len, bool *skip_hash)
{
	u8 *payload = frag_desc->hdr_ptr;

	/* If this was segmented, the headers aren't in the pkt_len. Add them
	 * back for consistency.
	 */
	if (payload != rmnet_frag_data_ptr(frag_desc))
		pkt_len += frag_desc->ip_len + frag_desc->trans_len;

	pkt_info->frag_desc = frag_desc;
	return rmnet_perf_core_dissect_pkt(rmnet_frag_data_ptr(frag_desc),
					   pkt_info, offset, pkt_len,
	return rmnet_perf_core_dissect_pkt(payload, pkt_info, offset, pkt_len,
					   skip_hash);
}

+22 −6
Original line number Diff line number Diff line
@@ -317,8 +317,12 @@ rmnet_perf_opt_add_flow_subfrags(struct rmnet_perf_opt_flow_node *flow_node)
		struct rmnet_frag_descriptor *new_frag;

		new_frag = pkt_list[i].frag_desc;
		/* Pull headers if they're there */
		if (new_frag->hdr_ptr == rmnet_frag_data_ptr(new_frag))
			rmnet_frag_pull(new_frag, perf->rmnet_port,
-				flow_node->ip_len + flow_node->trans_len);
+					flow_node->ip_len +
+					flow_node->trans_len);

		/* Move the fragment onto the subfrags list */
		list_move_tail(&new_frag->list, &head_frag->sub_frags);
		head_frag->gso_segs += (new_frag->gso_segs) ?: 1;
@@ -637,10 +641,6 @@ void rmnet_perf_opt_insert_pkt_in_flow(
		pkt_node->data_start = (unsigned char *)iph + header_len;
	}

	if (pkt_info->trans_proto == IPPROTO_TCP)
		flow_node->next_seq = ntohl(tp->seq) +
				      (__force u32) payload_len;

	if (pkt_info->first_packet) {
		/* Copy over flow information */
		flow_node->ep = pkt_info->ep;
@@ -673,7 +673,23 @@ void rmnet_perf_opt_insert_pkt_in_flow(
			flow_node->trans_proto =
				((struct ipv6hdr *)iph)->nexthdr;
		}

		/* Set initial TCP SEQ number */
		if (pkt_info->trans_proto == IPPROTO_TCP) {
			if (pkt_info->frag_desc &&
			    pkt_info->frag_desc->tcp_seq_set) {
				__be32 seq = pkt_info->frag_desc->tcp_seq;

				flow_node->next_seq = ntohl(seq);
			} else {
				flow_node->next_seq = ntohl(tp->seq);
			}
		}

	}

	if (pkt_info->trans_proto == IPPROTO_TCP)
		flow_node->next_seq += payload_len;
}

/* rmnet_perf_opt_ingress() - Core business logic of optimization framework
+6 −1
Original line number Diff line number Diff line
@@ -93,6 +93,7 @@ rmnet_perf_tcp_opt_pkt_can_be_merged(
				struct rmnet_perf_pkt_info *pkt_info)
{
	struct tcphdr *tp = pkt_info->trans_hdr.tp;
	u32 tcp_seq = ntohl(tp->seq);
	u16 gso_len;

	/* Use any previous GRO information, if present */
@@ -101,8 +102,12 @@ rmnet_perf_tcp_opt_pkt_can_be_merged(
	else
		gso_len = pkt_info->payload_len;

	/* Use stamped TCP SEQ number if we have it */
	if (pkt_info->frag_desc && pkt_info->frag_desc->tcp_seq_set)
		tcp_seq = ntohl(pkt_info->frag_desc->tcp_seq);

	/* 1. check ordering */
-	if (flow_node->next_seq ^ ntohl(tp->seq)) {
+	if (flow_node->next_seq ^ tcp_seq) {
		rmnet_perf_tcp_opt_fn_seq = flow_node->next_seq;
		rmnet_perf_tcp_opt_pkt_seq = ntohl(tp->seq);
		rmnet_perf_tcp_opt_flush_reason_cnt[