Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0b9278cf authored by Sean Tranchetti, committed by Gerrit - the friendly Code Review server
Browse files

drivers: rmnet_perf: Flush on IP and QMAP length mismatch



Commit e4d2a0199758 ("drivers: rmnet: perf: Determine lengths based on
IP") gave preference to the lengths set in the IP headers over the QMAP
packet length in order to avoid absorbing any padding after the IP packet
into the final coalesced packet.

Unfortunately, this did not account for the case when the IP length was
LARGER than the QMAP length, which can cause out of bounds access when
making the new packet. This patch handles both cases by checking both
the QMAP and the IP header lengths, and flushing the packet to the stack
if they disagree.

Change-Id: Iaa2c179aac3514914d48580dbbf54e9cba792278
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
parent c7cd0928
Loading
Loading
Loading
Loading
+46 −10
Original line number Original line Diff line number Diff line
@@ -110,6 +110,11 @@ module_param(rmnet_perf_frag_flush, ulong, 0444);
MODULE_PARM_DESC(rmnet_perf_frag_flush,
MODULE_PARM_DESC(rmnet_perf_frag_flush,
		 "Number of packet fragments flushed to stack");
		 "Number of packet fragments flushed to stack");


unsigned long int rmnet_perf_qmap_size_mismatch = 0;
module_param(rmnet_perf_qmap_size_mismatch, ulong, 0444);
MODULE_PARM_DESC(rmnet_perf_qmap_size_mismatch,
		 "Number of mismatches b/w QMAP and IP lengths");

/* Handle deag by default for legacy behavior */
/* Handle deag by default for legacy behavior */
static bool rmnet_perf_ingress_deag = true;
static bool rmnet_perf_ingress_deag = true;
module_param(rmnet_perf_ingress_deag, bool, 0444);
module_param(rmnet_perf_ingress_deag, bool, 0444);
@@ -563,6 +568,8 @@ int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,
 * @pkt_info: struct to fill in
 * @pkt_info: struct to fill in
 * @pkt_len: length of the packet
 * @pkt_len: length of the packet
 * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
 * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
 * @len_mismatch: set to true if there is a mismatch between the IP length and
 * the QMAP length of the packet
 *
 *
 * Return:
 * Return:
 *		- true if packet needs to be dropped
 *		- true if packet needs to be dropped
@@ -570,9 +577,11 @@ int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,
 **/
 **/
bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
				 struct rmnet_perf_pkt_info *pkt_info,
				 struct rmnet_perf_pkt_info *pkt_info,
				 int offset, u16 pkt_len, bool *skip_hash)
				 int offset, u16 pkt_len, bool *skip_hash,
				 bool *len_mismatch)
{
{
	bool flush = true;
	bool flush = true;
	bool mismatch = false;


	payload += offset;
	payload += offset;
	pkt_info->ip_proto = (*payload & 0xF0) >> 4;
	pkt_info->ip_proto = (*payload & 0xF0) >> 4;
@@ -588,6 +597,7 @@ bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
			goto done;
			goto done;
		}
		}


		mismatch = pkt_len != ntohs(iph->tot_len);
		pkt_info->ip_len = iph->ihl * 4;
		pkt_info->ip_len = iph->ihl * 4;
		pkt_info->trans_proto = iph->protocol;
		pkt_info->trans_proto = iph->protocol;


@@ -640,6 +650,7 @@ bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
			goto done;
			goto done;
		}
		}


		mismatch = pkt_len != ntohs(ip6h->payload_len) + sizeof(*ip6h);
		pkt_info->ip_len = (u16)len;
		pkt_info->ip_len = (u16)len;
		pkt_info->trans_proto = protocol;
		pkt_info->trans_proto = protocol;


@@ -688,6 +699,12 @@ bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
	pkt_info->payload_len = pkt_len - pkt_info->ip_len -
	pkt_info->payload_len = pkt_len - pkt_info->ip_len -
				pkt_info->trans_len;
				pkt_info->trans_len;
	*skip_hash = flush;
	*skip_hash = flush;
	*len_mismatch = mismatch;
	if (mismatch) {
		rmnet_perf_qmap_size_mismatch++;
		if (!rmnet_perf_core_is_deag_mode())
			pkt_info->frag_desc->hdrs_valid = 0;
	}


	return false;
	return false;
}
}
@@ -699,6 +716,8 @@ bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
 * @offset: offset from start of skb data to the IP header
 * @offset: offset from start of skb data to the IP header
 * @pkt_len: length of the packet
 * @pkt_len: length of the packet
 * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
 * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
 * @len_mismatch: set to true if there is a mismatch between the IP length and
 * the QMAP length of the packet
 *
 *
 * Return:
 * Return:
 *		- true if packet needs to be dropped
 *		- true if packet needs to be dropped
@@ -707,11 +726,12 @@ bool rmnet_perf_core_dissect_pkt(unsigned char *payload,


bool rmnet_perf_core_dissect_skb(struct sk_buff *skb,
bool rmnet_perf_core_dissect_skb(struct sk_buff *skb,
				 struct rmnet_perf_pkt_info *pkt_info,
				 struct rmnet_perf_pkt_info *pkt_info,
				 int offset, u16 pkt_len, bool *skip_hash)
				 int offset, u16 pkt_len, bool *skip_hash,
				 bool *len_mismatch)
{
{
	pkt_info->skb = skb;
	pkt_info->skb = skb;
	return rmnet_perf_core_dissect_pkt(skb->data, pkt_info, offset,
	return rmnet_perf_core_dissect_pkt(skb->data, pkt_info, offset,
					   pkt_len, skip_hash);
					   pkt_len, skip_hash, len_mismatch);
}
}


/* rmnet_perf_core_dissect_desc() - Extract packet header metadata for easier
/* rmnet_perf_core_dissect_desc() - Extract packet header metadata for easier
@@ -721,6 +741,8 @@ bool rmnet_perf_core_dissect_skb(struct sk_buff *skb,
 * @offset: offset from start of descriptor payload to the IP header
 * @offset: offset from start of descriptor payload to the IP header
 * @pkt_len: length of the packet
 * @pkt_len: length of the packet
 * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
 * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
 * @len_mismatch: set to true if there is a mismatch between the IP length and
 * the QMAP length of the packet
 *
 *
 * Return:
 * Return:
 *		- true if packet needs to be flushed out immediately
 *		- true if packet needs to be flushed out immediately
@@ -729,7 +751,8 @@ bool rmnet_perf_core_dissect_skb(struct sk_buff *skb,


bool rmnet_perf_core_dissect_desc(struct rmnet_frag_descriptor *frag_desc,
bool rmnet_perf_core_dissect_desc(struct rmnet_frag_descriptor *frag_desc,
				  struct rmnet_perf_pkt_info *pkt_info,
				  struct rmnet_perf_pkt_info *pkt_info,
				  int offset, u16 pkt_len, bool *skip_hash)
				  int offset, u16 pkt_len, bool *skip_hash,
				  bool *len_mismatch)
{
{
	u8 *payload = frag_desc->hdr_ptr;
	u8 *payload = frag_desc->hdr_ptr;


@@ -741,7 +764,7 @@ bool rmnet_perf_core_dissect_desc(struct rmnet_frag_descriptor *frag_desc,


	pkt_info->frag_desc = frag_desc;
	pkt_info->frag_desc = frag_desc;
	return rmnet_perf_core_dissect_pkt(payload, pkt_info, offset, pkt_len,
	return rmnet_perf_core_dissect_pkt(payload, pkt_info, offset, pkt_len,
					   skip_hash);
					   skip_hash, len_mismatch);
}
}


void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
@@ -752,13 +775,14 @@ void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
	unsigned int offset = sizeof(struct rmnet_map_header);
	unsigned int offset = sizeof(struct rmnet_map_header);
	u16 pkt_len;
	u16 pkt_len;
	bool skip_hash = false;
	bool skip_hash = false;
	bool len_mismatch = false;


	pkt_len = frame_len - offset - trailer_len;
	pkt_len = frame_len - offset - trailer_len;
	memset(pkt_info, 0, sizeof(*pkt_info));
	memset(pkt_info, 0, sizeof(*pkt_info));
	pkt_info->ep = ep;
	pkt_info->ep = ep;


	if (rmnet_perf_core_dissect_skb(skb, pkt_info, offset, pkt_len,
	if (rmnet_perf_core_dissect_skb(skb, pkt_info, offset, pkt_len,
					&skip_hash)) {
					&skip_hash, &len_mismatch)) {
		rmnet_perf_core_non_ip_count++;
		rmnet_perf_core_non_ip_count++;
		/* account for the bulk add in rmnet_perf_core_deaggregate() */
		/* account for the bulk add in rmnet_perf_core_deaggregate() */
		rmnet_perf_core_pre_ip_count--;
		rmnet_perf_core_pre_ip_count--;
@@ -769,6 +793,12 @@ void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
		/* We're flushing anyway, so no need to check result */
		/* We're flushing anyway, so no need to check result */
		rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
		rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
		goto flush;
		goto flush;
	} else if (len_mismatch) {
		/* We're flushing anyway, so no need to check result */
		rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
		/* Flush anything in the hash to avoid any OOO */
		rmnet_perf_opt_flush_flow_by_hash(pkt_info->hash_key);
		goto flush;
	}
	}


	if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
	if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
@@ -797,12 +827,13 @@ void rmnet_perf_core_desc_entry(struct rmnet_frag_descriptor *frag_desc,
	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
	u16 pkt_len = skb_frag_size(&frag_desc->frag);
	u16 pkt_len = skb_frag_size(&frag_desc->frag);
	bool skip_hash = true;
	bool skip_hash = true;
	bool len_mismatch = false;


	rmnet_perf_core_grab_lock();
	rmnet_perf_core_grab_lock();
	perf->rmnet_port = port;
	perf->rmnet_port = port;
	memset(&pkt_info, 0, sizeof(pkt_info));
	memset(&pkt_info, 0, sizeof(pkt_info));
	if (rmnet_perf_core_dissect_desc(frag_desc, &pkt_info, 0, pkt_len,
	if (rmnet_perf_core_dissect_desc(frag_desc, &pkt_info, 0, pkt_len,
					 &skip_hash)) {
					 &skip_hash, &len_mismatch)) {
		rmnet_perf_core_non_ip_count++;
		rmnet_perf_core_non_ip_count++;
		rmnet_recycle_frag_descriptor(frag_desc, port);
		rmnet_recycle_frag_descriptor(frag_desc, port);
		rmnet_perf_core_release_lock();
		rmnet_perf_core_release_lock();
@@ -811,13 +842,18 @@ void rmnet_perf_core_desc_entry(struct rmnet_frag_descriptor *frag_desc,


	/* We know the packet is an IP packet now */
	/* We know the packet is an IP packet now */
	rmnet_perf_core_pre_ip_count++;
	rmnet_perf_core_pre_ip_count++;
	if (skip_hash)
	if (skip_hash) {
		goto flush;
		goto flush;
	} else if (len_mismatch) {
		/* Flush everything in the hash to avoid OOO */
		rmnet_perf_opt_flush_flow_by_hash(pkt_info.hash_key);
		goto flush;
	}


	/* Skip packets with bad checksums.
	/* Skip packets with bad checksums.
	 * This check is delayed here to allow packets that won't be
	 * This check is delayed here to allow packets that won't be
	 * checksummed by hardware (i.e. non-TCP/UDP data, fragments) to be
	 * checksummed by hardware (non-TCP/UDP data, fragments, padding) to be
	 * flushed by the above check. This ensures that we report statistics
	 * flushed by the above checks. This ensures that we report statistics
	 * correctly (i.e. rmnet_perf_frag_flush increases for each fragment),
	 * correctly (i.e. rmnet_perf_frag_flush increases for each fragment),
	 * and don't report packets with valid checksums that weren't offloaded
	 * and don't report packets with valid checksums that weren't offloaded
	 * as "bad checksum" packets.
	 * as "bad checksum" packets.
+18 −0
Original line number Original line Diff line number Diff line
@@ -572,6 +572,24 @@ void rmnet_perf_opt_flush_single_flow_node(
	}
	}
}
}


/* rmnet_perf_opt_flush_flow_by_hash() - Iterate through all flow nodes
 *	that match a certain hash and flush the match
 * @hash_val: hash value we are looking to match and hence flush
 *
 * Return:
 *    - void
 **/
void rmnet_perf_opt_flush_flow_by_hash(u32 hash_val)
{
	struct rmnet_perf_opt_flow_node *node;

	/* Walk every node in the bucket; skip entries whose full hash
	 * differs or that hold no packets.
	 */
	hash_for_each_possible(rmnet_perf_opt_fht, node, list, hash_val) {
		if (node->hash_value != hash_val)
			continue;

		if (node->num_pkts_held > 0)
			rmnet_perf_opt_flush_single_flow_node(node);
	}
}

/* rmnet_perf_opt_flush_all_flow_nodes() - Iterate through all flow nodes
/* rmnet_perf_opt_flush_all_flow_nodes() - Iterate through all flow nodes
 *		and flush them individually
 *		and flush them individually
 *
 *
+1 −0
Original line number Original line Diff line number Diff line
@@ -100,6 +100,7 @@ rmnet_perf_opt_update_flow(struct rmnet_perf_opt_flow_node *flow_node,
			   struct rmnet_perf_pkt_info *pkt_info);
			   struct rmnet_perf_pkt_info *pkt_info);
void rmnet_perf_opt_flush_single_flow_node(
void rmnet_perf_opt_flush_single_flow_node(
				struct rmnet_perf_opt_flow_node *flow_node);
				struct rmnet_perf_opt_flow_node *flow_node);
void rmnet_perf_opt_flush_flow_by_hash(u32 hash_val);
void rmnet_perf_opt_flush_all_flow_nodes(void);
void rmnet_perf_opt_flush_all_flow_nodes(void);
void rmnet_perf_opt_chain_end(void);
void rmnet_perf_opt_chain_end(void);
void rmnet_perf_opt_insert_pkt_in_flow(
void rmnet_perf_opt_insert_pkt_in_flow(