
Commit 1ff99175 authored by Conner Huff, committed by Gerrit - the friendly Code Review server

drivers: rmnet: perf: Safely free recycle buffers



Only free recycle buffers which have been successfully
allocated. In the event an allocation fails, deallocate all
previously allocated buffers and disable the feature entirely.
Also, handle other resources more carefully during module
bring-up and teardown, and make sure pointers are set to NULL
when their allocations fail.
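
A minimal sketch of the unwind-on-failure pattern described above,
using hypothetical names (the driver's actual implementation is
rmnet_perf_config_alloc_64k_buffs() in the diff below):

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Allocate 'count' skbs of 'size' bytes. If any allocation fails, free
 * only the entries that were actually allocated and set them to NULL so
 * a later teardown pass cannot double-free them.
 */
static int sketch_alloc_skb_pool(struct sk_buff *pool[], int count,
				 unsigned int size)
{
	int i;

	for (i = 0; i < count; i++) {
		pool[i] = alloc_skb(size, GFP_ATOMIC);
		if (!pool[i]) {
			while (--i >= 0) {
				kfree_skb(pool[i]);
				pool[i] = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;
}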

Additionally, modify how allocation failures on init are
handled so that the failure of one feature no longer brings
down the operation of another. core_meta now has access to a
flag which indicates whether the callbacks were registered
successfully. If they were not, the flag signals that we must
flush at the end of every chain, since we can't rely on bm/ps.
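
A minimal sketch of that fallback, again with hypothetical names
(the real flag is callbacks_valid in
struct rmnet_perf_core_burst_marker_state, added in the diff below):

#include <linux/types.h>

struct sketch_bm_state {
	bool callbacks_valid;	/* true only if bm/ps callbacks registered */
	u32 expect_packets;	/* packets promised by the last DL marker */
};

static void sketch_flush_all(void);	/* stand-in for the real flush path */

/* At the end of every deaggregated chain: if the callbacks never
 * registered, the burst-marker state cannot be trusted, so flush
 * unconditionally instead of waiting for the end marker.
 */
static void sketch_end_of_chain(struct sketch_bm_state *bm)
{
	if (!bm->callbacks_valid || (int)bm->expect_packets <= 0)
		sketch_flush_all();
}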

[ 1026.970767] Unable to handle kernel paging request at virtual
address ffffffbfaaaaaaa0
[ 1026.979098] Mem abort info:
[ 1026.981973] Exception class = DABT (current EL), IL = 32 bits
[ 1026.988147] SET = 0, FnV = 0
[ 1026.991687] EA = 0, S1PTW = 0
[ 1026.995578] FSC = 6
[ 1026.997993] Data abort info:
[ 1027.001962] ISV = 0, ISS = 0x00000006
[ 1027.006283] CM = 0, WnR = 0
[ 1027.011503] swapper pgtable: 4k pages, 39-bit VAs, pgd = ffffff812120f000
[ 1027.018667] [ffffffbfaaaaaaa0] *pgd=00000001bcbfc003,
*pud=00000001bcbfc003, *pmd=0000000000000000
[ 1027.028686] Internal error: Oops: 96000006 [#1] PREEMPT SMP
[ 1027.048533] mhi 0001:01:00.0: enabling device (0000 - 0002)
[ 1027.084511] CPU: 5 PID: 17777 Comm: modprobe Tainted: G S W O 4.14.83+ #1
[ 1027.101460] task: ffffffec3f430080 task.stack: ffffff8052338000
[ 1027.107541] pc : kfree+0xe8/0x62c
[ 1027.110954] lr : rmnet_perf_config_notify_cb+0xf8/0x484 [rmnet_perf]
[ 1027.328866] Call trace:
[ 1027.331381] kfree+0xe8/0x62c
[ 1027.334425] rmnet_perf_config_notify_cb+0xf8/0x484 [rmnet_perf]
[ 1027.340592] unregister_netdevice_notifier+0xc0/0x114
[ 1027.345780] rmnet_perf_exit+0x40/0x60c [rmnet_perf]
[ 1027.350870] SyS_delete_module+0x1b8/0x224
[ 1027.355078] el0_svc_naked+0x34/0x38
[ 1027.358746] Code: f2ffffe9 aa090109 d2dff7ea f2ffffea (f9400129)

Change-Id: Ieb12697fe23b6e7de39ae352a5481e6ae454e126
Signed-off-by: Conner Huff <chuff@codeaurora.org>
parent 0e8ad333
+37 −14
@@ -55,18 +55,30 @@ rmnet_perf_config_alloc_64k_buffs(struct rmnet_perf *perf)
{
	int i;
	struct sk_buff *skbn;
	struct rmnet_perf_core_64k_buff_pool *pool = perf->core_meta->buff_pool;
	enum rmnet_perf_resource_management_e return_val;
	struct rmnet_perf_core_64k_buff_pool *pool = perf->core_meta->buff_pool;

	return_val = RMNET_PERF_RESOURCE_MGMT_SUCCESS;

	memset(pool, 0, sizeof(struct rmnet_perf_core_64k_buff_pool));
	pool->index = 0;
	for (i = 0; i < RMNET_PERF_NUM_64K_BUFFS; i++) {
		skbn = alloc_skb(RMNET_PERF_CORE_RECYCLE_SKB_SIZE, GFP_ATOMIC);
		if (!skbn)
		if (!skbn) {
			int j;

			return_val = RMNET_PERF_RESOURCE_MGMT_FAIL;
			/* If one skb fails to allocate, dont use feature */
			for (j = i - 1; j >= 0; j--) {
				if (pool->available[j]) {
					kfree_skb(pool->available[j]);
					pool->available[j] = NULL;
				}
			}
			return return_val;
		}
		pool->available[i] = skbn;
	}
	pool->index = 0;

	return return_val;
}

@@ -89,12 +101,15 @@ static void rmnet_perf_config_free_64k_buffs(struct rmnet_perf *perf)
	/* Free both busy and available because if its truly busy,
	 * we will simply decrement the users count... This means NW stack
	 * will still have opportunity to process the packet as it wishes
	 * and will naturally free the sk_buff when it is done
	 * and will naturally free the sk_buff when it is done. Available[0]
	 * being not null means that all indexes of available are filled by
	 * SKBs from module initialization
	 */

	if (buff_pool->available[0]) {
		for (i = 0; i < RMNET_PERF_NUM_64K_BUFFS; i++)
			kfree_skb(buff_pool->available[i]);
	}
}

/* rmnet_perf_config_free_resources() - on rmnet teardown free all the
 *		related meta data structures
@@ -170,10 +185,10 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)

	/* allocate all the memory in one chunk for cache coherency sake */
	buffer_head = kmalloc(total_size, GFP_KERNEL);
	*perf = buffer_head;
	if (!buffer_head)
		return RMNET_PERF_RESOURCE_MGMT_FAIL;

	*perf = buffer_head;
	local_perf = *perf;
	buffer_head += perf_size;

@@ -216,6 +231,7 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)
	core_meta->bm_state->curr_seq = 0;
	core_meta->bm_state->expect_packets = 0;
	core_meta->bm_state->wait_for_start = true;
	core_meta->bm_state->callbacks_valid = false;
	buffer_head += bm_state_size;

	return RMNET_PERF_RESOURCE_MGMT_SUCCESS;
@@ -233,6 +249,7 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,
	perf->core_meta->dev = dev;
	/* register for DL marker */
	dl_ind = kzalloc(sizeof(struct rmnet_map_dl_ind), GFP_ATOMIC);
	perf->core_meta->dl_ind = dl_ind;
	if (dl_ind) {
		dl_ind->priority = RMNET_PERF;
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2) {
@@ -246,10 +263,11 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,
			dl_ind->dl_trl_handler =
				&rmnet_perf_core_handle_map_control_end;
		}
		perf->core_meta->dl_ind = dl_ind;

		if (rmnet_map_dl_ind_register(port, dl_ind)) {
			kfree(dl_ind);
			pr_err("%s(): Failed to register dl_ind\n", __func__);
			perf->core_meta->dl_ind = NULL;
			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
		}
	} else {
@@ -259,13 +277,14 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,

	/* register for PS mode indications */
	ps_ind = kzalloc(sizeof(struct qmi_rmnet_ps_ind), GFP_ATOMIC);
	perf->core_meta->ps_ind = ps_ind;
	if (ps_ind) {
		ps_ind->ps_on_handler = &rmnet_perf_core_ps_on;
		ps_ind->ps_off_handler = &rmnet_perf_core_ps_off;
		perf->core_meta->ps_ind = ps_ind;
		if (qmi_rmnet_ps_ind_register(port, ps_ind)) {
			kfree(ps_ind);
			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
			perf->core_meta->ps_ind = NULL;
			pr_err("%s(): Failed to register ps_ind\n", __func__);
		}
	} else {
@@ -273,6 +292,9 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,
		pr_err("%s(): Failed to allocate ps_ind\n", __func__);
	}

	if (rc == RMNET_PERF_RESOURCE_MGMT_SUCCESS)
		perf->core_meta->bm_state->callbacks_valid = true;

	return rc;
}

@@ -297,9 +319,11 @@ static int rmnet_perf_netdev_up(struct net_device *real_dev,
	 */
	rc = rmnet_perf_config_alloc_64k_buffs(perf);
	if (rc == RMNET_PERF_RESOURCE_MGMT_FAIL) {
		/* Since recycling buffers isnt a feature we use, refrain
		 * from returning with a return failure status
		 */
		pr_err("%s(): Failed to allocate 64k buffers for recycling\n",
		       __func__);
		return RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL;
	}

	rc = rmnet_perf_config_register_callbacks(real_dev, port);
@@ -411,8 +435,7 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
				goto exit;
			} else if (return_val ==
				   RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL) {
				pr_err("%s(): rmnet_perf recycle buffer "
				       "allocation or callback registry "
				pr_err("%s(): rmnet_perf callback registry "
				       "failed. Continue without them\n",
					__func__);
			}
+10 −6
@@ -122,6 +122,7 @@ MODULE_PARM_DESC(rmnet_perf_ingress_deag,
		 "If true, rmnet_perf will handle QMAP deaggregation");

#define SHS_FLUSH				0
#define RECYCLE_BUFF_SIZE_THRESH		51200

/* Lock around flow nodes for syncornization with rmnet_perf_opt_mode changes */
static DEFINE_SPINLOCK(rmnet_perf_core_lock);
@@ -246,9 +247,10 @@ struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(u32 len)
	struct sk_buff *skbn;
	int user_count;

	if (len < 51200)
		return NULL;
	buff_pool = perf->core_meta->buff_pool;
	if (len < RECYCLE_BUFF_SIZE_THRESH || !buff_pool->available[0])
		return NULL;

	circ_index = buff_pool->index;
	iterations = 0;
	while (iterations < RMNET_PERF_NUM_64K_BUFFS) {
@@ -1005,6 +1007,7 @@ void rmnet_perf_core_deaggregate(struct sk_buff *skb,
				 struct rmnet_port *port)
{
	struct rmnet_perf *perf;
	struct rmnet_perf_core_burst_marker_state *bm_state;
	int co = 0;
	int chain_count = 0;

@@ -1021,13 +1024,14 @@ void rmnet_perf_core_deaggregate(struct sk_buff *skb,
		skb = skb_frag;
	}

	perf->core_meta->bm_state->expect_packets -= co;
	bm_state = perf->core_meta->bm_state;
	bm_state->expect_packets -= co;
	/* if we ran out of data and should have gotten an end marker,
	 * then we can flush everything
	 */
	if (port->data_format == RMNET_INGRESS_FORMAT_DL_MARKER_V2 ||
	    !rmnet_perf_core_bm_flush_on ||
	    (int) perf->core_meta->bm_state->expect_packets <= 0) {
	    !bm_state->callbacks_valid || !rmnet_perf_core_bm_flush_on ||
	    (int) bm_state->expect_packets <= 0) {
		rmnet_perf_opt_flush_all_flow_nodes();
		rmnet_perf_core_free_held_skbs();
		rmnet_perf_core_flush_reason_cnt[
+4 −0
@@ -78,6 +78,10 @@ struct rmnet_perf_core_64k_buff_pool {

struct rmnet_perf_core_burst_marker_state {
	bool wait_for_start;
	/* If the callbacks fail to register, then we want to flush at the
	 * end of every chain
	 */
	bool callbacks_valid;
	u32 curr_seq;
	u32 expect_packets;
};