Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e38653cf authored by Arnav Sharma's avatar Arnav Sharma
Browse files

Fastforwarding data-kernel CRT:data.lnx.5.1-191021.3 to data.lnx.6.0

Change-Id: I1688b69628360b4f51d0e1286ecf445b9654ab81
parents efe63abb 7f9de3b9
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -1567,6 +1567,13 @@ int DWC_ETH_QOS_add_ipaddr(struct DWC_ETH_QOS_prv_data *pdata)
	return ret;
}

/* l3mdev_fib_table1() - l3mdev FIB table lookup hook installed on the
 * netdev when early ethernet is enabled: always direct route lookups to
 * the local routing table.
 *
 * Made static: it is only referenced through l3mdev_op1 below, so it
 * should not leak a generic name into the kernel's global namespace.
 */
static u32 l3mdev_fib_table1(const struct net_device *dev)
{
	return RT_TABLE_LOCAL;
}

/* l3mdev operations for the early-eth netdev; only the FIB-table lookup
 * hook is provided. File-local, hence static.
 */
static const struct l3mdev_ops l3mdev_op1 = {
	.l3mdev_fib_table = l3mdev_fib_table1,
};

static int DWC_ETH_QOS_configure_netdevice(struct platform_device *pdev)
{
	struct DWC_ETH_QOS_prv_data *pdata = NULL;
@@ -1642,6 +1649,15 @@ static int DWC_ETH_QOS_configure_netdevice(struct platform_device *pdev)
	/* store emac hw version in pdata*/
	pdata->emac_hw_version_type = dwc_eth_qos_res_data.emac_hw_version_type;

#ifdef CONFIG_NET_L3_MASTER_DEV
	if (pdata->res_data->early_eth_en && pdata->emac_hw_version_type == EMAC_HW_v2_3_1) {
		EMACDBG("l3mdev_op1 set \n");
		dev->priv_flags = IFF_L3MDEV_MASTER;
		dev->l3mdev_ops = &l3mdev_op1;
	}
#endif


	/* Scale the clocks to 10Mbps speed */
	if (pdata->res_data->early_eth_en) {
		pdata->speed = SPEED_100;
+37 −14
Original line number Diff line number Diff line
@@ -55,18 +55,30 @@ rmnet_perf_config_alloc_64k_buffs(struct rmnet_perf *perf)
{
	int i;
	struct sk_buff *skbn;
	struct rmnet_perf_core_64k_buff_pool *pool = perf->core_meta->buff_pool;
	enum rmnet_perf_resource_management_e return_val;
	struct rmnet_perf_core_64k_buff_pool *pool = perf->core_meta->buff_pool;

	return_val = RMNET_PERF_RESOURCE_MGMT_SUCCESS;

	memset(pool, 0, sizeof(struct rmnet_perf_core_64k_buff_pool));
	pool->index = 0;
	for (i = 0; i < RMNET_PERF_NUM_64K_BUFFS; i++) {
		skbn = alloc_skb(RMNET_PERF_CORE_RECYCLE_SKB_SIZE, GFP_ATOMIC);
		if (!skbn)
		if (!skbn) {
			int j;

			return_val = RMNET_PERF_RESOURCE_MGMT_FAIL;
			/* If one skb fails to allocate, don't use the feature */
			for (j = i - 1; j >= 0; j--) {
				if (pool->available[j]) {
					kfree_skb(pool->available[j]);
					pool->available[j] = NULL;
				}
			}
			return return_val;
		}
		pool->available[i] = skbn;
	}
	pool->index = 0;

	return return_val;
}

@@ -89,12 +101,15 @@ static void rmnet_perf_config_free_64k_buffs(struct rmnet_perf *perf)
	/* Free both busy and available because if it's truly busy,
	 * we will simply decrement the users count... This means NW stack
	 * will still have opportunity to process the packet as it wishes
	 * and will naturally free the sk_buff when it is done
	 * and will naturally free the sk_buff when it is done. A non-NULL
	 * available[0] means that every index of available was filled with
	 * an SKB during module initialization
	 */

	if (buff_pool->available[0]) {
		for (i = 0; i < RMNET_PERF_NUM_64K_BUFFS; i++)
			kfree_skb(buff_pool->available[i]);
	}
}

/* rmnet_perf_config_free_resources() - on rmnet teardown free all the
 *		related meta data structures
@@ -170,10 +185,10 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)

	/* allocate all the memory in one chunk for cache coherency sake */
	buffer_head = kmalloc(total_size, GFP_KERNEL);
	*perf = buffer_head;
	if (!buffer_head)
		return RMNET_PERF_RESOURCE_MGMT_FAIL;

	*perf = buffer_head;
	local_perf = *perf;
	buffer_head += perf_size;

@@ -216,6 +231,7 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)
	core_meta->bm_state->curr_seq = 0;
	core_meta->bm_state->expect_packets = 0;
	core_meta->bm_state->wait_for_start = true;
	core_meta->bm_state->callbacks_valid = false;
	buffer_head += bm_state_size;

	return RMNET_PERF_RESOURCE_MGMT_SUCCESS;
@@ -233,6 +249,7 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,
	perf->core_meta->dev = dev;
	/* register for DL marker */
	dl_ind = kzalloc(sizeof(struct rmnet_map_dl_ind), GFP_ATOMIC);
	perf->core_meta->dl_ind = dl_ind;
	if (dl_ind) {
		dl_ind->priority = RMNET_PERF;
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2) {
@@ -246,10 +263,11 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,
			dl_ind->dl_trl_handler =
				&rmnet_perf_core_handle_map_control_end;
		}
		perf->core_meta->dl_ind = dl_ind;

		if (rmnet_map_dl_ind_register(port, dl_ind)) {
			kfree(dl_ind);
			pr_err("%s(): Failed to register dl_ind\n", __func__);
			perf->core_meta->dl_ind = NULL;
			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
		}
	} else {
@@ -259,13 +277,14 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,

	/* register for PS mode indications */
	ps_ind = kzalloc(sizeof(struct qmi_rmnet_ps_ind), GFP_ATOMIC);
	perf->core_meta->ps_ind = ps_ind;
	if (ps_ind) {
		ps_ind->ps_on_handler = &rmnet_perf_core_ps_on;
		ps_ind->ps_off_handler = &rmnet_perf_core_ps_off;
		perf->core_meta->ps_ind = ps_ind;
		if (qmi_rmnet_ps_ind_register(port, ps_ind)) {
			kfree(ps_ind);
			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
			perf->core_meta->ps_ind = NULL;
			pr_err("%s(): Failed to register ps_ind\n", __func__);
		}
	} else {
@@ -273,6 +292,9 @@ rmnet_perf_config_register_callbacks(struct net_device *dev,
		pr_err("%s(): Failed to allocate ps_ind\n", __func__);
	}

	if (rc == RMNET_PERF_RESOURCE_MGMT_SUCCESS)
		perf->core_meta->bm_state->callbacks_valid = true;

	return rc;
}

@@ -297,9 +319,11 @@ static int rmnet_perf_netdev_up(struct net_device *real_dev,
	 */
	rc = rmnet_perf_config_alloc_64k_buffs(perf);
	if (rc == RMNET_PERF_RESOURCE_MGMT_FAIL) {
		/* Buffer recycling is an optional feature; report a
		 * semi-failure so the caller can continue without it
		 */
		pr_err("%s(): Failed to allocate 64k buffers for recycling\n",
		       __func__);
		return RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL;
	}

	rc = rmnet_perf_config_register_callbacks(real_dev, port);
@@ -411,8 +435,7 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
				goto exit;
			} else if (return_val ==
				   RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL) {
				pr_err("%s(): rmnet_perf recycle buffer "
				       "allocation or callback registry "
				pr_err("%s(): rmnet_perf callback registry "
				       "failed. Continue without them\n",
					__func__);
			}
+10 −6
Original line number Diff line number Diff line
@@ -122,6 +122,7 @@ MODULE_PARM_DESC(rmnet_perf_ingress_deag,
		 "If true, rmnet_perf will handle QMAP deaggregation");

#define SHS_FLUSH				0
#define RECYCLE_BUFF_SIZE_THRESH		51200

/* Lock around flow nodes for synchronization with rmnet_perf_opt_mode changes */
static DEFINE_SPINLOCK(rmnet_perf_core_lock);
@@ -246,9 +247,10 @@ struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(u32 len)
	struct sk_buff *skbn;
	int user_count;

	if (len < 51200)
		return NULL;
	buff_pool = perf->core_meta->buff_pool;
	if (len < RECYCLE_BUFF_SIZE_THRESH || !buff_pool->available[0])
		return NULL;

	circ_index = buff_pool->index;
	iterations = 0;
	while (iterations < RMNET_PERF_NUM_64K_BUFFS) {
@@ -1005,6 +1007,7 @@ void rmnet_perf_core_deaggregate(struct sk_buff *skb,
				 struct rmnet_port *port)
{
	struct rmnet_perf *perf;
	struct rmnet_perf_core_burst_marker_state *bm_state;
	int co = 0;
	int chain_count = 0;

@@ -1021,13 +1024,14 @@ void rmnet_perf_core_deaggregate(struct sk_buff *skb,
		skb = skb_frag;
	}

	perf->core_meta->bm_state->expect_packets -= co;
	bm_state = perf->core_meta->bm_state;
	bm_state->expect_packets -= co;
	/* if we ran out of data and should have gotten an end marker,
	 * then we can flush everything
	 */
	if (port->data_format == RMNET_INGRESS_FORMAT_DL_MARKER_V2 ||
	    !rmnet_perf_core_bm_flush_on ||
	    (int) perf->core_meta->bm_state->expect_packets <= 0) {
	    !bm_state->callbacks_valid || !rmnet_perf_core_bm_flush_on ||
	    (int) bm_state->expect_packets <= 0) {
		rmnet_perf_opt_flush_all_flow_nodes();
		rmnet_perf_core_free_held_skbs();
		rmnet_perf_core_flush_reason_cnt[
+4 −0
Original line number Diff line number Diff line
@@ -78,6 +78,10 @@ struct rmnet_perf_core_64k_buff_pool {

/* Tracks progress through a downlink burst delimited by DL start/end
 * markers, so flushing can be deferred until the burst completes.
 */
struct rmnet_perf_core_burst_marker_state {
	/* initialized to true; presumably cleared once the first DL start
	 * marker arrives -- TODO confirm against the DL marker handlers
	 */
	bool wait_for_start;
	/* If the callbacks fail to register, then we want to flush at the
	 * end of every chain
	 */
	bool callbacks_valid;
	/* burst sequence counter; initialized to 0 at allocation */
	u32 curr_seq;
	/* packets still expected in the current burst; decremented per
	 * deaggregated packet, and a flush is forced once it reaches 0
	 */
	u32 expect_packets;
};
+70 −54
Original line number Diff line number Diff line
@@ -70,26 +70,26 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
	struct net_device *dev = netdev_notifier_info_to_dev(data);
	struct rmnet_priv *priv;
	struct rmnet_port *port;
	int ret = 0;

	if (!dev) {
		rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
		return NOTIFY_DONE;
	}

	if (!(strncmp(dev->name, "rmnet_data", 10) == 0 ||
	      strncmp(dev->name, "r_rmnet_data", 12) == 0))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_GOING_DOWN:
	case NETDEV_UNREGISTER:
		rmnet_shs_wq_reset_ep_active(dev);

		if (strncmp(dev->name, "rmnet_data", 10) == 0)
		rmnet_vnd_total--;

		/* Deinitialize if last vnd is going down or if
		 * phy_dev is going down.
		 */
		if ((rmnet_is_real_dev_registered(dev) &&
		    (!strcmp(dev->name, "rmnet_ipa0") ||
		    !strcmp(dev->name, "rmnet_mhi0"))) &&
		    rmnet_shs_cfg.rmnet_shs_init_complete) {
		if (!rmnet_vnd_total && rmnet_shs_cfg.rmnet_shs_init_complete) {
			pr_info("rmnet_shs deinit %s going down ", dev->name);
			RCU_INIT_POINTER(rmnet_shs_skb_entry, NULL);
			qmi_rmnet_ps_ind_deregister(rmnet_shs_cfg.port,
@@ -105,18 +105,10 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
		}
		break;

	case NETDEV_UP:

		if (strncmp(dev->name, "rmnet_data", 10) == 0){
	case NETDEV_REGISTER:
		rmnet_vnd_total++;
		}

		if (strncmp(dev->name, "rmnet_data", 10) == 0) {
			/* Need separate if check to avoid
			 * NULL dereferencing
			 */

			if (!rmnet_shs_cfg.rmnet_shs_init_complete) {
		if (rmnet_vnd_total && !rmnet_shs_cfg.rmnet_shs_init_complete) {
			pr_info("rmnet_shs initializing %s", dev->name);
			priv = netdev_priv(dev);
			port = rmnet_get_port(priv->real_dev);
@@ -127,7 +119,22 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
			rmnet_shs_init(priv->real_dev, dev);
			rmnet_shs_wq_init(priv->real_dev);
			rmnet_shs_rx_wq_init();

			rmnet_shs_cfg.is_timer_init = 1;
		}
		rmnet_shs_wq_set_ep_active(dev);

		break;
	case NETDEV_UP:
		if (!rmnet_shs_cfg.is_reg_dl_mrk_ind &&
		    rmnet_shs_cfg.rmnet_shs_init_complete) {

			port = rmnet_shs_cfg.port;
			if (!port) {
				pr_err("rmnet_shs: invalid rmnet_cfg_port");
				break;
			}

			rmnet_shs_cfg.dl_mrk_ind_cb.priority =
				RMNET_SHS;
			if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2) {
@@ -141,22 +148,31 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
				rmnet_shs_cfg.dl_mrk_ind_cb.dl_trl_handler =
					&rmnet_shs_dl_trl_handler;
			}
			rmnet_shs_cfg.rmnet_idl_ind_cb.ps_on_handler =
					&rmnet_shs_ps_on_hdlr;
			rmnet_shs_cfg.rmnet_idl_ind_cb.ps_off_handler =
					&rmnet_shs_ps_off_hdlr;

			ret = rmnet_map_dl_ind_register(port,
						        &rmnet_shs_cfg.dl_mrk_ind_cb);
			if (ret)
				pr_err("%s(): rmnet dl_ind registration fail\n",
				       __func__);

			ret = qmi_rmnet_ps_ind_register(port,
						        &rmnet_shs_cfg.rmnet_idl_ind_cb);
			if (ret)
				pr_err("%s(): rmnet ps_ind registration fail\n",
				       __func__);
			rmnet_shs_update_cfg_mask();
			rmnet_shs_wq_refresh_new_flow_list();
			rmnet_shs_cfg.is_reg_dl_mrk_ind = 1;
			trace_rmnet_shs_high(RMNET_SHS_MODULE,
					     RMNET_SHS_MODULE_INIT_WQ,
					     0xDEF, 0xDEF, 0xDEF,
					     0xDEF, NULL, NULL);
				rmnet_shs_cfg.rmnet_idl_ind_cb.ps_on_handler =
						&rmnet_shs_ps_on_hdlr;
				rmnet_shs_cfg.rmnet_idl_ind_cb.ps_off_handler =
						&rmnet_shs_ps_off_hdlr;
			RCU_INIT_POINTER(rmnet_shs_skb_entry,
					 rmnet_shs_assign);


			}
			rmnet_shs_wq_set_ep_active(dev);

		}

		break;
Loading