Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2c0f59a7 authored by Sharath Chandra Vurukala, committed by Gerrit - the friendly Code Review server
Browse files

dfc: bearer based QMAP powersave



Support bearer based QMAP Powersave.

Change-Id: Ic72448565fb54a867a899b6aedcd44bea72c4c5b
Signed-off-by: Sharath Chandra Vurukala <quic_sharathv@quicinc.com>
parent 86129282
Loading
Loading
Loading
Loading
+31 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * RMNET configuration engine
 *
@@ -717,6 +718,36 @@ bool rmnet_all_flows_enabled(void *port)
}
EXPORT_SYMBOL(rmnet_all_flows_enabled);

/* Collect powersave bearer IDs from every muxed endpoint on this port.
 *
 * On entry *num_bearers is the capacity of the bearer_id array; on exit
 * it holds the number of entries actually filled in. bearer_id may be
 * NULL when the caller only wants a count.
 */
void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id)
{
	struct rmnet_port *real_port = (struct rmnet_port *)port;
	struct rmnet_endpoint *ep;
	unsigned long bkt;
	u8 filled = 0;
	u8 space_left;
	u8 count;

	if (unlikely(!port || !num_bearers))
		return;

	space_left = *num_bearers;

	rcu_read_lock();
	hash_for_each_rcu(real_port->muxed_ep, bkt, ep, hlnode) {
		/* Each endpoint consumes part of the remaining capacity */
		count = space_left;
		qmi_rmnet_prepare_ps_bearers(ep->egress_dev, &count,
					     bearer_id ?
						bearer_id + filled : NULL);
		filled += count;
		space_left -= count;
	}
	rcu_read_unlock();

	*num_bearers = filled;
}
EXPORT_SYMBOL(rmnet_prepare_ps_bearers);

int rmnet_get_powersave_notif(void *port)
{
	if (!port)
+83 −2
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <net/pkt_sched.h>
@@ -11,6 +12,7 @@
#include "dfc_defs.h"

#define QMAP_DFC_VER		1
#define QMAP_PS_MAX_BEARERS	32

#define QMAP_CMD_DONE		-1

@@ -23,6 +25,7 @@
#define QMAP_DFC_IND		11
#define QMAP_DFC_QUERY		12
#define QMAP_DFC_END_MARKER	13
#define QMAP_DFC_POWERSAVE	14

struct qmap_hdr {
	u8	cd_pad;
@@ -121,6 +124,22 @@ struct qmap_dfc_end_marker_cnf {
	u32			reserved4;
} __aligned(1);

/* QMAP DFC powersave request payload (QMAP_DFC_POWERSAVE command).
 * Built and sent by dfc_qmap_send_powersave(); multi-byte fields are
 * big-endian on the wire.
 */
struct qmap_dfc_powersave_req {
	struct qmap_cmd_hdr	hdr;		/* common QMAP command header */
	u8			cmd_ver;	/* command version; sender uses 3 */
	u8			allow:1;	/* set when a bearer list is attached */
	u8			autoshut:1;	/* set together with allow */
	u8			reserved:6;
	u8			reserved2;
	u8			mode:1;		/* 1 = enter powersave, 0 = exit */
	u8			reserved3:7;
	__be32			ep_type;	/* endpoint type from svc info */
	__be32			iface_id;	/* interface id from svc info */
	u8			num_bearers;	/* valid entries in bearer_id[] */
	u8			bearer_id[QMAP_PS_MAX_BEARERS];	/* bearer IDs reported to the modem */
	u8			reserved4[3];
} __aligned(1);

static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
@@ -130,14 +149,16 @@ static void *rmnet_ctl_handle;
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
					 u8 bearer_id, u16 seq, u32 tx_id);

static void dfc_qmap_send_cmd(struct sk_buff *skb)
/* Hand a QMAP control skb to the rmnet_ctl client.
 * Consumes the skb on failure. Returns 0 on success, -ECOMM otherwise.
 */
static int dfc_qmap_send_cmd(struct sk_buff *skb)
{
	trace_dfc_qmap(skb->data, skb->len, false);

	if (!rmnet_ctl_send_client(rmnet_ctl_handle, skb))
		return 0;

	pr_err("Failed to send to rmnet ctl\n");
	kfree_skb(skb);
	return -ECOMM;
}

static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
@@ -451,6 +472,65 @@ static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
	rmnet_map_tx_qmap_cmd(skb);
}

/* Build and send a QMAP_DFC_POWERSAVE request.
 *
 * @enable: nonzero to enter powersave, zero to exit.
 * @num_bearers: number of entries in @bearer_id (capped to
 *		 QMAP_PS_MAX_BEARERS).
 * @bearer_id: array of bearer IDs to attach; may be NULL, in which case
 *	       no bearer list is sent.
 *
 * Returns 0 on success, -EINVAL when no DFC client is registered,
 * -ENOMEM on allocation failure, or -ECOMM if the send fails.
 */
static int dfc_qmap_send_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_powersave_req *dfc_powersave;
	unsigned int len = sizeof(struct qmap_dfc_powersave_req);
	struct dfc_qmi_data *dfc;
	u32 ep_type = 0;
	u32 iface_id = 0;

	/* Snapshot endpoint info under RCU; bail out if DFC is not up */
	rcu_read_lock();
	dfc = rcu_dereference(qmap_dfc_data);
	if (!dfc) {
		rcu_read_unlock();
		return -EINVAL;
	}
	ep_type = dfc->svc.ep_type;
	iface_id = dfc->svc.iface_id;
	rcu_read_unlock();

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->protocol = htons(ETH_P_MAP);
	dfc_powersave = (struct qmap_dfc_powersave_req *)skb_put(skb, len);
	memset(dfc_powersave, 0, len);

	dfc_powersave->hdr.cd_bit = 1;
	dfc_powersave->hdr.mux_id = 0;
	dfc_powersave->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_powersave->hdr.cmd_name = QMAP_DFC_POWERSAVE;
	dfc_powersave->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_powersave->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));

	dfc_powersave->cmd_ver = 3;
	dfc_powersave->mode = enable ? 1 : 0;

	/* Only attach a bearer list when one was actually supplied:
	 * guard against a NULL bearer_id with a nonzero count, which
	 * would otherwise memcpy from a NULL pointer.
	 */
	if (enable && num_bearers && bearer_id) {
		if (unlikely(num_bearers > QMAP_PS_MAX_BEARERS))
			num_bearers = QMAP_PS_MAX_BEARERS;
		dfc_powersave->allow = 1;
		dfc_powersave->autoshut = 1;
		dfc_powersave->num_bearers = num_bearers;
		memcpy(dfc_powersave->bearer_id, bearer_id, num_bearers);
	}

	dfc_powersave->ep_type = htonl(ep_type);
	dfc_powersave->iface_id = htonl(iface_id);

	return dfc_qmap_send_cmd(skb);
}

/* Trace the requested powersave transition, then issue the QMAP request. */
int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
	int rc;

	trace_dfc_set_powersave_mode(enable);
	rc = dfc_qmap_send_powersave(enable, num_bearers, bearer_id);

	return rc;
}

void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
	struct rmnet_bearer_map *bearer;
@@ -499,6 +579,7 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,

	pr_info("DFC QMAP init\n");

	if (!qmi->ps_ext)
		dfc_qmap_send_config(data);

	return 0;
+15 −3
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <net/pkt_sched.h>
@@ -989,7 +990,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
	u32 adjusted_grant;

	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
	if (!itm)

	/* cache the bearer assuming it is a new bearer */
	if (unlikely(!itm && !is_query && fc_info->num_bytes))
		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);

	if (itm) {
@@ -1101,10 +1104,19 @@ void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,

		spin_lock_bh(&qos->qos_lock);

		/* In powersave, change grant to 1 if it is a enable */
		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
			if (flow_status->num_bytes) {
				flow_status->num_bytes = DEFAULT_GRANT;
				flow_status->seq_num = 0;
				/* below is to reset bytes-in-flight */
				flow_status->rx_bytes_valid = 1;
				flow_status->rx_bytes = 0xFFFFFFFF;
			} else {
				spin_unlock_bh(&qos->qos_lock);
				continue;
			}
		}

		if (unlikely(flow_status->bearer_id == 0xFF))
			dfc_all_bearer_flow_ctl(
+146 −9
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define FLAG_QMAP_MASK 0x0020
#define FLAG_PS_EXT_MASK 0x0040

#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)

@@ -35,9 +36,12 @@
	 (m) == DFC_MODE_SA)

#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
#define FLAG_TO_PS_EXT(f) ((f) & FLAG_PS_EXT_MASK)

int dfc_mode;
int dfc_qmap;
int dfc_ps_ext;

#define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)

unsigned int rmnet_wq_frequency __read_mostly = 1000;
@@ -388,12 +392,15 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
				bearer->mq_idx = itm->mq_idx;
		}

		qmi_rmnet_flow_control(dev, itm->mq_idx,
				       bearer->grant_size > 0 ? 1 : 0);

		/* Always enable flow for the newly associated bearer */
		if (!bearer->grant_size) {
			bearer->grant_size = DEFAULT_GRANT;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(DEFAULT_GRANT);
		}
		qmi_rmnet_flow_control(dev, itm->mq_idx, 1);
		if (dfc_mode == DFC_MODE_SA)
			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
					bearer->grant_size > 0 ? 1 : 0);
			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, 1);
	}
}

@@ -615,6 +622,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
	}

	qmi->flag = tcm->tcm_ifindex;
	qmi->ps_ext = FLAG_TO_PS_EXT(qmi->flag);
	svc.instance = tcm->tcm_handle;
	svc.ep_type = tcm->tcm_info;
	svc.iface_id = tcm->tcm_parent;
@@ -716,6 +724,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
	case NLMSG_CLIENT_SETUP:
		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
		dfc_ps_ext = FLAG_TO_PS_EXT(tcm->tcm_ifindex);

		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
@@ -852,6 +861,54 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
}
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);

/**
 * qmi_rmnet_prepare_ps_bearers - get disabled bearers and
 * reset enabled bearers
 * @dev: network device whose QoS bearer list is scanned
 * @num_bearers: in: capacity of @bearer_id; out: number of entries written
 * @bearer_id: optional output array of disabled bearer IDs (may be NULL)
 *
 * Bearers that still hold a grant are reset to the default grant state;
 * bearers without a grant are reported back to the caller.
 */
void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
				  u8 *bearer_id)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	u8 current_num_bearers = 0;
	u8 num_bearers_left = 0;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos || !num_bearers)
		return;

	spin_lock_bh(&qos->qos_lock);

	num_bearers_left = *num_bearers;

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (bearer->grant_size) {
			/* Enabled bearer: clear flow state and restore the
			 * default grant instead of reporting it.
			 */
			bearer->seq = 0;
			bearer->ack_req = 0;
			bearer->bytes_in_flight = 0;
			bearer->tcp_bidir = false;
			bearer->rat_switch = false;
			qmi_rmnet_watchdog_remove(bearer);
			bearer->grant_size = DEFAULT_GRANT;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(DEFAULT_GRANT);
		} else if (num_bearers_left) {
			/* Disabled bearer: record its ID for the caller */
			if (bearer_id)
				bearer_id[current_num_bearers] =
					bearer->bearer_id;
			current_num_bearers++;
			num_bearers_left--;
		} else {
			/* More disabled bearers than the caller has room for */
			pr_err("DFC: no bearer space\n");
		}
	}

	*num_bearers = current_num_bearers;

	spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_prepare_ps_bearers);

#ifdef CONFIG_QCOM_QMI_DFC
bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
			      bool *drop)
@@ -1065,6 +1122,7 @@ static struct rmnet_powersave_work *rmnet_work;
static bool rmnet_work_quit;
static bool rmnet_work_inited;
static LIST_HEAD(ps_list);
static u8 ps_bearer_id[32];

struct rmnet_powersave_work {
	struct delayed_work work;
@@ -1279,6 +1337,78 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
	rcu_read_unlock();
}

/* Powersave worker used when both dfc_qmap and dfc_ps_ext are set (see
 * qmi_rmnet_work_init). If powersave is currently active it exits
 * powersave; otherwise it samples packet counters and enters powersave
 * when no traffic has moved since the previous run.
 */
static void qmi_rmnet_check_stats_2(struct work_struct *work)
{
	struct rmnet_powersave_work *real_work;
	struct qmi_info *qmi;
	u64 rxd, txd;
	u64 rx, tx;
	u8 num_bearers;

	real_work = container_of(to_delayed_work(work),
				 struct rmnet_powersave_work, work);

	if (unlikely(!real_work->port))
		return;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
	if (unlikely(!qmi))
		return;

	if (qmi->ps_enabled) {

		/* Ready to accept grant */
		qmi->ps_ignore_grant = false;

		/* Out of powersave */
		if (dfc_qmap_set_powersave(0, 0, NULL))
			goto end;

		qmi->ps_enabled = false;

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_off_notify(real_work->port);

		goto end;
	}

	/* Packet deltas since the previous run */
	rmnet_get_packets(real_work->port, &rx, &tx);
	rxd = rx - real_work->old_rx_pkts;
	txd = tx - real_work->old_tx_pkts;
	real_work->old_rx_pkts = rx;
	real_work->old_tx_pkts = tx;

	if (!rxd && !txd) {
		qmi->ps_ignore_grant = true;
		qmi->ps_enabled = true;
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);

		/* Let other CPU's see this update so that packets are
		 * dropped, instead of further processing packets
		 */
		smp_mb();

		/* Gather the disabled-bearer list to report to the modem */
		num_bearers = sizeof(ps_bearer_id);
		memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
		rmnet_prepare_ps_bearers(real_work->port, &num_bearers,
					 ps_bearer_id);

		/* Enter powersave */
		dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_on_notify(real_work->port);

		/* Deliberately no rearm here: with PS_WORK_ACTIVE_BIT
		 * cleared, qmi_rmnet_work_maybe_restart() restarts the
		 * worker when traffic resumes.
		 */
		return;
	}
end:
	rcu_read_lock();
	if (!rmnet_work_quit)
		alarm_start_relative(&real_work->atimer, PS_INTERVAL_KT);

	rcu_read_unlock();
}

static void qmi_rmnet_work_set_active(void *port, int status)
{
	struct qmi_info *qmi;
@@ -1310,15 +1440,20 @@ void qmi_rmnet_work_init(void *port)
		rmnet_ps_wq = NULL;
		return;
	}

	if (dfc_qmap && dfc_ps_ext)
		INIT_DEFERRABLE_WORK(&rmnet_work->work,
				     qmi_rmnet_check_stats_2);
	else
		INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);

	alarm_init(&rmnet_work->atimer, ALARM_BOOTTIME, qmi_rmnet_work_alarm);
	rmnet_work->port = port;
	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
			  &rmnet_work->old_tx_pkts);

	rmnet_work_quit = false;
	qmi_rmnet_work_set_active(rmnet_work->port, 1);
	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
	qmi_rmnet_work_set_active(rmnet_work->port, 0);
	rmnet_work_inited = true;
}
EXPORT_SYMBOL(qmi_rmnet_work_init);
@@ -1331,9 +1466,11 @@ void qmi_rmnet_work_maybe_restart(void *port)
	if (unlikely(!qmi || !rmnet_work_inited))
		return;

	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active))
	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active)) {
		qmi->ps_ignore_grant = false;
		qmi_rmnet_work_restart(port);
	}
}
EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);

void qmi_rmnet_work_exit(void *port)
+7 −0
Original line number Diff line number Diff line
@@ -102,6 +102,7 @@ struct qmi_info {
	bool ps_ignore_grant;
	bool wakelock_active;
	struct wakeup_source *ws;
	int ps_ext;
};

enum data_ep_type_enum_v01 {
@@ -219,6 +220,7 @@ wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
void wda_qmi_client_exit(void *wda_data);
int wda_set_powersave_mode(void *wda_data, u8 enable);
void qmi_rmnet_flush_ps_wq(void);
int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id);
#else
static inline int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
@@ -237,5 +239,10 @@ static inline int wda_set_powersave_mode(void *wda_data, u8 enable)
static inline void qmi_rmnet_flush_ps_wq(void)
{
}
/* Stub used when the real DFC QMAP implementation is compiled out:
 * powersave requests cannot be issued, so always report failure.
 */
static inline int dfc_qmap_set_powersave(u8 enable, u8 num_bearers,
		u8 *bearer_id)
{
	return -EINVAL;
}
#endif
#endif /*_RMNET_QMI_I_H*/
Loading