Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d3928121 authored by Jinesh K. Jayakumar's avatar Jinesh K. Jayakumar Committed by Gerrit - the friendly Code Review server
Browse files

msm: ipa: eth: Use a wake lock of 500 ms on device activity



Any time we receive a packet from an ethernet client, we need to keep APPS
awake for various user space services to process the packet and send any
responses back. Use a wake lock timer of 500 ms to keep the system awake
when rx activity is discovered between two autosleep attempts.

Change-Id: I587075a8a5351c287f23879dda1ebd558f9aa421
Signed-off-by: default avatarJinesh K. Jayakumar <jineshk@codeaurora.org>
parent 53817a30
Loading
Loading
Loading
Loading
+66 −36
Original line number Diff line number Diff line
@@ -11,7 +11,6 @@
 */

#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/timer.h>

#include "ipa_eth_i.h"
@@ -28,9 +27,14 @@ MODULE_PARM_DESC(ipa_eth_noauto,

static struct workqueue_struct *ipa_eth_wq;

bool ipa_eth_ready(void)
bool ipa_eth_is_ready(void)
{
	return test_bit(IPA_ETH_ST_READY, &ipa_eth_state) &&
	return test_bit(IPA_ETH_ST_READY, &ipa_eth_state);
}

bool ipa_eth_all_ready(void)
{
	return ipa_eth_is_ready() &&
		test_bit(IPA_ETH_ST_UC_READY, &ipa_eth_state) &&
		test_bit(IPA_ETH_ST_IPA_READY, &ipa_eth_state);
}
@@ -51,7 +55,7 @@ static inline bool reachable(struct ipa_eth_device *eth_dev)
static inline bool offloadable(struct ipa_eth_device *eth_dev)
{
	return
		ipa_eth_ready() &&
		ipa_eth_all_ready() &&
		reachable(eth_dev) &&
		!test_bit(IPA_ETH_DEV_F_UNPAIRING, &eth_dev->flags);
}
@@ -378,7 +382,7 @@ static void ipa_eth_global_refresh_work(struct work_struct *work)

	mutex_lock(&ipa_eth_devices_lock);

	if (ipa_eth_ready()) {
	if (ipa_eth_all_ready()) {
		list_for_each_entry(eth_dev, &ipa_eth_devices, device_list) {
			ipa_eth_device_refresh_sched(eth_dev);
		}
@@ -550,6 +554,52 @@ static int ipa_eth_panic_notifier(struct notifier_block *nb,
	return NOTIFY_DONE;
}

/* During a system suspend, suspend-prepare callbacks are called first by the
 * PM framework before freezing processes. This gives us an early opportunity
 * to abort the suspend and reduces the chances for device resumes at a later
 * stage.
 *
 * Returns NOTIFY_BAD (aborting the suspend) when rx activity was detected,
 * NOTIFY_OK otherwise.
 */
static int ipa_eth_pm_notifier_event_suspend_prepare(
	struct ipa_eth_device *eth_dev)
{
	/* We look for any ethernet rx activity since the previous attempt to
	 * suspend, and if such an activity is found, we hold a wake lock
	 * for WAKE_TIME_MS time. Any Rx packets received beyond this point
	 * should cause a wake up due to the Rx interrupt. In rare cases
	 * where the Rx interrupt was already processed and NAPI poll is yet
	 * to complete, we perform a second check in the suspend late handler
	 * and revert the device suspension by aborting the system suspend.
	 */
	if (ipa_eth_net_check_active(eth_dev)) {
		/* printk messages must be newline-terminated to avoid being
		 * merged with a following continuation line.
		 */
		pr_info("%s: %s is active, preventing suspend for some time\n",
				IPA_ETH_SUBSYS, eth_dev->net_dev->name);
		ipa_eth_dev_wakeup_event(eth_dev);
		return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}

/* Per-device PM notifier callback. Dispatches PM framework events for the
 * ipa_eth_device embedded in the registered notifier_block.
 */
static int ipa_eth_pm_notifier_cb(struct notifier_block *nb,
	unsigned long pm_event, void *unused)
{
	struct ipa_eth_device_private *ipa_priv = container_of(nb,
				struct ipa_eth_device_private, pm_nb);

	ipa_eth_dbg("PM notifier called for event %s (0x%04lx)",
			ipa_eth_pm_notifier_event_name(pm_event), pm_event);

	/* Only the suspend-prepare event requires action; every other event
	 * is acknowledged without handling.
	 */
	if (pm_event == PM_SUSPEND_PREPARE)
		return ipa_eth_pm_notifier_event_suspend_prepare(
				ipa_priv->eth_dev);

	return NOTIFY_DONE;
}

/*
 * ipa_eth_alloc_device() - Allocate an ipa_eth_device structure and initialize
 *                          all common fields
@@ -607,6 +657,7 @@ struct ipa_eth_device *ipa_eth_alloc_device(

	INIT_LIST_HEAD(&ipa_priv->upper_devices);

	ipa_priv->pm_nb.notifier_call = ipa_eth_pm_notifier_cb;
	ipa_priv->panic_nb.notifier_call = ipa_eth_panic_notifier;

	eth_dev->ipa_priv = ipa_priv;
@@ -658,6 +709,8 @@ int ipa_eth_register_device(struct ipa_eth_device *eth_dev)
	list_add(&eth_dev->device_list, &ipa_eth_devices);
	mutex_unlock(&ipa_eth_devices_lock);

	(void) register_pm_notifier(&ipa_priv->pm_nb);

	ipa_eth_dev_log(eth_dev, "Registered new device");

	rc = ipa_eth_offload_pair_device(eth_dev);
@@ -725,6 +778,8 @@ void ipa_eth_unregister_device(struct ipa_eth_device *eth_dev)
	 */
	ipa_eth_unpair_device(eth_dev);

	(void) unregister_pm_notifier(&ipa_priv->pm_nb);

	/* Remove from devices list so that no new global refreshes are
	 * scheduled.
	 */
@@ -833,33 +888,17 @@ void ipa_eth_unregister_offload_driver(struct ipa_eth_offload_driver *od)
}
EXPORT_SYMBOL(ipa_eth_unregister_offload_driver);

static int ipa_eth_pm_notifier_cb(struct notifier_block *nb,
	unsigned long pm_event, void *unused)
{
	ipa_eth_log("PM notifier called for event %lu", pm_event);

	/* Permissible offload states for a device can change due to certain
	 * wake up events. Ex. if start_on_wakeup property is set for a device,
	 * the eth_dev->start will be set to true during eth bus resume. Do a
	 * global refresh on all devices to update their offload state based on
	 * any such changes in permissible offload states that may have occurred
	 * during resume.
	 */
	if (pm_event == PM_POST_SUSPEND)
		ipa_eth_global_refresh_sched();

	return NOTIFY_DONE;
}

static struct notifier_block pm_notifier = {
	.notifier_call = ipa_eth_pm_notifier_cb,
};

int ipa_eth_init(void)
{
	int rc;
	unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM;

	/* Freeze the workqueue so that a refresh will not happen while the
	 * device is suspended as the suspend operation itself can generate
	 * Netlink events.
	 */
	wq_flags |= WQ_FREEZABLE;

	rc = ipa_eth_ipc_log_init();
	if (rc) {
		ipa_eth_err("Failed to initialize IPC logging");
@@ -886,12 +925,6 @@ int ipa_eth_init(void)
		goto err_dbgfs;
	}

	rc = register_pm_notifier(&pm_notifier);
	if (rc) {
		ipa_eth_err("Failed to register for PM notification");
		goto err_pm_notifier;
	}

	rc = ipa3_uc_register_ready_cb(&uc_ready_cb);
	if (rc) {
		ipa_eth_err("Failed to register for uC ready cb");
@@ -920,8 +953,6 @@ int ipa_eth_init(void)
err_ipa:
	ipa3_uc_unregister_ready_cb(&uc_ready_cb);
err_uc:
	unregister_pm_notifier(&pm_notifier);
err_pm_notifier:
	ipa_eth_debugfs_cleanup();
err_dbgfs:
	ipa_eth_bus_modexit();
@@ -950,7 +981,6 @@ void ipa_eth_exit(void)

	ipa3_uc_unregister_ready_cb(&uc_ready_cb);

	unregister_pm_notifier(&pm_notifier);
	ipa_eth_debugfs_cleanup();

	/* Wait for all offload paths to deinit. Although the chances for any
+2 −2
Original line number Diff line number Diff line
/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -231,7 +231,7 @@ static ssize_t eth_dev_ready_read(struct file *file, char __user *user_buf,
			"Ready" : "Not Ready");

	n += scnprintf(&buf[n], size - n, "ALL: %s\n",
		ipa_eth_ready() ? "Ready" : "Not Ready");
		ipa_eth_all_ready() ? "Ready" : "Not Ready");

	n = simple_read_from_buffer(user_buf, count, ppos, buf, n);

+22 −1
Original line number Diff line number Diff line
@@ -13,6 +13,8 @@
#ifndef _IPA_ETH_I_H_
#define _IPA_ETH_I_H_

#include <linux/suspend.h>

#define IPA_ETH_NET_DRIVER
#define IPA_ETH_OFFLOAD_DRIVER
#include <linux/ipa_eth.h>
@@ -33,10 +35,13 @@
#define IPA_ETH_IPC_LOGDBG_DEFAULT false
#endif

#define IPA_ETH_WAKE_TIME_MS 500

#define IPA_ETH_PFDEV (ipa3_ctx ? ipa3_ctx->pdev : NULL)
#define IPA_ETH_SUBSYS "ipa_eth"

#define IPA_ETH_NET_DEVICE_MAX_EVENTS (NETDEV_CHANGE_TX_QUEUE_LEN + 1)
#define IPA_ETH_PM_NOTIFIER_MAX_EVENTS (PM_POST_RESTORE + 1)

enum ipa_eth_states {
	IPA_ETH_ST_READY,
@@ -117,9 +122,16 @@ struct ipa_eth_device_private {

	struct list_head upper_devices;

	unsigned long assume_active;
	struct rtnl_link_stats64 last_rtnl_stats;

	struct notifier_block pm_nb;
	struct notifier_block panic_nb;
};

#define eth_dev_priv(eth_dev) \
		((struct ipa_eth_device_private *)((eth_dev)->ipa_priv))

struct ipa_eth_bus {
	struct list_head bus_list;

@@ -146,7 +158,13 @@ extern unsigned long ipa_eth_state;
extern bool ipa_eth_noauto;
extern bool ipa_eth_ipc_logdbg;

bool ipa_eth_ready(void);
bool ipa_eth_is_ready(void);
bool ipa_eth_all_ready(void);

/* Record a wakeup event on the device, keeping the system awake for
 * IPA_ETH_WAKE_TIME_MS (500 ms) so user space can process rx traffic
 * before the next autosleep attempt.
 * NOTE(review): the final 'false' argument presumably marks the event as
 * not "hard" — confirm against pm_wakeup_dev_event() in the target kernel.
 */
static inline void ipa_eth_dev_wakeup_event(struct ipa_eth_device *eth_dev)
{
	pm_wakeup_dev_event(eth_dev->dev, IPA_ETH_WAKE_TIME_MS, false);
}

struct ipa_eth_device *ipa_eth_alloc_device(
	struct device *dev,
@@ -191,6 +209,8 @@ int ipa_eth_offload_save_regs(struct ipa_eth_device *eth_dev);
int ipa_eth_offload_prepare_reset(struct ipa_eth_device *eth_dev, void *data);
int ipa_eth_offload_complete_reset(struct ipa_eth_device *eth_dev, void *data);

bool ipa_eth_net_check_active(struct ipa_eth_device *eth_dev);

int ipa_eth_net_register_driver(struct ipa_eth_net_driver *nd);
void ipa_eth_net_unregister_driver(struct ipa_eth_net_driver *nd);
int ipa_eth_net_register_upper(struct ipa_eth_device *eth_dev);
@@ -232,6 +252,7 @@ int ipa_eth_uc_stats_stop(struct ipa_eth_device *eth_dev);

const char *ipa_eth_device_event_name(enum ipa_eth_device_event event);
const char *ipa_eth_net_device_event_name(unsigned long event);
const char *ipa_eth_pm_notifier_event_name(unsigned long event);

int ipa_eth_send_msg_connect(struct net_device *net_dev);
int ipa_eth_send_msg_disconnect(struct net_device *net_dev);
+26 −0
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@
#include <linux/rtnetlink.h>

#include "ipa_eth_i.h"
#include "ipa_eth_trace.h"

#define ipa_eth_nd_op(eth_dev, op, args...) (eth_dev->nd->ops->op(args))

@@ -99,6 +100,31 @@ static inline bool __is_netdev_iface_up(struct ipa_eth_device *eth_dev)
	return !!(eth_dev->net_dev->flags & IFF_UP);
}

bool ipa_eth_net_check_active(struct ipa_eth_device *eth_dev)
{
	bool active = false;
	struct rtnl_link_stats64 curr_rtnl_stats;
	struct ipa_eth_device_private *ipa_priv = eth_dev_priv(eth_dev);
	struct rtnl_link_stats64 *last_rtnl_stats = &ipa_priv->last_rtnl_stats;

	dev_get_stats(eth_dev->net_dev, &curr_rtnl_stats);

	trace_net_check_active(eth_dev,
		last_rtnl_stats, &curr_rtnl_stats, ipa_priv->assume_active);

	if (ipa_priv->assume_active) {
		ipa_priv->assume_active--;
		active = true;
	}

	if (curr_rtnl_stats.rx_packets != last_rtnl_stats->rx_packets)
		active = true;

	*last_rtnl_stats = curr_rtnl_stats;

	return active;
}

/* Event handler for netdevice events from upper interfaces */
static int ipa_eth_net_upper_event(
	struct ipa_eth_upper_device *upper_dev,
+81 −24
Original line number Diff line number Diff line
/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -95,6 +95,11 @@ static struct ipa_eth_device *lookup_eth_dev(struct pci_dev *pdev)
	return eth_dev;
}

/* Resolve a generic struct device to its registered eth_dev entry, going
 * through the enclosing PCI device. Returns NULL if no entry exists.
 */
static struct ipa_eth_device *dev_to_eth_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return lookup_eth_dev(pdev);
}

static bool is_driver_used(struct ipa_eth_pci_driver *eth_pdrv)
{
	bool in_use = false;
@@ -123,11 +128,10 @@ static void ipa_eth_pcie_event_wakeup(struct pci_dev *pdev)
		return;
	}

	/* Just set the flag here. ipa_eth_pm_notifier_cb() will later
	 * schedule global refresh.
	 */
	if (eth_dev->start_on_wakeup)
	if (eth_dev->start_on_wakeup && !eth_dev->start) {
		eth_dev->start = true;
		ipa_eth_device_refresh_sched(eth_dev);
	}
}

static void ipa_eth_pcie_event_cb(struct msm_pcie_notify *notify)
@@ -254,9 +258,9 @@ static int ipa_eth_pci_probe_handler(struct pci_dev *pdev,
	ipa_eth_dbg("PCI probe called for %s driver with devfn %u",
		    pdev->driver->name, pdev->devfn);

	if (!ipa_eth_pci_is_ready) {
		ipa_eth_err("Offload sub-system PCI module is not initialized");
		ipa_eth_err("PCI probe for device is deferred");
	if (!ipa_eth_is_ready()) {
		ipa_eth_log(
			"Offload sub-system not initialized, deferring probe");
		return -EPROBE_DEFER;
	}

@@ -311,16 +315,22 @@ static int ipa_eth_pci_suspend_handler(struct device *dev)
{
	int rc = 0;
	struct ipa_eth_device *eth_dev;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	eth_dev = lookup_eth_dev(pci_dev);
	eth_dev = dev_to_eth_dev(dev);
	if (!eth_dev) {
		ipa_eth_bug("Failed to lookup pci_dev -> eth_dev");
		return -EFAULT;
	}

	if (work_pending(&eth_dev->refresh))
	if (work_pending(&eth_dev->refresh)) {
		ipa_eth_dev_log(eth_dev,
			"Refresh work is pending, aborting suspend");

		/* Just abort suspend. Since the wq is freezable, the work item
		 * would get flushed before we get called again.
		 */
		return -EAGAIN;
	}

	/* When offload is started, PCI power collapse is already disabled by
	 * the ipa_eth_pci_disable_pc() api. Nonetheless, we still need to do
@@ -328,9 +338,9 @@ static int ipa_eth_pci_suspend_handler(struct device *dev)
	 * itself perform a config space save-restore.
	 */
	if (eth_dev->of_state == IPA_ETH_OF_ST_STARTED) {
		ipa_eth_dev_log(eth_dev,
		ipa_eth_dev_dbg(eth_dev,
			"Device suspend performing dummy config space save");
		rc = pci_save_state(pci_dev);
		rc = pci_save_state(to_pci_dev(dev));
	} else {
		ipa_eth_dev_log(eth_dev,
			"Device suspend delegated to net driver");
@@ -338,13 +348,61 @@ static int ipa_eth_pci_suspend_handler(struct device *dev)
	}

	if (rc)
		ipa_eth_dev_log(eth_dev, "Device suspend failed");
		ipa_eth_dev_err(eth_dev, "Device suspend failed");
	else
		ipa_eth_dev_log(eth_dev, "Device suspend complete");
		ipa_eth_dev_dbg(eth_dev, "Device suspend complete");

	return rc;
}

/* Late-stage suspend callback: performs one final rx activity check after
 * processes are frozen, aborting the system suspend if traffic slipped in
 * after the PM_SUSPEND_PREPARE check was performed.
 *
 * Returns 0 to proceed with suspend, -EAGAIN to abort it, or -EFAULT if the
 * device lookup fails.
 */
static int ipa_eth_pci_suspend_late_handler(struct device *dev)
{
	struct ipa_eth_device *eth_dev;

	eth_dev = dev_to_eth_dev(dev);
	if (!eth_dev) {
		ipa_eth_bug("Failed to lookup pci_dev -> eth_dev");
		return -EFAULT;
	}

	/* In rare case where we detect some interface activity between the
	 * time PM_SUSPEND_PREPARE event was processed and the device was
	 * actually frozen, abort the suspend operation.
	 */
	if (ipa_eth_net_check_active(eth_dev)) {
		/* printk messages must be newline-terminated to avoid being
		 * merged with a following continuation line.
		 */
		pr_info("%s: %s shows late stage activity, preventing suspend\n",
				IPA_ETH_SUBSYS, eth_dev->net_dev->name);

		/* Have PM_SUSPEND_PREPARE give us one wakeup time quanta */
		eth_dev_priv(eth_dev)->assume_active++;

		return -EAGAIN;
	}

	return 0;
}

/* Early-stage resume callback: kicks off a device refresh when the device is
 * configured to start offload on resume and is not already started.
 */
static int ipa_eth_pci_resume_early_handler(struct device *dev)
{
	struct ipa_eth_device *eth_dev = dev_to_eth_dev(dev);

	if (!eth_dev) {
		ipa_eth_bug("Failed to lookup pci_dev -> eth_dev");
		return -EFAULT;
	}

	/* We cannot check start_on_resume in the resume handler as it can get
	 * invoked also if .suspend_late() aborts due to interface activity.
	 */
	if (!eth_dev->start && eth_dev->start_on_resume) {
		eth_dev->start = true;
		ipa_eth_device_refresh_sched(eth_dev);
	}

	return 0;
}

static int ipa_eth_pci_resume_handler(struct device *dev)
{
	int rc = 0;
@@ -357,30 +415,27 @@ static int ipa_eth_pci_resume_handler(struct device *dev)
		return -EFAULT;
	}

	/* Just set the flag here. ipa_eth_pm_notifier_cb() will later schedule
	 * global refresh.
	 */
	if (eth_dev->start_on_resume)
		eth_dev->start = true;

	/* During suspend, RC power collapse would not have happened if offload
	 * was started. Ignore resume callback since the device does not need
	 * to be re-initialized.
	 */
	if (eth_dev->of_state == IPA_ETH_OF_ST_STARTED) {
		ipa_eth_dev_log(eth_dev,
		ipa_eth_dev_dbg(eth_dev,
			"Device resume performing nop");
		rc = 0;
	} else {
		ipa_eth_dev_log(eth_dev,
			"Device resume delegated to net driver");
		rc = eth_dev_pm_ops(eth_dev)->resume(dev);

		/* Give some time after a resume for the device to settle */
		eth_dev_priv(eth_dev)->assume_active++;
	}

	if (rc)
		ipa_eth_dev_log(eth_dev, "Device resume failed");
		ipa_eth_dev_err(eth_dev, "Device resume failed");
	else
		ipa_eth_dev_log(eth_dev, "Device resume complete");
		ipa_eth_dev_dbg(eth_dev, "Device resume complete");

	return 0;
}
@@ -390,6 +445,8 @@ static int ipa_eth_pci_resume_handler(struct device *dev)
 */
static const struct dev_pm_ops ipa_eth_pci_pm_ops = {
	.suspend = ipa_eth_pci_suspend_handler,
	.suspend_late = ipa_eth_pci_suspend_late_handler,
	.resume_early = ipa_eth_pci_resume_early_handler,
	.resume = ipa_eth_pci_resume_handler,
};

Loading