Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d8dbb54 authored by Jinesh K. Jayakumar
Browse files

msm: ipa: Support for offload autostart in offload sub-system



Add support in IPA offload sub-system to automatically start offload for
its clients upon either wake up from suspend or timer expiry.

Change-Id: I2f3d83675eaa4d4b2a60461cd6ad259f663f30bb
Signed-off-by: Jinesh K. Jayakumar <jineshk@codeaurora.org>
parent ace36422
Loading
Loading
Loading
Loading
+70 −1
Original line number Diff line number Diff line
@@ -13,6 +13,8 @@
#include <linux/printk.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/timer.h>

#include <linux/msm_ipa.h>

@@ -329,6 +331,17 @@ static void ipa_eth_refresh_devices(void)
	queue_work(ipa_eth_wq, &global_refresh);
}

/**
 * ipa_eth_dev_start_timer_cb() - Expiry callback for eth_dev->start_timer
 * @data: The associated &struct ipa_eth_device, cast to unsigned long
 *        (legacy timer API passes the cookie set in timer->data)
 *
 * Enables offload start for the device once the start_on_timeout period
 * has elapsed, then schedules a device refresh so the new state takes
 * effect. Runs in timer (softirq) context.
 */
static void ipa_eth_dev_start_timer_cb(unsigned long data)
{
	struct ipa_eth_device *eth_dev = (struct ipa_eth_device *)data;

	/* Defensive: a NULL cookie means there is no device to refresh.
	 * Bail out entirely instead of passing NULL further down.
	 */
	if (!eth_dev)
		return;

	/* Do not start offload if user disabled start_on_timeout in between */
	if (eth_dev->start_on_timeout)
		eth_dev->start = true;

	ipa_eth_refresh_device(eth_dev);
}

static int ipa_eth_netdev_event_change(struct ipa_eth_device *eth_dev)
{
	bool refresh_needed = netif_carrier_ok(eth_dev->net_dev) ?
@@ -420,6 +433,13 @@ static ssize_t ipa_eth_dev_write_start(struct file *file,
						      struct ipa_eth_device,
						      start);

	/* Set/reset timer to automatically start offload after the timeout
	 * specified in eth_dev->start_on_timeout (milliseconds) expires.
	 */
	if (!eth_dev->start && eth_dev->start_on_timeout)
		mod_timer(&eth_dev->start_timer,
			jiffies + msecs_to_jiffies(eth_dev->start_on_timeout));

	ipa_eth_refresh_device(eth_dev);

	return ret;
@@ -529,6 +549,15 @@ static int ipa_eth_device_debugfs_create(struct ipa_eth_device *eth_dev)
	debugfs_create_file("start", 0644, eth_dev->debugfs, &eth_dev->start,
			    &fops_eth_dev_start);

	debugfs_create_bool("start_on_wakeup", 0644,
			    eth_dev->debugfs, &eth_dev->start_on_wakeup);

	debugfs_create_bool("start_on_resume", 0644,
			    eth_dev->debugfs, &eth_dev->start_on_resume);

	debugfs_create_u32("start_on_timeout", 0644, eth_dev->debugfs,
			    &eth_dev->start_on_timeout);

	debugfs_create_file("stats", 0644, eth_dev->debugfs, eth_dev,
			    &fops_eth_dev_stats);

@@ -584,6 +613,13 @@ static void __ipa_eth_unpair_device(struct ipa_eth_device *eth_dev)

	ipa_eth_device_debugfs_remove(eth_dev);

	eth_dev->start_on_wakeup = false;
	eth_dev->start_on_resume = false;
	eth_dev->start_on_timeout = 0;
	del_timer_sync(&eth_dev->start_timer);

	flush_work(&eth_dev->refresh);

	eth_dev->init = eth_dev->start = false;

	ipa_eth_refresh_device(eth_dev);
@@ -641,6 +677,11 @@ int ipa_eth_register_device(struct ipa_eth_device *eth_dev)
	INIT_LIST_HEAD(&eth_dev->rx_channels);
	INIT_LIST_HEAD(&eth_dev->tx_channels);

	init_timer(&eth_dev->start_timer);

	eth_dev->start_timer.function = ipa_eth_dev_start_timer_cb;
	eth_dev->start_timer.data = (unsigned long)eth_dev;

	eth_dev->init = eth_dev->start = !ipa_eth_noauto;

	rc = ipa_eth_net_open_device(eth_dev);
@@ -1041,10 +1082,28 @@ static struct notifier_block ipa_eth_panic_nb = {
	.notifier_call  = ipa_eth_panic_notifier,
};

/**
 * ipa_eth_pm_notifier_cb() - System PM notifier for the offload sub-system
 * @nb: Registered notifier block (unused)
 * @pm_event: PM transition event (PM_POST_SUSPEND, etc.)
 * @unused: Opaque notifier payload (unused)
 *
 * On resume from suspend (PM_POST_SUSPEND), schedules a refresh of all
 * registered devices so that any device flagged for start-on-wakeup or
 * start-on-resume gets its offload path (re)started.
 *
 * Return: NOTIFY_DONE always; this callback never vetoes a PM transition.
 */
static int ipa_eth_pm_notifier_cb(struct notifier_block *nb,
	unsigned long pm_event, void *unused)
{
	ipa_eth_log("PM notifier called for event %lu", pm_event);

	switch (pm_event) {
	case PM_POST_SUSPEND:
		ipa_eth_refresh_devices();
		break;
	default:
		/* Other PM events require no action */
		break;
	}

	return NOTIFY_DONE;
}

/* Notifier block registered with the PM core (register_pm_notifier) so the
 * offload sub-system learns about system suspend/resume transitions.
 */
static struct notifier_block pm_notifier = {
	.notifier_call = ipa_eth_pm_notifier_cb,
};

int ipa_eth_init(void)
{
	int rc;
	unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE;
	unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM;

	(void) atomic_notifier_chain_register(
			&panic_notifier_list, &ipa_eth_panic_nb);
@@ -1081,6 +1140,12 @@ int ipa_eth_init(void)
		goto err_offload;
	}

	rc = register_pm_notifier(&pm_notifier);
	if (rc) {
		ipa_eth_err("Failed to register for PM notification");
		goto err_pm_notifier;
	}

	rc = ipa3_uc_register_ready_cb(&uc_ready_cb);
	if (rc) {
		ipa_eth_err("Failed to register for uC ready cb");
@@ -1106,6 +1171,8 @@ int ipa_eth_init(void)
err_ipa:
	ipa3_uc_unregister_ready_cb(&uc_ready_cb);
err_uc:
	unregister_pm_notifier(&pm_notifier);
err_pm_notifier:
	ipa_eth_offload_modexit();
err_offload:
	ipa_eth_bus_modexit();
@@ -1131,6 +1198,8 @@ void ipa_eth_exit(void)
	// IPA ready CB can not be unregistered; just unregister uC ready CB
	ipa3_uc_unregister_ready_cb(&uc_ready_cb);

	unregister_pm_notifier(&pm_notifier);

	ipa_eth_offload_modexit();
	ipa_eth_bus_modexit();

+94 −18
Original line number Diff line number Diff line
@@ -30,6 +30,15 @@ struct ipa_eth_pci_driver {
	const struct dev_pm_ops *pm_ops_real;
};

/**
 * struct ipa_eth_pci_device - Per-device PCI bus-layer private data
 * @eth_dev: Back pointer to the offload device this entry belongs to
 * @epci_drv: The ipa_eth PCI driver wrapper that probed this device
 * @pcie_event: MSM PCIe event registration (used for wakeup notification)
 *
 * Stored in eth_dev->bus_priv by the PCI probe handler.
 */
struct ipa_eth_pci_device {
	struct ipa_eth_device *eth_dev;
	struct ipa_eth_pci_driver *epci_drv;
	struct msm_pcie_register_event pcie_event;
};

/* Fetch the net driver's real dev_pm_ops stashed in the device's PCI
 * bus-layer private data. The argument is parenthesized so the macro
 * expands correctly for any expression passed as @edev.
 */
#define eth_dev_pm_ops(edev) \
	(((struct ipa_eth_pci_device *)(edev)->bus_priv)->epci_drv->pm_ops_real)

static LIST_HEAD(pci_drivers);
static DEFINE_MUTEX(pci_drivers_mutex);

@@ -62,7 +71,8 @@ static int ipa_eth_pci_debugfs_init(struct dentry *dbgfs_root)
	return 0;
}

static struct ipa_eth_pci_driver *__lookup_driver(struct pci_driver *pci_drv)
static struct ipa_eth_pci_driver *__lookup_epci_driver(
					struct pci_driver *pci_drv)
{
	struct ipa_eth_pci_driver *epci_drv;

@@ -79,7 +89,7 @@ static struct ipa_eth_pci_driver *lookup_epci_driver(struct pci_driver *pci_drv)
	struct ipa_eth_pci_driver *epci_drv;

	mutex_lock(&pci_drivers_mutex);
	epci_drv = __lookup_driver(pci_drv);
	epci_drv = __lookup_epci_driver(pci_drv);
	mutex_unlock(&pci_drivers_mutex);

	return epci_drv;
@@ -109,12 +119,39 @@ static struct ipa_eth_device *lookup_eth_dev(struct pci_dev *pdev)
	return eth_dev;
}

/**
 * ipa_eth_pcie_event_cb() - Handle MSM PCIe events for an offload device
 * @notify: Event descriptor; @notify->user holds the pci_dev registered
 *          at probe time
 *
 * On a PCIe wakeup event, marks the device for offload start if the user
 * enabled start_on_wakeup. The actual refresh is deferred to the PM
 * notifier path. All other events are ignored.
 */
static void ipa_eth_pcie_event_cb(struct msm_pcie_notify *notify)
{
	struct ipa_eth_device *eth_dev;
	struct pci_dev *pdev = notify->user;

	eth_dev = __lookup_eth_dev(pdev);
	if (!eth_dev) {
		ipa_eth_bug("Failed to lookup eth device");
		return;
	}

	ipa_eth_dev_log(eth_dev, "Received PCIe event %d", notify->event);

	if (notify->event == MSM_PCIE_EVENT_WAKEUP) {
		/* Just set the flag here. ipa_eth_pm_notifier_cb() will later
		 * schedule global refresh.
		 */
		if (eth_dev->start_on_wakeup)
			eth_dev->start = true;
	}
}

static int ipa_eth_pci_probe_handler(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	int rc = 0;
	struct device *dev = &pdev->dev;
	struct ipa_eth_device *eth_dev;
	struct ipa_eth_pci_device *epci_dev;
	struct ipa_eth_pci_driver *epci_drv;

	ipa_eth_dbg("PCI probe called for %s driver with devfn %u",
@@ -127,6 +164,10 @@ static int ipa_eth_pci_probe_handler(struct pci_dev *pdev,
	}

	epci_drv = lookup_epci_driver(pdev->driver);
	if (!epci_drv) {
		ipa_eth_bug("Failed to lookup epci driver");
		return -EFAULT;
	}

	rc = epci_drv->probe_real(pdev, id);
	if (rc) {
@@ -137,12 +178,33 @@ static int ipa_eth_pci_probe_handler(struct pci_dev *pdev,
	eth_dev = devm_kzalloc(dev, sizeof(*eth_dev), GFP_KERNEL);
	if (!eth_dev) {
		rc = -ENOMEM;
		goto err_alloc;
		goto err_alloc_edev;
	}

	eth_dev->dev = dev;
	eth_dev->nd = epci_drv->nd;
	eth_dev->bus_priv = epci_drv;

	epci_dev = devm_kzalloc(dev, sizeof(*epci_dev), GFP_KERNEL);
	if (!epci_dev) {
		rc = -ENOMEM;
		goto err_alloc_epdev;
	}

	eth_dev->bus_priv = epci_dev;

	epci_dev->eth_dev = eth_dev;
	epci_dev->epci_drv = epci_drv;

	epci_dev->pcie_event.events = MSM_PCIE_EVENT_WAKEUP;
	epci_dev->pcie_event.user = pdev;
	epci_dev->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
	epci_dev->pcie_event.callback = ipa_eth_pcie_event_cb;

	rc = msm_pcie_register_event(&epci_dev->pcie_event);
	if (rc) {
		ipa_eth_dev_err(eth_dev, "Failed to register for PCIe event");
		goto err_register_pcie;
	}

	rc = ipa_eth_register_device(eth_dev);
	if (rc) {
@@ -157,9 +219,12 @@ static int ipa_eth_pci_probe_handler(struct pci_dev *pdev,
	return 0;

err_register:
	memset(eth_dev, 0, sizeof(*eth_dev));
	msm_pcie_deregister_event(&epci_dev->pcie_event);
err_register_pcie:
	devm_kfree(dev, epci_dev);
err_alloc_epdev:
	devm_kfree(dev, eth_dev);
err_alloc:
err_alloc_edev:
	epci_drv->remove_real(pdev);
err_probe:
	return rc;
@@ -169,7 +234,7 @@ static void ipa_eth_pci_remove_handler(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipa_eth_device *eth_dev = NULL;
	struct ipa_eth_pci_driver *epci_drv = NULL;
	struct ipa_eth_pci_device *epci_dev = NULL;

	ipa_eth_dbg("PCI remove called for %s driver with devfn %u",
		    pdev->driver->name, pdev->devfn);
@@ -184,10 +249,15 @@ static void ipa_eth_pci_remove_handler(struct pci_dev *pdev)
	list_del(&eth_dev->bus_device_list);
	mutex_unlock(&pci_devices_mutex);

	epci_dev = eth_dev->bus_priv;

	ipa_eth_unregister_device(eth_dev);
	msm_pcie_deregister_event(&epci_dev->pcie_event);

	epci_drv = eth_dev->bus_priv;
	epci_drv->remove_real(pdev);
	epci_dev->epci_drv->remove_real(pdev);

	memset(epci_dev, 0, sizeof(*epci_dev));
	devm_kfree(dev, epci_dev);

	memset(eth_dev, 0, sizeof(*eth_dev));
	devm_kfree(dev, eth_dev);
@@ -197,7 +267,6 @@ static int ipa_eth_pci_suspend_handler(struct device *dev)
{
	int rc = 0;
	struct ipa_eth_device *eth_dev;
	const struct dev_pm_ops *pm_ops_real;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	eth_dev = lookup_eth_dev(pci_dev);
@@ -206,8 +275,8 @@ static int ipa_eth_pci_suspend_handler(struct device *dev)
		return -EFAULT;
	}

	pm_ops_real =
		((struct ipa_eth_pci_driver *)eth_dev->bus_priv)->pm_ops_real;
	if (work_pending(&eth_dev->refresh))
		return -EAGAIN;

	/* When offload is started, PCI power collapse is already disabled by
	 * the ipa_eth_pci_disable_pc() api. Nonetheless, we still need to do
@@ -221,7 +290,7 @@ static int ipa_eth_pci_suspend_handler(struct device *dev)
	} else {
		ipa_eth_dev_log(eth_dev,
			"Device suspend delegated to net driver");
		rc = pm_ops_real->suspend(dev);
		rc = eth_dev_pm_ops(eth_dev)->suspend(dev);
	}

	if (rc)
@@ -236,7 +305,6 @@ static int ipa_eth_pci_resume_handler(struct device *dev)
{
	int rc = 0;
	struct ipa_eth_device *eth_dev;
	const struct dev_pm_ops *pm_ops_real;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	eth_dev = lookup_eth_dev(pci_dev);
@@ -245,8 +313,11 @@ static int ipa_eth_pci_resume_handler(struct device *dev)
		return -EFAULT;
	}

	pm_ops_real =
		((struct ipa_eth_pci_driver *)eth_dev->bus_priv)->pm_ops_real;
	/* Just set the flag here. ipa_eth_pm_notifier_cb() will later schedule
	 * global refresh.
	 */
	if (eth_dev->start_on_resume)
		eth_dev->start = true;

	/* During suspend, RC power collapse would not have happened if offload
	 * was started. Ignore resume callback since the device does not need
@@ -259,7 +330,7 @@ static int ipa_eth_pci_resume_handler(struct device *dev)
	} else {
		ipa_eth_dev_log(eth_dev,
			"Device resume delegated to net driver");
		rc = pm_ops_real->resume(dev);
		rc = eth_dev_pm_ops(eth_dev)->resume(dev);
	}

	if (rc)
@@ -271,7 +342,7 @@ static int ipa_eth_pci_resume_handler(struct device *dev)
}

/* MSM PCIe driver invokes only suspend and resume callbacks, other operations
 * can be ignored.
 * can be ignored unless we see a client requiring the feature.
 */
static const struct dev_pm_ops ipa_eth_pci_pm_ops = {
	.suspend = ipa_eth_pci_suspend_handler,
@@ -331,6 +402,11 @@ static void ipa_eth_pci_unregister_net_driver(struct ipa_eth_net_driver *nd)
	struct pci_driver *pci_drv = to_pci_driver(nd->driver);
	struct ipa_eth_pci_driver *epci_drv = lookup_epci_driver(pci_drv);

	if (!epci_drv) {
		ipa_eth_bug("Failed to lookup epci driver");
		return;
	}

	mutex_lock(&pci_drivers_mutex);
	list_del(&epci_drv->driver_list);
	mutex_unlock(&pci_drivers_mutex);
+10 −0
Original line number Diff line number Diff line
@@ -436,6 +436,10 @@ struct ipa_eth_channel {
 *                network device (to monitor link state changes)
 * @init: Allowed to initialize offload path for the device
 * @start: Allowed to start offload data path for the device
 * @start_on_wakeup: Allow start upon wake up by device
 * @start_on_resume: Allow start upon driver resume
 * @start_on_timeout: Timeout in milliseconds after which @start is enabled
 * @start_timer: Timer associated with @start_on_timeout
 * @if_state: Interface state - one or more bit numbers IPA_ETH_IF_ST_*
 * @pm_handle: IPA PM client handle for the device
 * @bus_priv: Private field for use by offload subsystem bus layer
@@ -468,6 +472,12 @@ struct ipa_eth_device {

	bool init;
	bool start;

	bool start_on_wakeup;
	bool start_on_resume;
	u32 start_on_timeout;
	struct timer_list start_timer;

	unsigned long if_state;

	u32 pm_handle;