Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b0394e94 authored by Andrei Danaila, committed by Matt Wagantall
Browse files

mhi: core: Enable runtime PM



Enable runtime PM in the MHI driver to increase transparency
between kernel and device driver and synchronize runtime PM
actions with system wide power states.

Change-Id: I8b87c2ed7b0be8a4cf9568ac1fb772eebe416dc6
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
parent 823c5dd7
Loading
Loading
Loading
Loading
+4 −8
Original line number Diff line number Diff line
@@ -392,8 +392,6 @@ struct mhi_flags {
	u32 kill_threads;
	atomic_t data_pending;
	atomic_t events_pending;
	atomic_t m0_work_enabled;
	atomic_t m3_work_enabled;
	atomic_t pending_resume;
	atomic_t pending_ssr;
	atomic_t pending_powerup;
@@ -460,10 +458,7 @@ struct mhi_device_ctxt {
	rwlock_t xfer_lock;
	struct hrtimer m1_timer;
	ktime_t m1_timeout;
	struct delayed_work m3_work;
	struct work_struct m0_work;

	struct workqueue_struct *work_queue;
	ktime_t ul_acc_tmr_timeout;
	struct mhi_chan_counters mhi_chan_cntr[MHI_MAX_CHANNELS];
	u32 ev_counter[MHI_MAX_CHANNELS];
	u32 bus_client;
@@ -574,8 +569,6 @@ int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt);
void delayed_m3(struct work_struct *work);
void m0_work(struct work_struct *work);
int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -592,5 +585,8 @@ void mhi_reg_write(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 mhi_reg_read(void __iomem *io_addr, uintptr_t io_offset);
u32 mhi_reg_read_field(void __iomem *io_addr, uintptr_t io_offset,
			 u32 mask, u32 shift);
void mhi_exit_m2(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_runtime_suspend(struct device *dev);
int mhi_runtime_resume(struct device *dev);

#endif
+10 −0
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@
#include <linux/msm-bus.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>

#define CREATE_TRACE_POINTS
#include "mhi_trace.h"
@@ -157,6 +158,8 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)

	mhi_pcie_dev->mhi_ctxt.mmio_addr = mhi_pcie_dev->core.bar0_base;
	pcie_device->dev.platform_data = &mhi_pcie_dev->mhi_ctxt;
	mhi_pcie_dev->mhi_ctxt.dev_info->plat_dev->dev.platform_data =
						&mhi_pcie_dev->mhi_ctxt;
	if (mhi_pcie_dev->mhi_ctxt.base_state == STATE_TRANSITION_BHI) {
		ret_val = bhi_probe(mhi_pcie_dev);
		if (ret_val) {
@@ -189,6 +192,12 @@ msi_config_err:
	return ret_val;
}

static const struct dev_pm_ops pm_ops = {
	.runtime_suspend = mhi_runtime_suspend,
	.runtime_resume = mhi_runtime_resume,
	.runtime_idle = NULL,
};

static struct pci_driver mhi_pcie_driver = {
	.name = "mhi_pcie_drv",
	.id_table = mhi_pcie_device_id,
@@ -247,6 +256,7 @@ static struct platform_driver mhi_plat_driver = {
		.name		= "mhi",
		.owner		= THIS_MODULE,
		.of_match_table	= mhi_plat_match,
		.pm = &pm_ops,
	},
};

+0 −8
Original line number Diff line number Diff line
@@ -473,14 +473,6 @@ static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
static enum MHI_STATUS mhi_init_work_queues(
		struct mhi_device_ctxt *mhi_dev_ctxt)
{
	mhi_dev_ctxt->work_queue = create_singlethread_workqueue("mhi");
	if (NULL == mhi_dev_ctxt->work_queue) {
		mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER,
			"Failed to create MHI work queue.\n");
		return MHI_STATUS_ERROR;
	}
	INIT_DELAYED_WORK(&mhi_dev_ctxt->m3_work, delayed_m3);
	INIT_WORK(&mhi_dev_ctxt->m0_work, m0_work);
	return MHI_STATUS_SUCCESS;
}

+1 −0
Original line number Diff line number Diff line
@@ -28,6 +28,7 @@
#define MHI_MAX_RESUME_TIMEOUT 5000
#define MHI_MAX_SUSPEND_TIMEOUT 5000
#define MHI_MAX_CMD_TIMEOUT 500
#define MHI_RPM_AUTOSUSPEND_TMR_VAL_MS 1000

#define MAX_NR_MSI 4

+42 −69
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/pm_runtime.h>

#include "mhi_sys.h"
#include "mhi.h"
@@ -234,6 +235,7 @@ enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle)
	chan = client_handle->chan;
	mhi_log(MHI_MSG_INFO,
		"Entered: Client opening chan 0x%x\n", chan);
	init_completion(&client_handle->chan_open_complete);
	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	switch (mhi_dev_ctxt->dev_exec_env) {
	case MHI_EXEC_ENV_PBL:
@@ -314,9 +316,6 @@ enum MHI_STATUS mhi_register_channel(struct mhi_client_handle **client_handle,

	(*client_handle)->user_data = UserData;

	init_completion(&(*client_handle)->chan_reset_complete);
	init_completion(&(*client_handle)->chan_open_complete);

	(*client_handle)->cb_mod = 1;
	(*client_handle)->chan_status = 0;
	(*client_handle)->magic = MHI_HANDLE_MAGIC;
@@ -353,6 +352,7 @@ void mhi_close_channel(struct mhi_client_handle *mhi_handle)
		return;
	chan = mhi_handle->chan;
	mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
	init_completion(&mhi_handle->chan_reset_complete);
	index = mhi_handle->device_index;
	if (!atomic_read(&mhi_handle->mhi_dev_ctxt->flags.pending_ssr)) {
		ret_val = mhi_send_cmd(mhi_handle->mhi_dev_ctxt,
@@ -362,15 +362,14 @@ void mhi_close_channel(struct mhi_client_handle *mhi_handle)
				"Failed to send reset cmd for chan %d ret %d\n",
				chan, ret_val);
		}
		r = wait_for_completion_interruptible_timeout(
		r = wait_for_completion_timeout(
				&mhi_handle->chan_reset_complete,
				msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));

		if (0 == r || -ERESTARTSYS == r) {
		if (!r)
			mhi_log(MHI_MSG_ERROR,
					"Failed to reset chan %d ret %d\n",
					chan, r);
		}
	} else {
		/*
		 * Assumption: Device is not playing with our
@@ -473,87 +472,62 @@ enum MHI_STATUS mhi_add_elements_to_event_rings(
	return ret_val;
}

static enum MHI_STATUS mhi_wake_dev_from_m3(
		struct mhi_device_ctxt *mhi_dev_ctxt)
void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
					  u32 chan)
{
	int r = 0;
	if (!atomic_cmpxchg(&mhi_dev_ctxt->flags.m0_work_enabled, 0, 1)) {
		mhi_log(MHI_MSG_INFO,
			"Initiating M0 work...\n");
		if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
			mhi_log(MHI_MSG_INFO,
			"Resume is pending, quitting ...\n");
			atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0);
			__pm_stay_awake(&mhi_dev_ctxt->w_lock);
			__pm_relax(&mhi_dev_ctxt->w_lock);
			return MHI_STATUS_SUCCESS;
		}
		r = queue_work(mhi_dev_ctxt->work_queue,
		     &mhi_dev_ctxt->m0_work);
		if (!r)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to start M0 work.\n");
	} else {
		mhi_log(MHI_MSG_VERBOSE,
			"M0 work pending.\n");
	}
	return MHI_STATUS_SUCCESS;
}

static enum MHI_STATUS mhi_notify_device(
			struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	unsigned long flags = 0;
	u64 db_value;
	struct mhi_chan_ctxt *chan_ctxt;
	chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
	if (likely(((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) ||
		(MHI_STATE_M1 == mhi_dev_ctxt->mhi_state)) &&
		(chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR) &&
		!mhi_dev_ctxt->flags.pending_M3)) {

		mhi_dev_ctxt->mhi_chan_db_order[chan]++;
		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
	u64 db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
		(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
	mhi_dev_ctxt->mhi_chan_db_order[chan]++;
	if (IS_HARDWARE_CHANNEL(chan) && (chan % 2)) {
		if (unlikely(mhi_xfer_db_interval != 0)) {
			if ((mhi_dev_ctxt->
				mhi_chan_cntr[chan].pkts_xferd %
						mhi_xfer_db_interval) == 0) {
					mhi_xfer_db_interval) == 0)
				mhi_process_db(mhi_dev_ctxt,
					mhi_dev_ctxt->channel_db_addr,
						chan, db_value);
				}
		} else {
			if ((mhi_dev_ctxt->
				mhi_chan_cntr[chan].pkts_xferd %
						MHI_XFER_DB_INTERVAL) == 0) {
					MHI_XFER_DB_INTERVAL) == 0)
				mhi_process_db(mhi_dev_ctxt,
					mhi_dev_ctxt->channel_db_addr,
						chan, db_value);
		}
			}
	} else {
		mhi_process_db(mhi_dev_ctxt,
		     mhi_dev_ctxt->channel_db_addr,
		     chan, db_value);
	}
}

static enum MHI_STATUS mhi_notify_device(
			struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
	unsigned long flags = 0;
	struct mhi_chan_ctxt *chan_ctxt;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];

	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
	if (likely(((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) ||
		(MHI_STATE_M1 == mhi_dev_ctxt->mhi_state)) &&
		(chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR) &&
		!mhi_dev_ctxt->flags.pending_M3)) {
		mhi_update_chan_db(mhi_dev_ctxt, chan);
	} else {
		mhi_log(MHI_MSG_VERBOSE,
			"Wakeup, pending data MHI state %d, chan state %d\n",
			mhi_dev_ctxt->mhi_state, chan_ctxt->mhi_chan_state);
		if (mhi_dev_ctxt->flags.pending_M3 ||
		    mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
			mhi_wake_dev_from_m3(mhi_dev_ctxt);
		}
			"Wakeup, pending data state %d chan state %d\n",
						mhi_dev_ctxt->mhi_state,
						chan_ctxt->mhi_chan_state);
		ret_val = MHI_STATUS_SUCCESS;
	}
	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
	/*
	 * If there are no clients still sending we can trigger our
	 * inactivity timer
	 */
	return MHI_STATUS_SUCCESS;
	return ret_val;
}

enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
@@ -574,6 +548,7 @@ enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
			"Client buffer is of invalid length\n");
	mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	chan = client_handle->chan;
	pm_runtime_get(&mhi_dev_ctxt->dev_info->plat_dev->dev);

	/* Bump up the vote for pending data */
	read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
@@ -617,10 +592,10 @@ enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle,
	}

	mhi_notify_device(mhi_dev_ctxt, chan);
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	return MHI_STATUS_SUCCESS;
error:
	atomic_dec(&mhi_dev_ctxt->flags.data_pending);
	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	return ret_val;
}
EXPORT_SYMBOL(mhi_queue_xfer);
@@ -649,6 +624,7 @@ enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
			chan, cmd);

	mhi_assert_device_wake(mhi_dev_ctxt);
	pm_runtime_get(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	/*
	 * If there is a cmd pending a struct device confirmation,
	 * do not send anymore for this channel
@@ -725,16 +701,13 @@ enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
		mhi_dev_ctxt->cmd_ring_order++;
		mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr, 0,
								db_value);
	} else {
		mhi_log(MHI_MSG_INFO,
			"Waking dev from M3 for cmd %d on chan %d\n",
			 cmd, chan);
		mhi_wake_dev_from_m3(mhi_dev_ctxt);
	}

	mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan %d\n",
								cmd, chan);
	mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
	pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->plat_dev->dev);

	mhi_log(MHI_MSG_INFO, "Exited.\n");
	return MHI_STATUS_SUCCESS;
Loading