Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c41ce96e authored by Sujeev Dias
Browse files

mhi: core: power management redesign



In order to support subsystem restart and
link-down recovery, redesign the MHI power
management state machine.

CRs-Fixed: 1081654
Change-Id: I3005b829bfdea1d3e9f086676c446b62b5d30d0b
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
parent 64120cd2
Loading
Loading
Loading
Loading
+55 −45
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/msm_pcie.h>
@@ -134,6 +135,23 @@ enum MHI_BRSTMODE {
	MHI_BRSTMODE_ENABLE = 0x3
};

/*
 * Host-side power-management states.  Each state is a distinct bit so the
 * MHI_*_ACCESS_VALID() helpers below can test membership in a set of
 * states with a single mask operation.
 */
enum MHI_PM_STATE {
	MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
	MHI_PM_POR = 0x1, /* Power On Reset State */
	MHI_PM_M0 = 0x2,
	MHI_PM_M1 = 0x4,
	MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
	MHI_PM_M2 = 0x10,
	MHI_PM_M3_ENTER = 0x20,
	MHI_PM_M3 = 0x40,
	MHI_PM_M3_EXIT = 0x80,
};

/* Channel/event doorbells may only be rung while fully active (M0/M1). */
#define MHI_DB_ACCESS_VALID(pm_state) ((pm_state) & (MHI_PM_M0 | MHI_PM_M1))
/* The device-wake doorbell may additionally be rung from M2. */
#define MHI_WAKE_DB_ACCESS_VALID(pm_state) ((pm_state) & (MHI_PM_M0 | \
							MHI_PM_M1 | MHI_PM_M2))
/*
 * MMIO register access is valid from POR up to, but excluding, M3 exit.
 * NOTE(review): this range test also admits MHI_PM_M1_M2_TRANSITION (0x8)
 * and MHI_PM_M3 (0x40); the enum comment says register access is not
 * allowed during the M1->M2 transition -- confirm against the PM state
 * machine whether a mask-based check was intended instead.
 */
#define MHI_REG_ACCESS_VALID(pm_state) (((pm_state) > MHI_PM_DISABLE) && \
					((pm_state) < MHI_PM_M3_EXIT))
struct __packed mhi_event_ctxt {
	u32 mhi_intmodt;
	u32 mhi_event_er_type;
@@ -184,7 +202,6 @@ enum MHI_PKT_TYPE {
	MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
	MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
	MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
	MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
	MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
	MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
	MHI_PKT_TYPE_TX_EVENT = 0x22,
@@ -296,6 +313,7 @@ struct mhi_ring {
	struct db_mode db_mode;
	u32 msi_disable_cntr;
	u32 msi_enable_cntr;
	spinlock_t ring_lock;
};

enum MHI_CMD_STATUS {
@@ -353,12 +371,19 @@ struct mhi_chan_info {
	u32 flags;
};

/*
 * Per-channel command bookkeeping: the command currently in flight and
 * the synchronization objects used to wait for its completion event.
 */
struct mhi_chan_cfg {
	enum MHI_COMMAND current_cmd; /* command currently being processed */
	struct mutex chan_lock; /* presumably serializes channel operations -- confirm */
	spinlock_t event_lock; /* completion event lock */
	struct completion cmd_complete; /* waited on for command completion */
	struct mhi_cmd_complete_event_pkt cmd_event_pkt; /* completion event copy */
	union mhi_cmd_pkt cmd_pkt; /* command packet awaiting completion */
};

struct mhi_client_handle {
	struct mhi_chan_info chan_info;
	struct mhi_device_ctxt *mhi_dev_ctxt;
	struct mhi_client_info_t client_info;
	struct completion chan_reset_complete;
	struct completion chan_open_complete;
	void *user_data;
	struct mhi_result result;
	u32 device_index;
@@ -395,40 +420,25 @@ struct mhi_buf_info {

/*
 * Driver statistics: state-transition counts, per-channel transfer
 * counters, and pending-work counters.
 *
 * NOTE(review): this span comes from a rendered diff whose +/- markers
 * were stripped.  Several members appear twice (m3_m0, outbound_acks,
 * msi_counter, mhi_reset_cntr), which cannot compile; the first group of
 * lines is the pre-change version interleaved with its replacement.
 * Reconcile against the actual commit before treating this as source.
 */
struct mhi_counters {
	u32 m0_m1;
	u32 m1_m0;
	u32 m1_m2;
	u32 m2_m0;
	u32 m0_m3;
	u32 m3_m0;
	u32 m1_m3;
	u32 mhi_reset_cntr;
	u32 mhi_ready_cntr;
	u32 m3_event_timeouts;
	u32 m0_event_timeouts;
	u32 m2_event_timeouts;
	u32 nr_irq_migrations;
	u32 *msi_counter;
	u32 *ev_counter;
	atomic_t outbound_acks;
	/* lines below appear to be the post-change member list */
	u32 m3_m0;
	u32 chan_pkts_xferd[MHI_MAX_CHANNELS]; /* per-channel packets transferred */
	u32 bb_used[MHI_MAX_CHANNELS]; /* per-channel bounce-buffer uses */
	atomic_t device_wake;
	atomic_t outbound_acks;
	atomic_t events_pending;
	u32 *msi_counter; /* allocated per event ring -- see create_local_ev_ctxt */
	u32 mhi_reset_cntr;
};

/*
 * Miscellaneous driver state flags.
 *
 * NOTE(review): diff residue -- kill_threads appears twice (u32 at two
 * positions), so this exact text cannot compile; one occurrence belongs
 * to the pre-change version of the struct.  Reconcile before use.
 */
struct mhi_flags {
	u32 mhi_initialized;
	u32 pending_M3;
	u32 pending_M0;
	u32 link_up;
	u32 kill_threads;
	atomic_t data_pending;
	atomic_t events_pending;
	atomic_t pending_resume;
	atomic_t pending_ssr;
	atomic_t pending_powerup;
	atomic_t m2_transition;
	int stop_threads;
	atomic_t device_wake;
	u32 ssr;
	u32 kill_threads;
	u32 ev_thread_stopped; /* event thread has exited */
	u32 st_thread_stopped; /* state-transition thread has exited */
};
@@ -474,44 +484,35 @@ struct mhi_dev_space {
};

struct mhi_device_ctxt {
	enum MHI_STATE mhi_state;
	enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
	enum MHI_STATE mhi_state; /* protocol state */
	enum MHI_EXEC_ENV dev_exec_env;

	struct mhi_dev_space dev_space;
	struct mhi_pcie_dev_info *dev_info;
	struct pcie_core_info *dev_props;
	struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];

	struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];

	struct mhi_ring *mhi_local_event_ctxt;
	struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
	struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];


	struct mutex *mhi_chan_mutex;
	struct mutex mhi_link_state;
	spinlock_t *mhi_ev_spinlock_list;
	struct mutex *mhi_cmd_mutex_list;
	struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
	struct mhi_event_ring_cfg *ev_ring_props;
	struct task_struct *event_thread_handle;
	struct task_struct *st_thread_handle;
	struct tasklet_struct ev_task; /* Process control Events */
	struct work_struct process_m1_worker;
	struct mhi_wait_queues mhi_ev_wq;
	struct dev_mmio_info mmio_info;

	u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
	u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
	spinlock_t *db_write_lock;

	struct mhi_state_work_queue state_change_work_item_list;
	enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];

	u32 cmd_ring_order;
	struct mhi_counters counters;
	struct mhi_flags flags;

	u32 device_wake_asserted;

	rwlock_t xfer_lock;
	struct hrtimer m1_timer;
	ktime_t m1_timeout;

@@ -524,11 +525,12 @@ struct mhi_device_ctxt {

	unsigned long esoc_notif;
	enum STATE_TRANSITION base_state;
	atomic_t outbound_acks;

	rwlock_t pm_xfer_lock; /* lock to control PM State */
	spinlock_t dev_wake_lock; /* lock to set wake bit */
	struct mutex pm_lock;
	struct wakeup_source w_lock;

	int enable_lpm;
	char *chan_info;
	struct dentry *mhi_parent_folder;
};
@@ -640,8 +642,9 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
						enum MHI_CB_REASON reason);
void mhi_notify_client(struct mhi_client_handle *client_handle,
		       enum MHI_CB_REASON reason);
int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
			    bool force_set);
int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
			void *hcpu);
@@ -677,12 +680,19 @@ int mhi_runtime_suspend(struct device *dev);
int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
		   struct mhi_chan_info *chan_info);
int mhi_runtime_resume(struct device *dev);
int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_runtime_idle(struct device *dev);
int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
		  enum MHI_TYPE_EVENT_RING type);
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
				int index);
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
void process_m1_transition(struct work_struct *work);
int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
		     enum MHI_STATE new_state);
const char *state_transition_str(enum STATE_TRANSITION state);
void mhi_ctrl_ev_task(unsigned long data);

#endif
+21 −6
Original line number Diff line number Diff line
@@ -41,6 +41,9 @@ static ssize_t bhi_write(struct file *file,
	size_t amount_copied = 0;
	uintptr_t align_len = 0x1000;
	u32 tx_db_val = 0;
	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
	const long bhi_timeout_ms = 1000;
	long timeout;

	if (buf == NULL || 0 == count)
		return -EIO;
@@ -48,8 +51,12 @@ static ssize_t bhi_write(struct file *file,
	if (count > BHI_MAX_IMAGE_SIZE)
		return -ENOMEM;

	wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
	timeout = wait_event_interruptible_timeout(
				*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
				mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
				msecs_to_jiffies(bhi_timeout_ms));
	if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
		return -EIO;

	mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);

@@ -95,6 +102,11 @@ static ssize_t bhi_write(struct file *file,
	bhi_ctxt->image_size = count;

	/* Write the image size */
	read_lock_bh(pm_xfer_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
		read_unlock_bh(pm_xfer_lock);
		goto bhi_copy_error;
	}
	pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
				BHI_IMGADDR_HIGH,
@@ -119,10 +131,15 @@ static ssize_t bhi_write(struct file *file,
			BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);

	mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);

	read_unlock_bh(pm_xfer_lock);
	for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
		u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;

		read_lock_bh(pm_xfer_lock);
		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
			read_unlock_bh(pm_xfer_lock);
			goto bhi_copy_error;
		}
		err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
		errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
		errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
@@ -131,6 +148,7 @@ static ssize_t bhi_write(struct file *file,
						BHI_STATUS,
						BHI_STATUS_MASK,
						BHI_STATUS_SHIFT);
		read_unlock_bh(pm_xfer_lock);
		mhi_log(MHI_MSG_CRITICAL,
		"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
			tx_db_val, err, errdbg1, errdbg2, errdbg3);
@@ -176,9 +194,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
	    || 0 == mhi_pcie_device->core.bar0_end)
		return -EIO;

	mhi_log(MHI_MSG_INFO,
		"Successfully registered char dev. bhi base is: 0x%p.\n",
		bhi_ctxt->bhi_base);
	ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
	if (IS_ERR_VALUE(ret_val)) {
		mhi_log(MHI_MSG_CRITICAL,
+11 −13
Original line number Diff line number Diff line
@@ -89,32 +89,31 @@ dt_error:
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int r = 0;
	int i;

	mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
					mhi_dev_ctxt->mmio_info.nr_event_rings,
					GFP_KERNEL);

	if (!mhi_dev_ctxt->mhi_local_event_ctxt)
		return -ENOMEM;

	mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
	if (!mhi_dev_ctxt->counters.ev_counter) {
		r = -ENOMEM;
		goto free_local_ec_list;
	}
	mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
	if (!mhi_dev_ctxt->counters.msi_counter) {
		r = -ENOMEM;
		goto free_ev_counter;
		goto free_local_ec_list;
	}

	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
		struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
			mhi_local_event_ctxt[i];

		spin_lock_init(&mhi_ring->ring_lock);
	}

	return r;

free_ev_counter:
	kfree(mhi_dev_ctxt->counters.ev_counter);
free_local_ec_list:
	kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
	return r;
@@ -241,10 +240,9 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
	u32 i = 0;
	unsigned long flags = 0;
	int ret_val = 0;
	spinlock_t *lock =
		&mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
	struct mhi_ring *event_ctxt =
		&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
	spinlock_t *lock = &event_ctxt->ring_lock;

	if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
		mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
+102 −26
Original line number Diff line number Diff line
@@ -96,22 +96,6 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
				"Failed to register with esoc ret %d.\n",
				ret_val);
	}
	mhi_pcie_dev->mhi_ctxt.bus_scale_table =
				msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev);
	mhi_pcie_dev->mhi_ctxt.bus_client =
		msm_bus_scale_register_client(
				mhi_pcie_dev->mhi_ctxt.bus_scale_table);
	if (!mhi_pcie_dev->mhi_ctxt.bus_client) {
		mhi_log(MHI_MSG_CRITICAL,
			"Could not register for bus control ret: %d.\n",
			mhi_pcie_dev->mhi_ctxt.bus_client);
	} else {
		ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1);
		if (ret_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Could not set bus frequency ret: %d\n",
				ret_val);
	}

	device_disable_async_suspend(&pcie_device->dev);
	ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
@@ -188,9 +172,7 @@ mhi_state_transition_error:
		   mhi_dev_ctxt->dev_space.dev_mem_len,
		   mhi_dev_ctxt->dev_space.dev_mem_start,
		   mhi_dev_ctxt->dev_space.dma_dev_mem_start);
	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
	kfree(mhi_dev_ctxt->mhi_chan_mutex);
	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);

	kfree(mhi_dev_ctxt->ev_ring_props);
	mhi_rem_pm_sysfs(&pcie_device->dev);
sysfs_config_err:
@@ -203,7 +185,9 @@ msi_config_err:
}

/*
 * PM callbacks for the PCI driver.
 * NOTE(review): diff residue -- SET_RUNTIME_PM_OPS is invoked twice; the
 * first (idle = NULL) is the removed line, the second (mhi_runtime_idle)
 * is its replacement.  Only one may remain in real source.
 */
static const struct dev_pm_ops pm_ops = {
	SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
	SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
			   mhi_runtime_resume,
			   mhi_runtime_idle)
	SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
};

@@ -222,9 +206,10 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
	int ret_val = 0;
	struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
	struct platform_device *plat_dev;
	struct mhi_device_ctxt *mhi_dev_ctxt;
	u32 nr_dev = mhi_devices.nr_of_devices;

	mhi_log(MHI_MSG_INFO, "Entering.\n");
	mhi_log(MHI_MSG_INFO, "Entering\n");
	mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
	if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
		mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
@@ -234,29 +219,120 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
	mhi_devices.nr_of_devices++;
	plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
	pcie_device->dev.of_node = plat_dev->dev.of_node;
	pm_runtime_put_noidle(&pcie_device->dev);
	mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
	mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
	INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
	mutex_init(&mhi_dev_ctxt->pm_lock);
	rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
	spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
	tasklet_init(&mhi_dev_ctxt->ev_task,
		     mhi_ctrl_ev_task,
		     (unsigned long)mhi_dev_ctxt);

	mhi_dev_ctxt->flags.link_up = 1;
	ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
	mhi_pcie_dev->pcie_device = pcie_device;
	mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
	mhi_pcie_dev->mhi_pci_link_event.events =
			(MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP |
			 MSM_PCIE_EVENT_WAKEUP);
			(MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
	mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
	mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
	mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
	ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
	if (ret_val)
	if (ret_val) {
		mhi_log(MHI_MSG_ERROR,
			"Failed to register for link notifications %d.\n",
			ret_val);
		return ret_val;
	}

	/* Initialize MHI CNTXT */
	ret_val = mhi_ctxt_init(mhi_pcie_dev);
	if (ret_val) {
		mhi_log(MHI_MSG_ERROR,
			"MHI Initialization failed, ret %d\n",
			ret_val);
		goto deregister_pcie;
	}
	pci_set_master(mhi_pcie_dev->pcie_device);

	mutex_lock(&mhi_dev_ctxt->pm_lock);
	write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
	mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
	ret_val = set_mhi_base_state(mhi_pcie_dev);
	write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
	if (ret_val) {
		mhi_log(MHI_MSG_ERROR,
			"Error Setting MHI Base State %d\n", ret_val);
		goto unlock_pm_lock;
	}

	if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
		ret_val = bhi_probe(mhi_pcie_dev);
		if (ret_val) {
			mhi_log(MHI_MSG_ERROR,
				"Error with bhi_probe ret:%d", ret_val);
			goto unlock_pm_lock;
		}
	}

	init_mhi_base_state(mhi_dev_ctxt);

	pm_runtime_set_autosuspend_delay(&pcie_device->dev,
					 MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
	pm_runtime_use_autosuspend(&pcie_device->dev);
	pm_suspend_ignore_children(&pcie_device->dev, true);

	/*
	 * pci framework will increment usage count (twice) before
	 * calling local device driver probe function.
	 * 1st pci.c pci_pm_init() calls pm_runtime_forbid
	 * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
	 * Framework expect pci device driver to call pm_runtime_put_noidle
	 * to decrement usage count after successful probe and
	 * and call pm_runtime_allow to enable runtime suspend.
	 * MHI will allow runtime after entering AMSS state.
	 */
	pm_runtime_mark_last_busy(&pcie_device->dev);
	pm_runtime_put_noidle(&pcie_device->dev);

	/*
	 * Keep the MHI state in Active (M0) state until AMSS because EP
	 * would error fatal if we try to enter M1 before entering
	 * AMSS state.
	 */
	read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
	mhi_assert_device_wake(mhi_dev_ctxt, false);
	read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);

	mutex_unlock(&mhi_dev_ctxt->pm_lock);

	return 0;

unlock_pm_lock:
	mutex_unlock(&mhi_dev_ctxt->pm_lock);
deregister_pcie:
	msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
	return ret_val;
}

static int mhi_plat_probe(struct platform_device *pdev)
{
	u32 nr_dev = mhi_devices.nr_of_devices;
	struct mhi_device_ctxt *mhi_dev_ctxt;
	int r = 0;

	mhi_log(MHI_MSG_INFO, "Entered\n");
	mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;

	mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (!mhi_dev_ctxt->bus_scale_table)
		return -ENODATA;
	mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
		(mhi_dev_ctxt->bus_scale_table);
	if (!mhi_dev_ctxt->bus_client)
		return -EINVAL;

	mhi_devices.device_list[nr_dev].plat_dev = pdev;
	r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
	if (r)
+14 −43
Original line number Diff line number Diff line
@@ -27,46 +27,21 @@ static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int i;

	mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
					mhi_dev_ctxt->mmio_info.nr_event_rings,
					GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
		goto ev_mutex_free;
	mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
						MHI_MAX_CHANNELS, GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
		goto chan_mutex_free;
	mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
						NR_OF_CMD_RINGS, GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
		goto cmd_mutex_free;

	mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
						MHI_MAX_CHANNELS, GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->db_write_lock)
		goto db_write_lock_free;
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
		spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
	for (i = 0; i < NR_OF_CMD_RINGS; ++i)
		mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
	rwlock_init(&mhi_dev_ctxt->xfer_lock);
	mutex_init(&mhi_dev_ctxt->mhi_link_state);
	mutex_init(&mhi_dev_ctxt->pm_lock);
	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
	return 0;
	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];

db_write_lock_free:
	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
cmd_mutex_free:
	kfree(mhi_dev_ctxt->mhi_chan_mutex);
chan_mutex_free:
	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
ev_mutex_free:
	return -ENOMEM;
		mutex_init(&mhi_dev_ctxt->mhi_chan_cfg[i].chan_lock);
		spin_lock_init(&mhi_dev_ctxt->mhi_chan_cfg[i].event_lock);
		spin_lock_init(&ring->ring_lock);
	}

	for (i = 0; i < NR_OF_CMD_RINGS; i++) {
		struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[i];

		spin_lock_init(&ring->ring_lock);
	}

	return 0;
}

size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -549,7 +524,6 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
	}
	init_event_ctxt_array(mhi_dev_ctxt);
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	mhi_dev_ctxt->enable_lpm = 1;

	r = mhi_spawn_threads(mhi_dev_ctxt);
	if (r) {
@@ -575,9 +549,6 @@ error_wq_init:
		   mhi_dev_ctxt->dev_space.dma_dev_mem_start);
error_during_dev_mem_init:
error_during_local_ev_ctxt:
	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
	kfree(mhi_dev_ctxt->mhi_chan_mutex);
	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
error_during_sync:
	kfree(mhi_dev_ctxt->ev_ring_props);
error_during_props:
Loading