Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6762b842 authored by Linux Build Service Account; committed by Gerrit — the friendly Code Review server
Browse files

Merge "mhi: core: Enable BB scheme for MHI"

parents 319472d7 522f301f
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -11,7 +11,6 @@ Required properties:
    below properties:
	- esoc-names
	- esoc-0
  - wakeup-gpios: gpio used to wake device from low power mode.
  - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
    below optional properties:
	- qcom,msm-bus,name
@@ -28,8 +27,6 @@ Example:
		compatible = "qcom,mhi";
		esoc-names = "mdm";
		esoc-0 = <&mdm1>;
		mhi-device-wake-gpio =
			<&msmgpio 108 0>;
		qcom,msm-bus,name = "mhi";
		qcom,msm-bus,num-cases = <2>;
		qcom,msm-bus,num-paths = <1>;
+49 −26
Original line number Diff line number Diff line
@@ -49,7 +49,6 @@ struct pcie_core_info {
	void __iomem *bar0_end;
	void __iomem *bar2_base;
	void __iomem *bar2_end;
	u32 device_wake_gpio;
	u32 irq_base;
	u32 max_nr_msis;
	struct pci_saved_state *pcie_state;
@@ -252,7 +251,7 @@ enum MHI_EVENT_CCS {
	MHI_EVENT_CC_OOB = 0x5,
	MHI_EVENT_CC_DB_MODE = 0x6,
	MHI_EVENT_CC_UNDEFINED_ERR = 0x10,
	MHI_EVENT_CC_RING_EL_ERR = 0x11,
	MHI_EVENT_CC_BAD_TRE = 0x11,
};

struct mhi_ring {
@@ -357,12 +356,14 @@ struct mhi_state_work_queue {
	enum STATE_TRANSITION buf[MHI_WORK_Q_MAX_SIZE];
};

struct mhi_control_seg {
	union mhi_cmd_pkt cmd_trb_list[NR_OF_CMD_RINGS][CMD_EL_PER_RING + 1];
	struct mhi_cmd_ctxt mhi_cmd_ctxt_list[NR_OF_CMD_RINGS];
	struct mhi_chan_ctxt mhi_cc_list[MHI_MAX_CHANNELS];
	struct mhi_event_ctxt *mhi_ec_list;
	u32 padding;
struct mhi_buf_info {
	dma_addr_t bb_p_addr;
	void *bb_v_addr;
	void *client_buf;
	size_t buf_len;
	size_t filled_size;
	enum dma_data_direction dir;
	int bb_active;
};

struct mhi_counters {
@@ -384,6 +385,7 @@ struct mhi_counters {
	u32 *ev_counter;
	atomic_t outbound_acks;
	u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
	u32 bb_used[MHI_MAX_CHANNELS];
};

struct mhi_flags {
@@ -423,19 +425,37 @@ struct dev_mmio_info {
	u64 mmio_len;
	u32 nr_event_rings;
	dma_addr_t dma_ev_ctxt; /* Bus address of ECABAP*/
	void *dma_ev_rings;
};

struct mhi_ring_ctxt {
	struct mhi_event_ctxt *ec_list;
	struct mhi_chan_ctxt *cc_list;
	struct mhi_cmd_ctxt *cmd_ctxt;
	dma_addr_t dma_ec_list;
	dma_addr_t dma_cc_list;
	dma_addr_t dma_cmd_ctxt;
};

struct mhi_dev_space {
	void *dev_mem_start;
	dma_addr_t dma_dev_mem_start;
	size_t dev_mem_len;
	struct mhi_ring_ctxt ring_ctxt;
	dma_addr_t start_win_addr;
	dma_addr_t end_win_addr;
};

struct mhi_device_ctxt {
	enum MHI_STATE mhi_state;
	enum MHI_EXEC_ENV dev_exec_env;

	struct mhi_dev_space dev_space;
	struct mhi_pcie_dev_info *dev_info;
	struct pcie_core_info *dev_props;
	struct mhi_control_seg *mhi_ctrl_seg;
	struct mhi_meminfo *mhi_ctrl_seg_info;
	struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];

	struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];

	struct mhi_ring *mhi_local_event_ctxt;
	struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];

@@ -511,8 +531,14 @@ struct mhi_event_ring_cfg {
	irqreturn_t (*mhi_handler_ptr)(int , void *);
};

struct mhi_data_buf {
	dma_addr_t bounce_buffer;
	dma_addr_t client_buffer;
	u32 bounce_flag;
};

irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id);
enum MHI_STATUS mhi_reset_all_thread_queues(
int mhi_reset_all_thread_queues(
					struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_add_elements_to_event_rings(
				struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -521,20 +547,18 @@ int get_nr_avail_ring_elements(struct mhi_ring *ring);
enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *loc_1,
					void *loc_2, u32 *nr_el);
enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
				struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
		u32 nr_ev_el, u32 event_ring_index);
/*Mhi Initialization functions */
enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
				enum MHI_INIT_ERROR_STAGE cleanup_stage);
enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *dest_device,
			enum MHI_COMMAND which_cmd, u32 chan);
enum MHI_STATUS mhi_queue_tx_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
				enum MHI_CLIENT_CHANNEL chan,
				void *payload,
				size_t payload_size);
enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
				   uintptr_t trb_list_phy,
				   uintptr_t trb_list_virt,
				   u64 el_per_ring,
@@ -545,11 +569,11 @@ enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
					      u32 chan);
enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp,
int delete_element(struct mhi_ring *ring, void **rp,
			 void **wp, void **assigned_addr);
enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring, void **assigned_addr);
enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring, void **assigned_addr);
enum MHI_STATUS get_element_index(struct mhi_ring *ring, void *address,
int ctxt_add_element(struct mhi_ring *ring, void **assigned_addr);
int ctxt_del_element(struct mhi_ring *ring, void **assigned_addr);
int get_element_index(struct mhi_ring *ring, void *address,
							uintptr_t *index);
enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
	struct mhi_ring *ring, enum MHI_RING_TYPE ring_type, u32 ring_index);
@@ -565,8 +589,8 @@ enum MHI_STATUS mhi_test_for_device_ready(
					struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_test_for_device_reset(
					struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
enum MHI_STATUS validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
int mhi_state_change_thread(void *ctxt);
enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
					enum STATE_TRANSITION new_state);
@@ -575,7 +599,6 @@ enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer);
int mhi_pci_suspend(struct pci_dev *dev, pm_message_t state);
int mhi_pci_resume(struct pci_dev *dev);
int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev);
int mhi_init_gpios(struct mhi_pcie_dev_info *mhi_pcie_dev);
int mhi_init_pm_sysfs(struct device *dev);
void mhi_rem_pm_sysfs(struct device *dev);
void mhi_pci_remove(struct pci_dev *mhi_device);
@@ -589,7 +612,7 @@ void mhi_notify_client(struct mhi_client_handle *client_handle,
		       enum MHI_CB_REASON reason);
int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
			void *hcpu);
enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
@@ -621,7 +644,7 @@ int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
		  enum MHI_TYPE_EVENT_RING type);
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
				int index);
int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt);
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);

#endif
+6 −3
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include "mhi_sys.h"
#include "mhi.h"
@@ -73,10 +74,12 @@ static ssize_t bhi_write(struct file *file,
		goto bhi_copy_error;
	}
	amount_copied = count;
	/* Flush the writes, in anticipation for a device read */
	wmb();
	mhi_log(MHI_MSG_INFO,
		"Copied image from user at addr: %p\n", bhi_ctxt->image_loc);
	bhi_ctxt->phy_image_loc = dma_map_single(NULL,
	bhi_ctxt->phy_image_loc = dma_map_single(
			&mhi_dev_ctxt->dev_info->plat_dev->dev,
			bhi_ctxt->image_loc,
			bhi_ctxt->image_size,
			DMA_TO_DEVICE);
@@ -131,7 +134,8 @@ static ssize_t bhi_write(struct file *file,
			break;
		usleep_range(20000, 25000);
	}
	dma_unmap_single(NULL, bhi_ctxt->phy_image_loc,
	dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
			bhi_ctxt->phy_image_loc,
			bhi_ctxt->image_size, DMA_TO_DEVICE);

	kfree(bhi_ctxt->unaligned_image_loc);
@@ -168,7 +172,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
	bhi_ctxt->bhi_base = mhi_pcie_device->core.bar0_base;
	pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHIOFF);
	bhi_ctxt->bhi_base += pcie_word_val;
	wmb();

	mhi_log(MHI_MSG_INFO,
		"Successfully registered char dev. bhi base is: 0x%p.\n",
+18 −82
Original line number Diff line number Diff line
@@ -73,30 +73,17 @@ dt_error:
	return r;
}

int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt)
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int r = 0, i;
	struct mhi_event_ctxt *ev_ctxt = NULL;

	size_t ctxt_size = sizeof(struct mhi_event_ctxt) *
				    mhi_dev_ctxt->mmio_info.nr_event_rings;
	/* Allocate the event contexts in uncached memory */
	mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list =
				dma_alloc_coherent(
				&mhi_dev_ctxt->dev_info->plat_dev->dev,
				ctxt_size,
				&mhi_dev_ctxt->mmio_info.dma_ev_ctxt,
				GFP_KERNEL);
	if (!mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list)
		return -ENOMEM;
	int r = 0;

	mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
					mhi_dev_ctxt->mmio_info.nr_event_rings,
					GFP_KERNEL);
	if (!mhi_dev_ctxt->mhi_local_event_ctxt) {
		r = -ENOMEM;
		goto free_ec_list;
	}

	if (!mhi_dev_ctxt->mhi_local_event_ctxt)
		return -ENOMEM;

	mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
@@ -111,73 +98,19 @@ int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt)
		r = -ENOMEM;
		goto free_ev_counter;
	}

	mhi_dev_ctxt->mmio_info.dma_ev_rings = kzalloc(sizeof(void *) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
	if (!mhi_dev_ctxt->mmio_info.dma_ev_rings) {
		r = -ENOMEM;
		goto free_msi_counter;
	}
	mhi_log(MHI_MSG_INFO, "Allocated ECABAP at Virt: 0x%p, Phys 0x%lx\n",
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list,
			(uintptr_t)mhi_dev_ctxt->mmio_info.dma_ev_ctxt);

	/* Allocate event ring elements for each ring */
	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
		dma_addr_t ring_base_addr;
		ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i];
		mhi_dev_ctxt->mhi_local_event_ctxt[i].base =
			dma_alloc_coherent(
				&mhi_dev_ctxt->dev_info->plat_dev->dev,
				sizeof(union mhi_event_pkt) *
				mhi_dev_ctxt->ev_ring_props[i].nr_desc,
				&ring_base_addr,
				GFP_KERNEL);
		if (!mhi_dev_ctxt->mhi_local_event_ctxt[i].base) {
			r = -ENOMEM;
			goto free_event_ring;
		}

		ev_ctxt->mhi_event_ring_base_addr = ring_base_addr;
		ev_ctxt->mhi_event_read_ptr = ring_base_addr;
		ev_ctxt->mhi_event_write_ptr = ring_base_addr;

		mhi_dev_ctxt->mhi_local_event_ctxt[i].wp =
				mhi_dev_ctxt->mhi_local_event_ctxt[i].base;
		mhi_dev_ctxt->mhi_local_event_ctxt[i].rp =
				mhi_dev_ctxt->mhi_local_event_ctxt[i].base;
		mhi_log(MHI_MSG_INFO, "Allocated Event Ring %d\n", i);
	}
	return r;

free_event_ring:
	for (; i > 0; --i) {
		ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i];
		dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
				  sizeof(union mhi_event_pkt *) *
				  mhi_dev_ctxt->ev_ring_props[i].nr_desc,
				  mhi_dev_ctxt->mhi_local_event_ctxt[i].base,
				  ev_ctxt->mhi_event_ring_base_addr);
	}
	kfree(mhi_dev_ctxt->mmio_info.dma_ev_rings);
free_msi_counter:
	kfree(mhi_dev_ctxt->counters.msi_counter);
free_ev_counter:
	kfree(mhi_dev_ctxt->counters.ev_counter);
free_local_ec_list:
	kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
free_ec_list:
	dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
				ctxt_size,
				mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list,
				mhi_dev_ctxt->mmio_info.dma_ev_ctxt);
	return r;
}
void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
	struct mhi_ring *event_ctxt = NULL;
	u64 db_value = 0;

	event_ctxt =
		&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
	db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
@@ -203,22 +136,20 @@ static enum MHI_STATUS mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
	return MHI_STATUS_SUCCESS;
}

int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int i;
	struct mhi_ring *mhi_local_event_ctxt = NULL;
	struct mhi_event_ctxt *event_ctxt;
	struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;

	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
		event_ctxt = &mhi_ctrl->mhi_ec_list[i];
		event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
		mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
		mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
			mhi_dev_ctxt->ev_ring_props[i].nr_desc,
			mhi_dev_ctxt->ev_ring_props[i].intmod,
			mhi_dev_ctxt->ev_ring_props[i].msi_vec);
	}
	return 0;
}

int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -291,7 +222,8 @@ enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
	mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
			mhi_dev_ctxt->mmio_info.mmio_addr,
			mhi_dev_ctxt->mmio_info.mmio_len);
	mhi_log(MHI_MSG_INFO, "Initializing event ring %d\n", ring_index);
	mhi_log(MHI_MSG_INFO, "Initializing event ring %d with %d desc\n",
			ring_index, nr_ev_el);

	for (i = 0; i < nr_ev_el - 1; ++i) {
		ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt);
@@ -312,16 +244,20 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
{
	struct mhi_event_ctxt *ev_ctxt;
	struct mhi_ring *local_ev_ctxt;

	mhi_log(MHI_MSG_VERBOSE, "Resetting event index %d\n", index);
	ev_ctxt =
	    &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[index];
	    &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
	local_ev_ctxt =
	    &mhi_dev_ctxt->mhi_local_event_ctxt[index];
	ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
	ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
	local_ev_ctxt->rp = local_ev_ctxt->base;
	local_ev_ctxt->wp = local_ev_ctxt->base;

	ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
	ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
	ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
	/* Flush writes to MMIO */
	wmb();
}
+32 −30
Original line number Diff line number Diff line
@@ -20,6 +20,8 @@
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>

#define CREATE_TRACE_POINTS
#include "mhi_trace.h"
@@ -64,8 +66,7 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
{
	int ret_val = 0;
	u32 i = 0, j = 0;
	u32 retry_count = 0;
	u32 msi_number = 32;
	u32 requested_msi_number = 32, actual_msi_number = 0;
	struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
	struct pci_dev *pcie_device = NULL;

@@ -74,15 +75,14 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
	pcie_device = mhi_pcie_dev->pcie_device;

	ret_val = mhi_init_pcie_device(mhi_pcie_dev);
	if (0 != ret_val) {
	if (ret_val) {
		mhi_log(MHI_MSG_CRITICAL,
				"Failed to initialize pcie device, ret %d\n",
				ret_val);
		return -ENODEV;
	}
	ret_val = mhi_init_device_ctxt(mhi_pcie_dev,
					&mhi_pcie_dev->mhi_ctxt);
	if (MHI_STATUS_SUCCESS != ret_val) {
	ret_val = mhi_init_device_ctxt(mhi_pcie_dev, &mhi_pcie_dev->mhi_ctxt);
	if (ret_val) {
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to initialize main MHI ctxt ret %d\n",
			ret_val);
@@ -112,12 +112,20 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
	}

	device_disable_async_suspend(&pcie_device->dev);
	ret_val = pci_enable_msi_range(pcie_device, 0, msi_number);
	if (0 != ret_val) {
	ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
	if (IS_ERR_VALUE(ret_val)) {
		mhi_log(MHI_MSG_ERROR,
			"Failed to enable MSIs for pcie dev ret_val %d.\n",
			ret_val);
		goto msi_config_err;
	} else if (ret_val) {
		mhi_log(MHI_MSG_INFO,
			"Hrmmm, got fewer MSIs than we requested. Requested %d, got %d.\n",
			requested_msi_number, ret_val);
		actual_msi_number = ret_val;
	} else {
		mhi_log(MHI_MSG_VERBOSE,
			"Got all requested MSIs, moving on\n");
	}
	mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;

@@ -142,23 +150,7 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
	mhi_pcie_dev->core.irq_base = pcie_device->irq;
	mhi_log(MHI_MSG_VERBOSE,
		"Setting IRQ Base to 0x%x\n", mhi_pcie_dev->core.irq_base);
	mhi_pcie_dev->core.max_nr_msis = msi_number;
	do  {
		ret_val = mhi_init_gpios(mhi_pcie_dev);
		switch (ret_val) {
		case -EPROBE_DEFER:
			mhi_log(MHI_MSG_VERBOSE,
				"DT requested probe defer, wait and retry\n");
			break;
		case 0:
			break;
		default:
			mhi_log(MHI_MSG_CRITICAL,
				"Could not get gpio from struct device tree!\n");
			goto msi_config_err;
		}
		retry_count++;
	} while ((retry_count < DT_WAIT_RETRIES) && (ret_val == -EPROBE_DEFER));
	mhi_pcie_dev->core.max_nr_msis = requested_msi_number;
	ret_val = mhi_init_pm_sysfs(&pcie_device->dev);
	if (ret_val != 0) {
		mhi_log(MHI_MSG_ERROR, "Failed to setup sysfs.\n");
@@ -189,17 +181,26 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
	return ret_val;

mhi_state_transition_error:
	if (MHI_STATUS_SUCCESS != mhi_clean_init_stage(&mhi_pcie_dev->mhi_ctxt,
				MHI_INIT_ERROR_STAGE_UNWIND_ALL))
		mhi_log(MHI_MSG_ERROR, "Could not clean up context\n");
	kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
	kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
	kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
	kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
	kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
	kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
	dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
		   mhi_dev_ctxt->dev_space.dev_mem_len,
		   mhi_dev_ctxt->dev_space.dev_mem_start,
		   mhi_dev_ctxt->dev_space.dma_dev_mem_start);
	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
	kfree(mhi_dev_ctxt->mhi_chan_mutex);
	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
	kfree(mhi_dev_ctxt->ev_ring_props);
	mhi_rem_pm_sysfs(&pcie_device->dev);
sysfs_config_err:
	gpio_free(mhi_pcie_dev->core.device_wake_gpio);
	for (; i >= 0; --i)
		free_irq(pcie_device->irq + i, &pcie_device->dev);
	debugfs_remove_recursive(mhi_pcie_dev->mhi_ctxt.mhi_parent_folder);
msi_config_err:
	pci_disable_msi(pcie_device);
	pci_disable_device(pcie_device);
	return ret_val;
}
@@ -255,6 +256,7 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
static int mhi_plat_probe(struct platform_device *pdev)
{
	u32 nr_dev = mhi_devices.nr_of_devices;

	mhi_log(MHI_MSG_INFO, "Entered\n");
	mhi_devices.device_list[nr_dev].plat_dev = pdev;
	mhi_log(MHI_MSG_INFO, "Exited\n");
Loading