Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d92220fe authored by Andrei Danaila, committed by Matt Wagantall
Browse files

mhi: core: Parse ring information from DT



Parse MHI ring information from the device tree instead of statically
allocating the data structures.

This is necessary as different platforms support different
configurations.

Change-Id: I59cd660459d627f93479a11257653611070fd3a4
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
parent d45d90b4
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -18,6 +18,9 @@ Required properties:
	- qcom,msm-bus,num-cases
	- qcom,msm-bus,num-paths
	- qcom,msm-bus,vectors-KBps
  - mhi-chan-cfg-#: mhi channel configuration parameters for platform
  - mhi-event-cfg-#: mhi event ring configuration parameters for platform
  - mhi-event-rings: number of event rings supported by platform

Example:

@@ -33,4 +36,7 @@ Example:
		qcom,msm-bus,vectors-KBps =
				<100 512 0 0>,
				<100 512 1200000000 1200000000>;
		mhi-event-rings = <6>;
		mhi-chan-cfg-102 = <0x66 0x80 0x5 0x62>;
		mhi-event-cfg-0 = <0x80 0x0 0x0 0x11>;
	};
+1 −0
Original line number Diff line number Diff line
@@ -10,4 +10,5 @@ obj-y += mhi_sys.o
obj-y += mhi_bhi.o
obj-y += mhi_pm.o
obj-y += mhi_ssr.o
obj-y += mhi_event.o
CFLAGS_mhi_iface.o := -I$(src)
+55 −20
Original line number Diff line number Diff line
@@ -168,6 +168,7 @@ enum MHI_PKT_TYPE {
	MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
	MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
	MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
	MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
	MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
	MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
	MHI_PKT_TYPE_TX_EVENT = 0x22,
@@ -315,22 +316,28 @@ enum MHI_EXEC_ENV {
	MHI_EXEC_ENV_reserved = 0x80000000
};

struct mhi_chan_info {
	u32 chan_nr;
	u32 max_desc;
	u32 ev_ring;
	u32 flags;
};

struct mhi_client_handle {
	struct mhi_chan_info chan_info;
	struct mhi_device_ctxt *mhi_dev_ctxt;
	struct mhi_client_info_t client_info;
	struct completion chan_reset_complete;
	struct completion chan_open_complete;
	void *user_data;
	u32 chan;
	struct mhi_result result;
	u32 device_index;
	u32 event_ring_index;
	u32 msi_vec;
	u32 cb_mod;
	u32 intmod_t;
	u32 pkt_count;
	int magic;
	int chan_status;
	int event_ring_index;
};

enum MHI_EVENT_POLLING {
@@ -339,6 +346,12 @@ enum MHI_EVENT_POLLING {
	MHI_EVENT_POLLING_reserved = 0x80000000
};

enum MHI_TYPE_EVENT_RING {
	MHI_ER_DATA_TYPE = 0x1,
	MHI_ER_CTRL_TYPE = 0x2,
	MHI_ER_TYPE_RESERVED = 0x80000000
};

struct mhi_state_work_queue {
	spinlock_t *q_lock;
	struct mhi_ring q_info;
@@ -347,12 +360,10 @@ struct mhi_state_work_queue {
};

struct mhi_control_seg {
	union mhi_xfer_pkt *xfer_trb_list[MHI_MAX_CHANNELS];
	union mhi_event_pkt *ev_trb_list[NR_EV_RINGS];
	union mhi_cmd_pkt cmd_trb_list[NR_OF_CMD_RINGS][CMD_EL_PER_RING + 1];
	struct mhi_cmd_ctxt mhi_cmd_ctxt_list[NR_OF_CMD_RINGS];
	struct mhi_chan_ctxt mhi_cc_list[MHI_MAX_CHANNELS];
	struct mhi_event_ctxt mhi_ec_list[NR_EV_RINGS];
	struct mhi_event_ctxt *mhi_ec_list;
	u32 padding;
};

@@ -371,8 +382,8 @@ struct mhi_counters {
	u32 msi_disable_cntr;
	u32 msi_enable_cntr;
	u32 nr_irq_migrations;
	u32 msi_counter[NR_EV_RINGS];
	u32 ev_counter[NR_EV_RINGS];
	u32 *msi_counter;
	u32 *ev_counter;
	atomic_t outbound_acks;
	u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
};
@@ -412,6 +423,9 @@ struct dev_mmio_info {
	void __iomem *event_db_addr;
	void __iomem *cmd_db_addr;
	u64 mmio_len;
	u32 nr_event_rings;
	dma_addr_t dma_ev_ctxt; /* Bus address of ECABAP*/
	void *dma_ev_rings;
};

struct mhi_device_ctxt {
@@ -424,7 +438,7 @@ struct mhi_device_ctxt {
	struct mhi_meminfo *mhi_ctrl_seg_info;

	struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
	struct mhi_ring mhi_local_event_ctxt[NR_EV_RINGS];
	struct mhi_ring *mhi_local_event_ctxt;
	struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];

	struct mutex *mhi_chan_mutex;
@@ -432,6 +446,7 @@ struct mhi_device_ctxt {
	spinlock_t *mhi_ev_spinlock_list;
	struct mutex *mhi_cmd_mutex_list;
	struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
	struct mhi_event_ring_cfg *ev_ring_props;
	struct task_struct *event_thread_handle;
	struct task_struct *st_thread_handle;
	struct mhi_wait_queues mhi_ev_wq;
@@ -445,9 +460,6 @@ struct mhi_device_ctxt {
	enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];

	u32 cmd_ring_order;
	u32 alloced_ev_rings[NR_EV_RINGS];
	u32 ev_ring_props[NR_EV_RINGS];

	struct mhi_counters counters;
	struct mhi_flags flags;

@@ -477,7 +489,6 @@ struct mhi_device_ctxt {

struct mhi_pcie_dev_info {
	struct pcie_core_info core;
	atomic_t ref_count;
	struct mhi_device_ctxt mhi_ctxt;
	struct msm_pcie_register_event mhi_pci_link_event;
	struct pci_dev *pcie_device;
@@ -493,6 +504,16 @@ struct mhi_pcie_devices {
	s32 nr_of_devices;
};

struct mhi_event_ring_cfg {
	u32 nr_desc;
	u32 msi_vec;
	u32 intmod;
	u32 flags;
	enum MHI_EVENT_RING_STATE state;
	irqreturn_t (*mhi_handler_ptr)(int , void *);
};

irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id);
enum MHI_STATUS mhi_reset_all_thread_queues(
					struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_add_elements_to_event_rings(
@@ -504,7 +525,7 @@ enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *loc_1,
enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
				struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATUS mhi_init_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
		u32 nr_ev_el, u32 event_ring_index);
/*Mhi Initialization functions */
enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -521,7 +542,11 @@ enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
				   u64 el_per_ring,
				   enum MHI_CHAN_TYPE chan_type,
				   u32 event_ring,
		struct mhi_ring *ring);
				   struct mhi_ring *ring,
				   enum MHI_CHAN_STATE chan_state);
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
					      u32 chan);
enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp,
			 void **wp, void **assigned_addr);
enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring, void **assigned_addr);
@@ -532,6 +557,8 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
	struct mhi_ring *ring, enum MHI_RING_TYPE ring_type, u32 ring_index);
enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
					union mhi_event_pkt *event);
enum MHI_EVENT_CCS get_cmd_pkt(union mhi_event_pkt *ev_pkt,
			       union mhi_cmd_pkt **cmd_pkt);
enum MHI_STATUS parse_cmd_event(struct mhi_device_ctxt *ctxt,
					union mhi_event_pkt *event);
int parse_event_thread(void *ctxt);
@@ -587,7 +614,15 @@ u32 mhi_reg_read_field(void __iomem *io_addr, uintptr_t io_offset,
			 u32 mask, u32 shift);
void mhi_exit_m2(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_runtime_suspend(struct device *dev);
int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
		   struct mhi_chan_info *chan_info);
int mhi_runtime_resume(struct device *dev);
enum MHI_STATUS mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
		  enum MHI_TYPE_EVENT_RING type);
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
				int index);
int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt);

#endif
+325 −0
Original line number Diff line number Diff line
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */


#include <linux/types.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>

#include "mhi.h"
#include "mhi_macros.h"
#include "mhi_sys.h"

int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int r, i;
	char dt_prop[MAX_BUF_SIZE];
	const struct device_node *np =
		mhi_dev_ctxt->dev_info->plat_dev->dev.of_node;

	r = of_property_read_u32(np, "mhi-event-rings",
			&mhi_dev_ctxt->mmio_info.nr_event_rings);
	if (r) {
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to pull event ring info from DT, %d\n", r);
		goto dt_error;
	}
	mhi_dev_ctxt->ev_ring_props =
				kzalloc(sizeof(struct mhi_event_ring_cfg) *
					mhi_dev_ctxt->mmio_info.nr_event_rings,
					GFP_KERNEL);
	if (!mhi_dev_ctxt->ev_ring_props) {
		r = -ENOMEM;
		goto dt_error;
	}

	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
		scnprintf(dt_prop, MAX_BUF_SIZE, "%s%d", "mhi-event-cfg-", i);
		r = of_property_read_u32_array(np, dt_prop,
					(u32 *)&mhi_dev_ctxt->ev_ring_props[i],
					4);
		if (r) {
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to pull ev ring %d info from DT %d\n",
				i, r);
			goto dt_error;
		}
		mhi_log(MHI_MSG_INFO,
			"Pulled ev ring %d, desc: 0x%x, msi_vec: 0x%x, intmod %d flags 0x%x\n",
			i, mhi_dev_ctxt->ev_ring_props[i].nr_desc,
			   mhi_dev_ctxt->ev_ring_props[i].msi_vec,
			   mhi_dev_ctxt->ev_ring_props[i].intmod,
			   mhi_dev_ctxt->ev_ring_props[i].flags);
		if (GET_EV_PROPS(EV_MANAGED,
			mhi_dev_ctxt->ev_ring_props[i].flags))
			mhi_dev_ctxt->ev_ring_props[i].mhi_handler_ptr =
							mhi_msi_handlr;
		else
			mhi_dev_ctxt->ev_ring_props[i].mhi_handler_ptr =
							mhi_msi_ipa_handlr;
	}
dt_error:
	return r;
}

/**
 * create_ev_rings - allocate event ring contexts and ring element storage
 * @mhi_dev_ctxt: device context; nr_event_rings and ev_ring_props must
 *		  already be populated (see mhi_populate_event_cfg)
 *
 * Allocates, in order:
 *  - the shared event context array (ECABAP) in DMA-coherent memory,
 *  - the local ring bookkeeping array,
 *  - the per-ring ev/msi counters,
 *  - the dma_ev_rings pointer table,
 *  - one DMA-coherent element buffer per event ring.
 *
 * On any failure every prior allocation is released (goto-based unwind).
 * Returns 0 on success, -ENOMEM otherwise.
 */
int create_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int r = 0, i;
	struct mhi_event_ctxt *ev_ctxt = NULL;

	size_t ctxt_size = sizeof(struct mhi_event_ctxt) *
				    mhi_dev_ctxt->mmio_info.nr_event_rings;
	/* Allocate the event contexts in uncached memory */
	mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list =
				dma_alloc_coherent(
				&mhi_dev_ctxt->dev_info->plat_dev->dev,
				ctxt_size,
				&mhi_dev_ctxt->mmio_info.dma_ev_ctxt,
				GFP_KERNEL);
	if (!mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list)
		return -ENOMEM;

	mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
	if (!mhi_dev_ctxt->mhi_local_event_ctxt) {
		r = -ENOMEM;
		goto free_ec_list;
	}
	mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
	if (!mhi_dev_ctxt->counters.ev_counter) {
		r = -ENOMEM;
		goto free_local_ec_list;
	}
	mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
	if (!mhi_dev_ctxt->counters.msi_counter) {
		r = -ENOMEM;
		goto free_ev_counter;
	}

	mhi_dev_ctxt->mmio_info.dma_ev_rings = kzalloc(sizeof(void *) *
				     mhi_dev_ctxt->mmio_info.nr_event_rings,
				     GFP_KERNEL);
	if (!mhi_dev_ctxt->mmio_info.dma_ev_rings) {
		r = -ENOMEM;
		goto free_msi_counter;
	}
	mhi_log(MHI_MSG_INFO, "Allocated ECABAP at Virt: 0x%p, Phys 0x%lx\n",
			mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list,
			(uintptr_t)mhi_dev_ctxt->mmio_info.dma_ev_ctxt);

	/* Allocate event ring elements for each ring */
	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
		dma_addr_t ring_base_addr;
		ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i];
		mhi_dev_ctxt->mhi_local_event_ctxt[i].base =
			dma_alloc_coherent(
				&mhi_dev_ctxt->dev_info->plat_dev->dev,
				sizeof(union mhi_event_pkt) *
				mhi_dev_ctxt->ev_ring_props[i].nr_desc,
				&ring_base_addr,
				GFP_KERNEL);
		if (!mhi_dev_ctxt->mhi_local_event_ctxt[i].base) {
			r = -ENOMEM;
			goto free_event_ring;
		}

		/* Device context and local rp/wp both start at ring base */
		ev_ctxt->mhi_event_ring_base_addr = ring_base_addr;
		ev_ctxt->mhi_event_read_ptr = ring_base_addr;
		ev_ctxt->mhi_event_write_ptr = ring_base_addr;

		mhi_dev_ctxt->mhi_local_event_ctxt[i].wp =
				mhi_dev_ctxt->mhi_local_event_ctxt[i].base;
		mhi_dev_ctxt->mhi_local_event_ctxt[i].rp =
				mhi_dev_ctxt->mhi_local_event_ctxt[i].base;
		mhi_log(MHI_MSG_INFO, "Allocated Event Ring %d\n", i);
	}
	return r;

free_event_ring:
	/*
	 * Ring i failed to allocate, so free rings i-1 down to 0.
	 * (The previous "for (; i > 0; --i)" freed the unallocated ring i
	 * and leaked ring 0; it also freed with sizeof(union mhi_event_pkt *)
	 * while the allocation used sizeof(union mhi_event_pkt).)
	 */
	while (--i >= 0) {
		ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[i];
		dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
				  sizeof(union mhi_event_pkt) *
				  mhi_dev_ctxt->ev_ring_props[i].nr_desc,
				  mhi_dev_ctxt->mhi_local_event_ctxt[i].base,
				  ev_ctxt->mhi_event_ring_base_addr);
	}
	kfree(mhi_dev_ctxt->mmio_info.dma_ev_rings);
free_msi_counter:
	kfree(mhi_dev_ctxt->counters.msi_counter);
free_ev_counter:
	kfree(mhi_dev_ctxt->counters.ev_counter);
free_local_ec_list:
	kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
free_ec_list:
	dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
				ctxt_size,
				mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list,
				mhi_dev_ctxt->mmio_info.dma_ev_ctxt);
	return r;
}
/* Ring the hardware doorbell for one event ring: write the bus address of
 * the ring's current write pointer to the event doorbell register. */
void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
	struct mhi_ring *ring =
		&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
	u64 db_value = virt_to_dma(NULL, ring->wp);

	mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
					event_ring_index, db_value);
}

/* Populate one device-visible event context and its local ring descriptor:
 * ring type/MSI vector/length and interrupt moderation on the device side,
 * element size and total length on the local side. Always succeeds. */
static enum MHI_STATUS mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
				struct mhi_ring *ring, u32 el_per_ring,
				u32 intmodt_val, u32 msi_vec)
{
	size_t ring_len = (size_t)el_per_ring * sizeof(union mhi_event_pkt);

	/* Local bookkeeping for this ring */
	ring->el_size = sizeof(union mhi_event_pkt);
	ring->len = ring_len;
	ring->overwrite_en = 0;

	/* Device-visible event context */
	ev_list->mhi_event_er_type  = MHI_EVENT_RING_TYPE_VALID;
	ev_list->mhi_msi_vector     = msi_vec;
	ev_list->mhi_event_ring_len = ring_len;
	MHI_SET_EV_CTXT(EVENT_CTXT_INTMODT, ev_list, intmodt_val);

	/* Flush writes to MMIO */
	wmb();
	return MHI_STATUS_SUCCESS;
}

int init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int i;
	struct mhi_ring *mhi_local_event_ctxt = NULL;
	struct mhi_event_ctxt *event_ctxt;
	struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg;

	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
		event_ctxt = &mhi_ctrl->mhi_ec_list[i];
		mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
		mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
			mhi_dev_ctxt->ev_ring_props[i].nr_desc,
			mhi_dev_ctxt->ev_ring_props[i].intmod,
			mhi_dev_ctxt->ev_ring_props[i].msi_vec);
	}
	return 0;
}

/*
 * Initialize all not-yet-initialized event rings of a given type
 * (control vs. data; see enum MHI_TYPE_EVENT_RING) and ring each ring's
 * doorbell. Returns 0 on success or the first non-zero status from
 * mhi_init_local_event_ring.
 *
 * NOTE(review): the doorbell is rung for EVERY ring index on each call,
 * including rings whose type does not match and rings that were never
 * initialized — presumably intentional (re-ringing already-set-up rings
 * is harmless), but confirm rings of the other type are safe to ring
 * before their own init pass.
 */
int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
		  enum MHI_TYPE_EVENT_RING type)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	u32 i;

	mhi_log(MHI_MSG_INFO, "Entered\n");
	for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
		/* Only touch rings of the requested type that are still in
		 * their zero-initialized (uninitialized) state. */
		if (GET_EV_PROPS(EV_TYPE,
			mhi_dev_ctxt->ev_ring_props[i].flags) == type &&
		    !mhi_dev_ctxt->ev_ring_props[i].state) {
			ret_val = mhi_init_local_event_ring(mhi_dev_ctxt,
					mhi_dev_ctxt->ev_ring_props[i].nr_desc,
					i);
			if (ret_val)
				return ret_val;
		}
		ring_ev_db(mhi_dev_ctxt, i);
		mhi_log(MHI_MSG_INFO, "Finished ev ring init %d\n", i);
	}
	mhi_log(MHI_MSG_INFO, "Exited\n");
	return 0;
}

/* Populate the event rings appropriate to a state transition: control
 * rings when the device reaches READY, data rings when it reaches AMSS.
 * Any other transition is an error. */
enum MHI_STATUS mhi_add_elements_to_event_rings(
					struct mhi_device_ctxt *mhi_dev_ctxt,
					enum STATE_TRANSITION new_state)
{
	if (new_state == STATE_TRANSITION_READY)
		return init_local_ev_ring_by_type(mhi_dev_ctxt,
						  MHI_ER_CTRL_TYPE);

	if (new_state == STATE_TRANSITION_AMSS)
		return init_local_ev_ring_by_type(mhi_dev_ctxt,
						  MHI_ER_DATA_TYPE);

	mhi_log(MHI_MSG_ERROR,
		"Unrecognized event stage, %d\n", new_state);
	return MHI_STATUS_ERROR;
}

/**
 * mhi_init_local_event_ring - fill a local event ring with empty elements
 * @mhi_dev_ctxt: device context (must be non-NULL)
 * @nr_ev_el: ring capacity in elements; nr_ev_el - 1 elements are added,
 *	      leaving one slot free so rp == wp unambiguously means empty
 * @ring_index: which event ring to initialize
 *
 * Marks the ring MHI_EVENT_RING_INIT on completion.  The ring's spinlock
 * is held (irqsave) for the whole fill.
 *
 * Returns MHI_STATUS_SUCCESS, or MHI_STATUS_ERROR on bad input or if an
 * element cannot be inserted.
 */
enum MHI_STATUS mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
					u32 nr_ev_el, u32 ring_index)
{
	union mhi_event_pkt *ev_pkt = NULL;
	u32 i = 0;
	unsigned long flags = 0;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	spinlock_t *lock;
	struct mhi_ring *event_ctxt;

	/* Validate BEFORE touching mhi_dev_ctxt: the previous version
	 * dereferenced it in the declaration initializers and only then
	 * NULL-checked it. */
	if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
		mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
		return MHI_STATUS_ERROR;
	}
	lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
	event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];

	spin_lock_irqsave(lock, flags);

	mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
			mhi_dev_ctxt->mmio_info.mmio_addr,
			mhi_dev_ctxt->mmio_info.mmio_len);
	mhi_log(MHI_MSG_INFO, "Initializing event ring %d\n", ring_index);

	for (i = 0; i < nr_ev_el - 1; ++i) {
		ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt);
		if (MHI_STATUS_SUCCESS != ret_val) {
			mhi_log(MHI_MSG_ERROR,
				"Failed to insert el in ev ctxt\n");
			ret_val = MHI_STATUS_ERROR;
			break;
		}
	}
	mhi_dev_ctxt->ev_ring_props[ring_index].state = MHI_EVENT_RING_INIT;
	spin_unlock_irqrestore(lock, flags);
	return ret_val;
}

/* Reset one event ring to its empty state: device-visible read/write
 * pointers back to the ring base address, local rp/wp back to the local
 * base, followed by a write barrier so the device sees the reset. */
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
				int index)
{
	struct mhi_event_ctxt *dev_ring =
		&mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[index];
	struct mhi_ring *local_ring =
		&mhi_dev_ctxt->mhi_local_event_ctxt[index];

	mhi_log(MHI_MSG_VERBOSE, "Resetting event index %d\n", index);

	dev_ring->mhi_event_read_ptr = dev_ring->mhi_event_ring_base_addr;
	dev_ring->mhi_event_write_ptr = dev_ring->mhi_event_ring_base_addr;
	local_ring->rp = local_ring->base;
	local_ring->wp = local_ring->base;
	/* Flush writes to MMIO */
	wmb();
}
+20 −10
Original line number Diff line number Diff line
@@ -62,8 +62,10 @@ static void mhi_msm_fixup(struct pci_dev *pcie_device)
int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
{
	int ret_val = 0;
	u32 i = 0;
	u32 i = 0, j = 0;
	u32 retry_count = 0;
	u32 msi_number = 32;
	struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
	struct pci_dev *pcie_device = NULL;

	if (NULL == mhi_pcie_dev)
@@ -109,29 +111,37 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
	}

	device_disable_async_suspend(&pcie_device->dev);
	ret_val = pci_enable_msi_block(pcie_device, MAX_NR_MSI + 1);
	ret_val = pci_enable_msi_block(pcie_device, msi_number);
	if (0 != ret_val) {
		mhi_log(MHI_MSG_ERROR,
			"Failed to enable MSIs for pcie dev ret_val %d.\n",
			ret_val);
		goto msi_config_err;
	}
	for (i = 0; i < MAX_NR_MSI; ++i) {
		ret_val = request_irq(pcie_device->irq + i,
					mhi_msi_handlr,
	mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;

	for (j = 0; j < mhi_dev_ctxt->mmio_info.nr_event_rings; j++) {
		mhi_log(MHI_MSG_VERBOSE,
				"MSI_number = %d, event ring number = %d\n",
				mhi_dev_ctxt->ev_ring_props[j].msi_vec, j);

		ret_val = request_irq(pcie_device->irq +
				mhi_dev_ctxt->ev_ring_props[j].msi_vec,
				mhi_dev_ctxt->ev_ring_props[j].mhi_handler_ptr,
				IRQF_NO_SUSPEND,
				"mhi_drv",
				(void *)&pcie_device->dev);
		if (ret_val) {
			mhi_log(MHI_MSG_ERROR,
				"Failed to register handler for MSI.\n");
			   "Failed to register handler for MSI ret_val = %d\n",
			   ret_val);
			goto msi_config_err;
		}
	}
	mhi_pcie_dev->core.irq_base = pcie_device->irq;
	mhi_log(MHI_MSG_VERBOSE,
		"Setting IRQ Base to 0x%x\n", mhi_pcie_dev->core.irq_base);
	mhi_pcie_dev->core.max_nr_msis = MAX_NR_MSI;
	mhi_pcie_dev->core.max_nr_msis = msi_number;
	do  {
		ret_val = mhi_init_gpios(mhi_pcie_dev);
		switch (ret_val) {
Loading