Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d45d90b4 authored by Andrei Danaila, committed by Matt Wagantall
Browse files

mhi: core: Clean-up MHI context data structure



Remove unused MHI data structure and reorganize the main
MHI context data structure for easier debugging.

Change-Id: I2658bef7fcda95181bf8d80f58991277cf854449
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
parent 1fc96da5
Loading
Loading
Loading
Loading
+43 −50
Original line number Diff line number Diff line
@@ -200,13 +200,6 @@ struct __packed mhi_reset_chan_cmd_pkt {
	u32 info;
};

struct __packed mhi_stop_chan_cmd_pkt {
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	u32 info;
};

struct __packed mhi_ee_state_change_event {
	u64 reserved1;
	u32 exec_env;
@@ -238,7 +231,6 @@ union __packed mhi_xfer_pkt {
};

union __packed mhi_cmd_pkt {
	struct mhi_stop_chan_cmd_pkt stop_cmd_pkt;
	struct mhi_reset_chan_cmd_pkt reset_cmd_pkt;
	struct mhi_noop_cmd_pkt noop_cmd_pkt;
	struct mhi_noop_cmd_pkt type;
@@ -272,6 +264,7 @@ struct mhi_ring {
	uintptr_t len;
	uintptr_t el_size;
	u32 overwrite_en;
	enum MHI_CHAN_TYPE dir;
};

enum MHI_CMD_STATUS {
@@ -355,19 +348,14 @@ struct mhi_state_work_queue {

struct mhi_control_seg {
	union mhi_xfer_pkt *xfer_trb_list[MHI_MAX_CHANNELS];
	union mhi_event_pkt *ev_trb_list[EVENT_RINGS_ALLOCATED];
	union mhi_event_pkt *ev_trb_list[NR_EV_RINGS];
	union mhi_cmd_pkt cmd_trb_list[NR_OF_CMD_RINGS][CMD_EL_PER_RING + 1];
	struct mhi_cmd_ctxt mhi_cmd_ctxt_list[NR_OF_CMD_RINGS];
	struct mhi_chan_ctxt mhi_cc_list[MHI_MAX_CHANNELS];
	struct mhi_event_ctxt mhi_ec_list[EVENT_RINGS_ALLOCATED];
	struct mhi_event_ctxt mhi_ec_list[NR_EV_RINGS];
	u32 padding;
};

struct mhi_chan_counters {
	u32 pkts_xferd;
	u32 ev_processed;
};

struct mhi_counters {
	u32 m0_m1;
	u32 m1_m0;
@@ -383,7 +371,10 @@ struct mhi_counters {
	u32 msi_disable_cntr;
	u32 msi_enable_cntr;
	u32 nr_irq_migrations;
	u32 msi_counter[NR_EV_RINGS];
	u32 ev_counter[NR_EV_RINGS];
	atomic_t outbound_acks;
	u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
};

struct mhi_flags {
@@ -397,29 +388,45 @@ struct mhi_flags {
	atomic_t pending_resume;
	atomic_t pending_ssr;
	atomic_t pending_powerup;
	atomic_t m2_transition;
	int stop_threads;
	atomic_t device_wake;
	u32 ssr;
	u32 ev_thread_stopped;
	u32 st_thread_stopped;
	u32 uldl_enabled;
	u32 db_mode[MHI_MAX_CHANNELS];
};

struct mhi_device_ctxt {
	struct mhi_pcie_dev_info *dev_info;
	struct pcie_core_info *dev_props;
struct mhi_wait_queues {
	wait_queue_head_t *mhi_event_wq;
	wait_queue_head_t *state_change_event;
	wait_queue_head_t *m0_event;
	wait_queue_head_t *m3_event;
	wait_queue_head_t *bhi_event;
};

struct dev_mmio_info {
	void __iomem *mmio_addr;
	void __iomem *channel_db_addr;
	void __iomem *chan_db_addr;
	void __iomem *event_db_addr;
	void __iomem *cmd_db_addr;
	struct mhi_control_seg *mhi_ctrl_seg;
	struct mhi_meminfo *mhi_ctrl_seg_info;
	u64 nr_of_cc;
	u64 nr_of_ec;
	u64 nr_of_cmdc;
	u64 mmio_len;
};

struct mhi_device_ctxt {
	enum MHI_STATE mhi_state;
	enum MHI_EXEC_ENV dev_exec_env;
	u64 mmio_len;

	struct mhi_pcie_dev_info *dev_info;
	struct pcie_core_info *dev_props;
	struct mhi_control_seg *mhi_ctrl_seg;
	struct mhi_meminfo *mhi_ctrl_seg_info;

	struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
	struct mhi_ring mhi_local_event_ctxt[MHI_MAX_CHANNELS];
	struct mhi_ring mhi_local_event_ctxt[NR_EV_RINGS];
	struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];

	struct mutex *mhi_chan_mutex;
	struct mutex mhi_link_state;
	spinlock_t *mhi_ev_spinlock_list;
@@ -427,48 +434,33 @@ struct mhi_device_ctxt {
	struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
	struct task_struct *event_thread_handle;
	struct task_struct *st_thread_handle;
	u32 ev_thread_stopped;
	u32 st_thread_stopped;
	wait_queue_head_t *event_handle;
	wait_queue_head_t *state_change_event_handle;
	wait_queue_head_t *M0_event;
	wait_queue_head_t *M3_event;
	wait_queue_head_t *bhi_event;
	wait_queue_head_t *chan_start_complete;
	struct mhi_wait_queues mhi_ev_wq;
	struct dev_mmio_info mmio_info;

	u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
	u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
	spinlock_t *db_write_lock;

	struct platform_device *mhi_uci_dev;
	struct platform_device *mhi_rmnet_dev;
	atomic_t link_ops_flag;

	struct mhi_state_work_queue state_change_work_item_list;
	enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];

	u32 cmd_ring_order;
	u32 alloced_ev_rings[EVENT_RINGS_ALLOCATED];
	u32 ev_ring_props[EVENT_RINGS_ALLOCATED];
	u32 msi_counter[EVENT_RINGS_ALLOCATED];
	u32 db_mode[MHI_MAX_CHANNELS];
	u32 uldl_enabled;
	u32 hw_intmod_rate;
	u32 outbound_evmod_rate;
	u32 alloced_ev_rings[NR_EV_RINGS];
	u32 ev_ring_props[NR_EV_RINGS];

	struct mhi_counters counters;
	struct mhi_flags flags;

	u32 device_wake_asserted;

	rwlock_t xfer_lock;
	atomic_t m2_transition;
	struct hrtimer m1_timer;
	ktime_t m1_timeout;
	ktime_t ul_acc_tmr_timeout;
	struct mhi_chan_counters mhi_chan_cntr[MHI_MAX_CHANNELS];
	u32 ev_counter[MHI_MAX_CHANNELS];
	u32 bus_client;

	struct esoc_desc *esoc_handle;
	void *esoc_ssr_handle;

	u32 bus_client;
	struct msm_bus_scale_pdata *bus_scale_table;
	struct notifier_block mhi_cpu_notifier;

@@ -477,6 +469,7 @@ struct mhi_device_ctxt {
	atomic_t outbound_acks;
	struct mutex pm_lock;
	struct wakeup_source w_lock;

	int enable_lpm;
	char *chan_info;
	struct dentry *mhi_parent_folder;
+2 −2
Original line number Diff line number Diff line
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -47,7 +47,7 @@ static ssize_t bhi_write(struct file *file,
	if (count > BHI_MAX_IMAGE_SIZE)
		return -ENOMEM;

	wait_event_interruptible(*mhi_dev_ctxt->bhi_event,
	wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
			mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);

	mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%x\n", count);
+3 −2
Original line number Diff line number Diff line
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -156,7 +156,8 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
	if (!mhi_init_debugfs(&mhi_pcie_dev->mhi_ctxt))
		mhi_log(MHI_MSG_ERROR, "Failed to init debugfs.\n");

	mhi_pcie_dev->mhi_ctxt.mmio_addr = mhi_pcie_dev->core.bar0_base;
	mhi_pcie_dev->mhi_ctxt.mmio_info.mmio_addr =
						mhi_pcie_dev->core.bar0_base;
	pcie_device->dev.platform_data = &mhi_pcie_dev->mhi_ctxt;
	mhi_pcie_dev->mhi_ctxt.dev_info->plat_dev->dev.platform_data =
						&mhi_pcie_dev->mhi_ctxt;
+40 −48
Original line number Diff line number Diff line
@@ -26,9 +26,6 @@ static enum MHI_STATUS mhi_create_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
	if (NULL == mhi_dev_ctxt)
		return MHI_STATUS_ALLOC_ERROR;
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	mhi_dev_ctxt->nr_of_cc = MHI_MAX_CHANNELS;
	mhi_dev_ctxt->nr_of_ec = EVENT_RINGS_ALLOCATED;
	mhi_dev_ctxt->nr_of_cmdc = NR_OF_CMD_RINGS;

	mhi_dev_ctxt->alloced_ev_rings[PRIMARY_EVENT_RING] = 0;
	mhi_dev_ctxt->alloced_ev_rings[IPA_OUT_EV_RING] = IPA_OUT_EV_RING;
@@ -44,7 +41,7 @@ static enum MHI_STATUS mhi_create_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
			mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING],
			MHI_EVENT_POLLING_DISABLED);

	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
	for (i = 0; i < NR_EV_RINGS; ++i) {
		MHI_SET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
				mhi_dev_ctxt->ev_ring_props[i],
				i);
@@ -63,9 +60,9 @@ enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
		mhi_freememregion(mhi_dev_ctxt->mhi_ctrl_seg_info);
	case MHI_INIT_ERROR_STAGE_THREAD_QUEUES:
	case MHI_INIT_ERROR_STAGE_THREADS:
		kfree(mhi_dev_ctxt->event_handle);
		kfree(mhi_dev_ctxt->state_change_event_handle);
		kfree(mhi_dev_ctxt->M0_event);
		kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
		kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
		kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
	case MHI_INIT_ERROR_STAGE_EVENTS:
		kfree(mhi_dev_ctxt->mhi_ctrl_seg_info);
	case MHI_INIT_ERROR_STAGE_MEM_ZONES:
@@ -87,7 +84,7 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
	u32 i = 0;

	mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
							MHI_MAX_CHANNELS,
							NR_EV_RINGS,
							GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
		goto ev_mutex_free;
@@ -104,18 +101,18 @@ static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
						MHI_MAX_CHANNELS, GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->db_write_lock)
		goto db_write_lock_free;
	for (i = 0; i < mhi_dev_ctxt->nr_of_cc; ++i)
		mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
	for (i = 0; i < NR_EV_RINGS; ++i)
		spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
	for (i = 0; i < mhi_dev_ctxt->nr_of_cmdc; ++i)
	for (i = 0; i < NR_OF_CMD_RINGS; ++i)
		mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
	rwlock_init(&mhi_dev_ctxt->xfer_lock);
	mutex_init(&mhi_dev_ctxt->mhi_link_state);
	mutex_init(&mhi_dev_ctxt->pm_lock);
	atomic_set(&mhi_dev_ctxt->m2_transition, 0);
	atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
	return MHI_STATUS_SUCCESS;

db_write_lock_free:
@@ -142,65 +139,59 @@ static enum MHI_STATUS mhi_init_ctrl_zone(struct mhi_pcie_dev_info *dev_info,
static enum MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
{

	mhi_dev_ctxt->event_handle = kmalloc(sizeof(wait_queue_head_t),
	mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq = kmalloc(
						sizeof(wait_queue_head_t),
						GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->event_handle) {
	if (NULL == mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		return MHI_STATUS_ERROR;
	}
	mhi_dev_ctxt->state_change_event_handle =
	mhi_dev_ctxt->mhi_ev_wq.state_change_event =
				kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->state_change_event_handle) {
	if (NULL == mhi_dev_ctxt->mhi_ev_wq.state_change_event) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_event_handle_alloc;
	}
	/* Initialize the event which signals M0 */
	mhi_dev_ctxt->M0_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->M0_event) {
	mhi_dev_ctxt->mhi_ev_wq.m0_event = kmalloc(sizeof(wait_queue_head_t),
								GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_ev_wq.m0_event) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_state_change_event_handle;
	}
	/* Initialize the event which signals M0 */
	mhi_dev_ctxt->M3_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->M3_event) {
	mhi_dev_ctxt->mhi_ev_wq.m3_event = kmalloc(sizeof(wait_queue_head_t),
								GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_ev_wq.m3_event) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_M0_event;
		goto error_m0_event;
	}
	/* Initialize the event which signals M0 */
	mhi_dev_ctxt->bhi_event = kmalloc(sizeof(wait_queue_head_t),
	mhi_dev_ctxt->mhi_ev_wq.bhi_event = kmalloc(sizeof(wait_queue_head_t),
								GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->bhi_event) {
	if (NULL == mhi_dev_ctxt->mhi_ev_wq.bhi_event) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_bhi_event;
	}
	mhi_dev_ctxt->chan_start_complete =
				kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->chan_start_complete) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_chan_complete;
	}
	/* Initialize the event which starts the event parsing thread */
	init_waitqueue_head(mhi_dev_ctxt->event_handle);
	init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
	/* Initialize the event which starts the state change thread */
	init_waitqueue_head(mhi_dev_ctxt->state_change_event_handle);
	init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
	/* Initialize the event which triggers clients waiting to send */
	init_waitqueue_head(mhi_dev_ctxt->M0_event);
	init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m0_event);
	/* Initialize the event which triggers D3hot */
	init_waitqueue_head(mhi_dev_ctxt->M3_event);
	init_waitqueue_head(mhi_dev_ctxt->bhi_event);
	init_waitqueue_head(mhi_dev_ctxt->chan_start_complete);
	init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m3_event);
	init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.bhi_event);

	return MHI_STATUS_SUCCESS;
error_chan_complete:
	kfree(mhi_dev_ctxt->bhi_event);
error_bhi_event:
	kfree(mhi_dev_ctxt->M3_event);
error_M0_event:
	kfree(mhi_dev_ctxt->M0_event);
	kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
error_m0_event:
	kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
error_state_change_event_handle:
	kfree(mhi_dev_ctxt->state_change_event_handle);
	kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
error_event_handle_alloc:
	kfree(mhi_dev_ctxt->event_handle);
	kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
	return MHI_STATUS_ERROR;
}

@@ -261,7 +252,7 @@ static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt
	}
	ctrl_seg_size += align_len - (ctrl_seg_size % align_len);

	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i)
	for (i = 0; i < NR_EV_RINGS; ++i)
		ctrl_seg_size += sizeof(union mhi_event_pkt)*
					(EV_EL_PER_RING + ELEMENT_GAP);

@@ -297,7 +288,7 @@ static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt
	}

	ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len);
	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
	for (i = 0; i < NR_EV_RINGS; ++i) {
		mhi_dev_ctxt->mhi_ctrl_seg->ev_trb_list[i] =
			(union mhi_event_pkt *)ctrl_seg_offset;
		ctrl_seg_offset += sizeof(union mhi_event_pkt) *
@@ -391,7 +382,7 @@ static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
	u32 intmod_t = 0;
	uintptr_t ev_ring_addr;

	for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
	for (i = 0; i < NR_EV_RINGS; ++i) {
		MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
					mhi_dev_ctxt->ev_ring_props[i],
					msi_vec);
@@ -448,7 +439,7 @@ static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt)
				(uintptr_t)trb_list,
				MAX_NR_TRBS_PER_HARD_CHAN,
				(i % 2) ? MHI_IN : MHI_OUT,
				EVENT_RINGS_ALLOCATED - (MHI_MAX_CHANNELS - i),
				NR_EV_RINGS - (MHI_MAX_CHANNELS - i),
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i]);
		}
	}
@@ -591,8 +582,9 @@ enum MHI_STATUS mhi_init_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,

	spin_lock_irqsave(lock, flags);

	mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
			mhi_dev_ctxt->mmio_addr, mhi_dev_ctxt->mmio_len);
	mhi_log(MHI_MSG_INFO, "mmio_info.mmio_addr = 0x%p, mmio_len = 0x%llx\n",
			mhi_dev_ctxt->mmio_info.mmio_addr,
			mhi_dev_ctxt->mmio_info.mmio_len);
	mhi_log(MHI_MSG_INFO,
			"Initializing event ring %d\n", event_ring_index);

+9 −7
Original line number Diff line number Diff line
@@ -28,7 +28,8 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
		mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
		return IRQ_HANDLED;
	}
	mhi_dev_ctxt->msi_counter[IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
	mhi_dev_ctxt->counters.msi_counter[
			IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
	mhi_log(MHI_MSG_VERBOSE,
		"Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
	trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
@@ -36,7 +37,7 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
	case 0:
	case 1:
		atomic_inc(&mhi_dev_ctxt->flags.events_pending);
		wake_up_interruptible(mhi_dev_ctxt->event_handle);
		wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
		break;
	case 2:
		client_index = MHI_CLIENT_IP_HW_0_IN;
@@ -182,13 +183,14 @@ int parse_event_thread(void *ctxt)
	/* Go through all event rings */
	for (;;) {
		ret_val =
			wait_event_interruptible(*mhi_dev_ctxt->event_handle,
			wait_event_interruptible(
				*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
				((atomic_read(
				&mhi_dev_ctxt->flags.events_pending) > 0) &&
					!mhi_dev_ctxt->flags.stop_threads) ||
				mhi_dev_ctxt->flags.kill_threads ||
				(mhi_dev_ctxt->flags.stop_threads &&
				!mhi_dev_ctxt->ev_thread_stopped));
				!mhi_dev_ctxt->flags.ev_thread_stopped));

		switch (ret_val) {
		case -ERESTARTSYS:
@@ -201,15 +203,15 @@ int parse_event_thread(void *ctxt)
				return 0;
			}
			if (mhi_dev_ctxt->flags.stop_threads) {
				mhi_dev_ctxt->ev_thread_stopped = 1;
				mhi_dev_ctxt->flags.ev_thread_stopped = 1;
				continue;
			}
			break;
		}
		mhi_dev_ctxt->ev_thread_stopped = 0;
		mhi_dev_ctxt->flags.ev_thread_stopped = 0;
		atomic_dec(&mhi_dev_ctxt->flags.events_pending);

		for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) {
		for (i = 0; i < NR_EV_RINGS; ++i) {
			MHI_GET_EVENT_RING_INFO(EVENT_RING_POLLING,
					mhi_dev_ctxt->ev_ring_props[i],
					ev_poll_en)
Loading