Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9871bb4b authored by Sujeev Dias, committed by Tony Truong
Browse files

mhi: core: add support for priority based event rings



Expand the priority for event rings to allow support for low and
high priority event rings. Low priority events are processed in
a worker thread and high priority events use tasklets scheduled
with high priority.

CRs-Fixed: 2490298
Change-Id: Id9d0a8d3d1e84154643f101e150e1d8c40bb6f94
Acked-by: Bhaumik Vasav Bhatt <bbhatt@qti.qualcomm.com>
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
parent a4a8d06e
Loading
Loading
Loading
Loading
+23 −3
Original line number Diff line number Diff line
@@ -183,7 +183,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -207,7 +207,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		ret = request_irq(mhi_cntrl->irq[mhi_event->msi],
@@ -224,7 +224,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -887,6 +887,8 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lp_ev_rings);

	/* populate ev ring */
	mhi_event = mhi_cntrl->mhi_event;
	i = 0;
@@ -963,6 +965,19 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
							"mhi,client-manage");
		mhi_event->offload_ev = of_property_read_bool(child,
							      "mhi,offload");

		/*
		 * low priority events are handled in a separate worker thread
		 * to allow for sleeping functions to be called.
		 */
		if (!mhi_event->offload_ev) {
			if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
				list_add_tail(&mhi_event->node,
						&mhi_cntrl->lp_ev_rings);
			else
				mhi_event->request_irq = true;
		}

		mhi_event++;
	}

@@ -1242,6 +1257,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -1255,6 +1271,10 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);

		if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
			continue;

		if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
+14 −1
Original line number Diff line number Diff line
@@ -499,6 +499,15 @@ enum MHI_ER_TYPE {
	MHI_ER_TYPE_VALID = 0x1,
};

/* Relative scheduling priority of an event ring (see of_parse_ev_cfg). */
enum mhi_er_priority {
	MHI_ER_PRIORITY_HIGH,
	MHI_ER_PRIORITY_MEDIUM,
	MHI_ER_PRIORITY_LOW,
};

/*
 * Parenthesize the macro argument so that expression arguments
 * (e.g. a conditional or cast) expand correctly.
 */
#define IS_MHI_ER_PRIORITY_LOW(ev) ((ev)->priority >= MHI_ER_PRIORITY_LOW)
#define IS_MHI_ER_PRIORITY_HIGH(ev) ((ev)->priority == MHI_ER_PRIORITY_HIGH)

enum mhi_er_data_type {
	MHI_ER_DATA_ELEMENT_TYPE,
	MHI_ER_CTRL_ELEMENT_TYPE,
@@ -586,17 +595,19 @@ struct mhi_buf_info {
};

struct mhi_event {
	struct list_head node;
	u32 er_index;
	u32 intmod;
	u32 msi;
	int chan; /* this event ring is dedicated to a channel */
	u32 priority;
	enum mhi_er_priority priority;
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	bool hw_ring;
	bool cl_manage;
	bool offload_ev; /* managed by a device driver */
	bool request_irq; /* has dedicated interrupt handler */
	spinlock_t lock;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	struct tasklet_struct task;
@@ -699,6 +710,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
void mhi_pm_st_worker(struct work_struct *work);
void mhi_fw_load_worker(struct work_struct *work);
void mhi_pm_sys_err_worker(struct work_struct *work);
void mhi_low_priority_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
@@ -752,6 +764,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);
int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
			      u32 *offset);
void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
+10 −2
Original line number Diff line number Diff line
@@ -255,7 +255,7 @@ static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
	return nr_el;
}

/*
 * mhi_to_virtual - translate a ring element's device-visible (IOMMU) address
 * into the corresponding host virtual address within the ring buffer.
 *
 * Non-static: the pm code uses it to check event ring read pointers
 * (declared in the internal header alongside mhi_init_timesync).
 */
void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}
@@ -1469,7 +1469,13 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev)

		if (mhi_dev)
			mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
	} else

		return IRQ_HANDLED;
	}

	if (IS_MHI_ER_PRIORITY_HIGH(mhi_event))
		tasklet_hi_schedule(&mhi_event->task);
	else
		tasklet_schedule(&mhi_event->task);

	return IRQ_HANDLED;
@@ -1539,6 +1545,8 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
	wake_up_all(&mhi_cntrl->state_event);
	MHI_VERB("Exit\n");

	schedule_work(&mhi_cntrl->low_priority_worker);

	return IRQ_WAKE_THREAD;
}

+52 −2
Original line number Diff line number Diff line
@@ -589,7 +589,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
	MHI_LOG("Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;
		tasklet_kill(&mhi_event->task);
	}
@@ -608,6 +608,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
	wake_up_all(&mhi_cntrl->state_event);
	flush_work(&mhi_cntrl->st_worker);
	flush_work(&mhi_cntrl->fw_worker);
	flush_work(&mhi_cntrl->low_priority_worker);

	mutex_lock(&mhi_cntrl->pm_mutex);

@@ -720,6 +721,44 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
	return 0;
}

/*
 * mhi_low_priority_events_pending - schedule the low priority worker if any
 * low priority event ring still has unprocessed events.
 *
 * For each ring on lp_ev_rings, compare the locally cached read pointer
 * against the read pointer recorded in the shared event ring context
 * (presumably updated by the device — confirm against the event processing
 * path).  A mismatch means events are pending; one pending ring is enough,
 * so scanning stops after the first schedule_work().  The per-ring lock is
 * held across the comparison so both pointers are read consistently.
 */
static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;

	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
		struct mhi_event_ctxt *er_ctxt =
			&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
		struct mhi_ring *ev_ring = &mhi_event->ring;

		spin_lock_bh(&mhi_event->lock);
		/* context rp != cached rp -> unprocessed events on this ring */
		if (ev_ring->rp != mhi_to_virtual(ev_ring, er_ctxt->rp)) {
			schedule_work(&mhi_cntrl->low_priority_worker);
			spin_unlock_bh(&mhi_event->lock);
			break;
		}
		spin_unlock_bh(&mhi_event->lock);
	}
}

/*
 * mhi_low_priority_worker - drain events on every low priority event ring.
 * @work: the controller's low_priority_worker work item
 *
 * Runs in process context so event handlers may call sleeping functions
 * (unlike the tasklet path used for high priority rings).  Rings are only
 * processed while the execution environment is in mission mode.
 */
void mhi_low_priority_worker(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							low_priority_worker);
	struct mhi_event *mhi_event;

	MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
		 TO_MHI_EXEC_STR(mhi_cntrl->ee));

	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
		/* re-check ee each iteration; it can change while we run */
		if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
			continue;

		mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	}
}

void mhi_pm_sys_err_worker(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
@@ -1241,6 +1280,14 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
		return -EIO;
	}

	/*
	 * If MHI on host is in suspending/suspended state, we do not process
	 * any low priority requests, for example, bandwidth scaling events
	 * from the device. Check for low priority event rings and handle the
	 * pending events upon resume.
	 */
	mhi_low_priority_events_pending(mhi_cntrl);

	return 0;
}

@@ -1305,12 +1352,15 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
	 */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		mhi_msi_handlr(0, mhi_event);
	}

	/* schedules worker if any low priority events need to be handled */
	mhi_low_priority_events_pending(mhi_cntrl);

	MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+2 −0
Original line number Diff line number Diff line
@@ -240,6 +240,7 @@ struct mhi_controller {
	u32 msi_allocated;
	int *irq; /* interrupt table */
	struct mhi_event *mhi_event;
	struct list_head lp_ev_rings; /* low priority event rings */

	/* cmd rings */
	struct mhi_cmd *mhi_cmd;
@@ -278,6 +279,7 @@ struct mhi_controller {
	struct work_struct st_worker;
	struct work_struct fw_worker;
	struct work_struct syserr_worker;
	struct work_struct low_priority_worker;
	wait_queue_head_t state_event;

	/* shadow functions */