Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 79cec43d, authored by qctecmdr, committed by Gerrit — the friendly Code Review server
Browse files

Merge "mhi: core: Add OOB and DB mode event IPC log and count"

parents 64091379 d8c5bb77
Loading
Loading
Loading
Loading
+2 −13
(diff table header: original line number | diff line number | diff line)
@@ -528,10 +528,9 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
	}
}

void mhi_fw_load_worker(struct work_struct *work)
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
	int ret;
	struct mhi_controller *mhi_cntrl;
	const char *fw_name;
	const struct firmware *firmware = NULL;
	struct image_info *image_info;
@@ -539,17 +538,7 @@ void mhi_fw_load_worker(struct work_struct *work)
	dma_addr_t dma_addr;
	size_t size;

	mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);

	MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
		TO_MHI_EXEC_STR(mhi_cntrl->ee));

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 MHI_IN_PBL(mhi_cntrl->ee) ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		MHI_ERR("MHI is not in valid state\n");
		return;
	}
+13 −7
Original line number Diff line number Diff line
@@ -978,7 +978,7 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
		goto err_put;
	}

	INIT_LIST_HEAD(&mhi_cntrl->lp_ev_rings);
	INIT_LIST_HEAD(&mhi_cntrl->sp_ev_rings);

	/* populate ev ring */
	mhi_event = mhi_cntrl->mhi_event;
@@ -1061,13 +1061,13 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
							      "mhi,offload");

		/*
		 * low priority events are handled in a separate worker thread
		 * special purpose events are handled in a separate kthread
		 * to allow for sleeping functions to be called.
		 */
		if (!mhi_event->offload_ev) {
			if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
			if (IS_MHI_ER_PRIORITY_SPECIAL(mhi_event))
				list_add_tail(&mhi_event->node,
						&mhi_cntrl->lp_ev_rings);
						&mhi_cntrl->sp_ev_rings);
			else
				mhi_event->request_irq = true;
		}
@@ -1360,10 +1360,15 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->special_wq = alloc_ordered_workqueue("mhi_special_w",
						WQ_MEM_RECLAIM | WQ_HIGHPRI);
	if (!mhi_cntrl->special_wq)
		goto error_alloc_cmd;

	INIT_WORK(&mhi_cntrl->special_work, mhi_special_purpose_work);

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);
@@ -1376,7 +1381,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);

		if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
		if (IS_MHI_ER_PRIORITY_SPECIAL(mhi_event))
			continue;

		if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE)
@@ -1464,6 +1469,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)

error_alloc_dev:
	kfree(mhi_cntrl->mhi_cmd);
	destroy_workqueue(mhi_cntrl->special_wq);

error_alloc_cmd:
	vfree(mhi_cntrl->mhi_chan);
+9 −3
Original line number Diff line number Diff line
@@ -362,6 +362,8 @@ enum mhi_cmd_type {
#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)

#define MHI_RSC_MIN_CREDITS (8)

enum MHI_CMD {
	MHI_CMD_RESET_CHAN,
	MHI_CMD_START_CHAN,
@@ -539,10 +541,10 @@ enum MHI_ER_TYPE {
enum mhi_er_priority {
	MHI_ER_PRIORITY_HIGH,
	MHI_ER_PRIORITY_MEDIUM,
	MHI_ER_PRIORITY_LOW,
	MHI_ER_PRIORITY_SPECIAL,
};

#define IS_MHI_ER_PRIORITY_LOW(ev) (ev->priority >= MHI_ER_PRIORITY_LOW)
#define IS_MHI_ER_PRIORITY_SPECIAL(ev) (ev->priority >= MHI_ER_PRIORITY_SPECIAL)
#define IS_MHI_ER_PRIORITY_HIGH(ev) (ev->priority == MHI_ER_PRIORITY_HIGH)

enum mhi_er_data_type {
@@ -695,6 +697,9 @@ struct mhi_chan {
	struct completion completion;
	rwlock_t lock;
	struct list_head node;

	/* stats */
	u64 mode_change;
};

struct tsync_node {
@@ -747,8 +752,8 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum MHI_ST_TRANSITION state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_fw_load_worker(struct work_struct *work);
void mhi_special_purpose_work(struct work_struct *work);
void mhi_process_sys_err(struct mhi_controller *mhi_cntrl);
void mhi_low_priority_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
@@ -911,6 +916,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_dtr_init(void);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		      struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan);
void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl);
+57 −16
Original line number Diff line number Diff line
@@ -499,6 +499,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_tre *mhi_tre;
	bool ring_db = true;
	int n_free_tre, n_queued_tre;

	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
		return -ENOMEM;
@@ -538,15 +540,27 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
		mhi_tre->dword[0] =
			MHI_RSCTRE_DATA_DWORD0(buf_ring->wp - buf_ring->base);
		mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1;
		/*
		 * on RSC channel IPA HW has a minimum credit requirement before
		 * switching to DB mode
		 */
		n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
				DMA_FROM_DEVICE);
		n_queued_tre = tre_ring->elements - n_free_tre;
		read_lock_bh(&mhi_chan->lock);
		if (mhi_chan->db_cfg.db_mode &&
				n_queued_tre < MHI_RSC_MIN_CREDITS)
			ring_db = false;
		read_unlock_bh(&mhi_chan->lock);
	} else {
		mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
		mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
		mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(mhi_chan->bei, 1, 0, 0);
	}

	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
		 (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
		 mhi_tre->dword[0], mhi_tre->dword[1]);
	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x rDB %d\n",
		mhi_chan->chan, (u64)mhi_to_physical(tre_ring, mhi_tre),
		mhi_tre->ptr, mhi_tre->dword[0], mhi_tre->dword[1], ring_db);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
@@ -555,7 +569,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && ring_db) {
		read_lock_bh(&mhi_chan->lock);
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_bh(&mhi_chan->lock);
@@ -972,6 +986,9 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
	u32 ev_code;
	struct mhi_result result;
	unsigned long flags = 0;
	bool ring_db = true;
	int n_free_tre, n_queued_tre;
	unsigned long rflags;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
@@ -1061,20 +1078,42 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long flags;
		mhi_chan->db_cfg.db_mode = true;
		mhi_chan->mode_change++;

		/*
		 * on RSC channel IPA HW has a minimum credit requirement before
		 * switching to DB mode
		 */
		if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
			n_free_tre = mhi_get_no_free_descriptors(
					mhi_chan->mhi_dev, DMA_FROM_DEVICE);
			n_queued_tre = tre_ring->elements - n_free_tre;
			if (n_queued_tre < MHI_RSC_MIN_CREDITS)
				ring_db = false;
		}

		MHI_VERB("OOB_MODE chan %d ring_db %d\n", mhi_chan->chan,
			ring_db);

		MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan);
		read_lock_irqsave(&mhi_cntrl->pm_lock, rflags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl) && ring_db)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, rflags);
		break;
	case MHI_EV_CC_DB_MODE:
		MHI_VERB("DB_MODE chan %d.\n", mhi_chan->chan);
		mhi_chan->db_cfg.db_mode = true;
		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
		mhi_chan->mode_change++;

		read_lock_irqsave(&mhi_cntrl->pm_lock, rflags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
		    MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

		read_unlock_irqrestore(&mhi_cntrl->pm_lock, rflags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
		MHI_ASSERT(1, "Received BAD TRE event for ring");
		break;
@@ -1724,7 +1763,7 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
	MHI_VERB("Exit\n");

	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
		schedule_work(&mhi_cntrl->low_priority_worker);
		queue_work(mhi_cntrl->special_wq, &mhi_cntrl->special_work);

	return IRQ_WAKE_THREAD;
}
@@ -2176,12 +2215,14 @@ int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d)
				   chan_ctxt->pollcfg, chan_ctxt->chtype,
				   chan_ctxt->erindex);
			seq_printf(m,
				   " base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n",
				   " base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx mode_change:0x%llx\n",
				   chan_ctxt->rbase, chan_ctxt->rlen,
				   chan_ctxt->wp,
				   mhi_to_physical(ring, ring->rp),
				   mhi_to_physical(ring, ring->wp),
				   mhi_chan->db_cfg.db_val);
				   mhi_chan->db_cfg.db_val,
				   mhi_chan->mode_change);
			mhi_chan->mode_change = 0;
		}
	}

+20 −19
Original line number Diff line number Diff line
@@ -16,6 +16,8 @@
#include <linux/mhi.h>
#include "mhi_internal.h"

static void mhi_special_events_pending(struct mhi_controller *mhi_cntrl);

/*
 * Not all MHI states transitions are sync transitions. Linkdown, SSR, and
 * shutdown can happen anytime asynchronously. This function will transition to
@@ -527,6 +529,8 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		mhi_timesync_log(mhi_cntrl);

	mhi_special_events_pending(mhi_cntrl);

	MHI_LOG("Adding new devices\n");

	/* add supported devices */
@@ -644,8 +648,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,

	MHI_LOG("Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);
	flush_work(&mhi_cntrl->fw_worker);
	flush_work(&mhi_cntrl->low_priority_worker);
	flush_work(&mhi_cntrl->special_work);

	/* remove support for time sync */
	mhi_destroy_timesync(mhi_cntrl);
@@ -779,18 +782,19 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
	return 0;
}

static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
static void mhi_special_events_pending(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;

	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
	list_for_each_entry(mhi_event, &mhi_cntrl->sp_ev_rings, node) {
		struct mhi_event_ctxt *er_ctxt =
			&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
		struct mhi_ring *ev_ring = &mhi_event->ring;

		spin_lock_bh(&mhi_event->lock);
		if (ev_ring->rp != mhi_to_virtual(ev_ring, er_ctxt->rp)) {
			schedule_work(&mhi_cntrl->low_priority_worker);
			queue_work(mhi_cntrl->special_wq,
				   &mhi_cntrl->special_work);
			spin_unlock_bh(&mhi_event->lock);
			break;
		}
@@ -798,11 +802,11 @@ static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
	}
}

void mhi_low_priority_worker(struct work_struct *work)
void mhi_special_purpose_work(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							low_priority_worker);
							special_work);
	struct mhi_event *mhi_event;

	MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
@@ -810,8 +814,8 @@ void mhi_low_priority_worker(struct work_struct *work)
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
		 TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/* check low priority event rings and process events */
	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node)
	/* check special purpose event rings and process events */
	list_for_each_entry(mhi_event, &mhi_cntrl->sp_ev_rings, node)
		mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
}

@@ -852,7 +856,7 @@ void mhi_pm_st_worker(struct work_struct *work)
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			if (MHI_IN_PBL(mhi_cntrl->ee))
				wake_up_all(&mhi_cntrl->state_event);
				mhi_fw_load_handler(mhi_cntrl);
			break;
		case MHI_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
@@ -961,9 +965,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
	next_state = MHI_IN_PBL(current_ee) ?
		MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY;

	if (next_state == MHI_ST_TRANSITION_PBL)
		schedule_work(&mhi_cntrl->fw_worker);

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mhi_init_debugfs(mhi_cntrl);
@@ -1381,11 +1382,11 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)

	/*
	 * If MHI on host is in suspending/suspended state, we do not process
	 * any low priority requests, for example, bandwidth scaling events
	 * from the device. Check for low priority event rings and handle the
	 * pending events upon resume.
	 * any special purpose requests, for example, bandwidth scaling events
	 * from the device. Check for special purpose event rings and handle
	 * the pending events upon resume.
	 */
	mhi_low_priority_events_pending(mhi_cntrl);
	mhi_special_events_pending(mhi_cntrl);

	return 0;
}
@@ -1476,8 +1477,8 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
		mhi_msi_handlr(0, mhi_event);
	}

	/* schedules worker if any low priority events need to be handled */
	mhi_low_priority_events_pending(mhi_cntrl);
	/* schedules worker if any special purpose events need to be handled */
	mhi_special_events_pending(mhi_cntrl);

	MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
Loading