Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 31079b8c authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mhi: core: device requested bandwidth scaling support"

parents 75f0ee6b 5497a9f6
Loading
Loading
Loading
Loading
+70 −8
Original line number Diff line number Diff line
@@ -192,7 +192,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -216,7 +216,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		ret = request_irq(mhi_cntrl->irq[mhi_event->msi],
@@ -233,7 +233,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -504,15 +504,18 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
	return ret;
}

static int mhi_get_tsync_er_cfg(struct mhi_controller *mhi_cntrl)
/* to be used only if a single event ring with the type is present */
static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
			    enum mhi_er_data_type type)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	/* find event ring with timesync support */
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++)
		if (mhi_event->data_type == MHI_ER_TSYNC_ELEMENT_TYPE)
	/* find event ring for requested type */
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->data_type == type)
			return mhi_event->er_index;
	}

	return -ENOENT;
}
@@ -587,7 +590,7 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
	read_unlock_bh(&mhi_cntrl->pm_lock);

	/* get time-sync event ring configuration */
	ret = mhi_get_tsync_er_cfg(mhi_cntrl);
	ret = mhi_get_er_index(mhi_cntrl, MHI_ER_TSYNC_ELEMENT_TYPE);
	if (ret < 0) {
		MHI_LOG("Could not find timesync event ring\n");
		return ret;
@@ -617,6 +620,36 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
	return ret;
}

/*
 * mhi_init_bw_scale - advertise host support for device-initiated bandwidth
 * scaling by programming the BW scale capability register with the doorbell
 * channel and the event ring the device should use.
 *
 * Returns 0 on success; -ENODEV if the controller registered no bw_scale
 * callback, a negative error from mhi_get_capability_offset() if the device
 * does not expose the capability, or -ENOENT if no BW-scale event ring is
 * configured.
 */
static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
{
	int ret, er_index;
	u32 bw_cfg_offset;

	/* controller doesn't support dynamic bw switch */
	if (!mhi_cntrl->bw_scale)
		return -ENODEV;

	ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID,
					&bw_cfg_offset);
	if (ret)
		return ret;

	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE_ELEMENT_TYPE);
	/* no event ring configured to support BW scale */
	if (er_index < 0)
		return er_index;

	bw_cfg_offset += BW_SCALE_CFG_OFFSET;

	MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);

	/* advertise host support */
	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
		      MHI_BW_SCALE_SETUP(er_index));

	return 0;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
@@ -713,6 +746,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* setup bw scale db */
	mhi_cntrl->bw_scale_db = base + val + (8 * MHI_BW_SCALE_CHAN_DB);

	/* setup channel db addresses */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
@@ -743,6 +779,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	/* setup bandwidth scaling features */
	mhi_init_bw_scale(mhi_cntrl);

	return 0;
}

@@ -893,6 +932,8 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lp_ev_rings);

	/* populate ev ring */
	mhi_event = mhi_cntrl->mhi_event;
	i = 0;
@@ -958,6 +999,9 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
		case MHI_ER_TSYNC_ELEMENT_TYPE:
			mhi_event->process_event = mhi_process_tsync_event_ring;
			break;
		case MHI_ER_BW_SCALE_ELEMENT_TYPE:
			mhi_event->process_event = mhi_process_bw_scale_ev_ring;
			break;
		}

		mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev");
@@ -969,6 +1013,19 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
							"mhi,client-manage");
		mhi_event->offload_ev = of_property_read_bool(child,
							      "mhi,offload");

		/*
		 * low priority events are handled in a separate worker thread
		 * to allow for sleeping functions to be called.
		 */
		if (!mhi_event->offload_ev) {
			if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
				list_add_tail(&mhi_event->node,
						&mhi_cntrl->lp_ev_rings);
			else
				mhi_event->request_irq = true;
		}

		mhi_event++;
	}

@@ -1248,6 +1305,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -1261,6 +1319,10 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);

		if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
			continue;

		if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
+41 −3
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@ extern struct bus_type mhi_bus_type;

/* MHI mmio register mapping */
#define PCI_INVALID_READ(val) (val == U32_MAX)
#define MHI_REG_SIZE (SZ_4K)

#define MHIREGLEN (0x0)
#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
@@ -162,6 +163,17 @@ extern struct bus_type mhi_bus_type;

#define TIMESYNC_CAP_ID (2)

/* MHI Bandwidth scaling offsets */
#define BW_SCALE_CFG_OFFSET (0x04)
#define BW_SCALE_CFG_CHAN_DB_ID_MASK (0xFE000000)
#define BW_SCALE_CFG_CHAN_DB_ID_SHIFT (25)
#define BW_SCALE_CFG_ENABLED_MASK (0x01000000)
#define BW_SCALE_CFG_ENABLED_SHIFT (24)
#define BW_SCALE_CFG_ER_ID_MASK (0x00F80000)
#define BW_SCALE_CFG_ER_ID_SHIFT (19)

#define BW_SCALE_CAP_ID (3)

/* MHI BHI offsets */
#define BHI_BHIVERSION_MINOR (0x00)
#define BHI_BHIVERSION_MAJOR (0x04)
@@ -335,12 +347,13 @@ enum mhi_cmd_type {
#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
#define MHI_TRE_GET_EV_TSYNC_SEQ(tre) ((tre)->dword[0])
#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) (((tre)->dword[0] >> 8) & 0xFF)

/* transfer descriptor macros */
#define MHI_TRE_DATA_PTR(ptr) (ptr)
@@ -497,19 +510,38 @@ enum MHI_XFER_TYPE {
#define NR_OF_CMD_RINGS (1)
#define CMD_EL_PER_RING (128)
#define PRIMARY_CMD_RING (0)
#define MHI_BW_SCALE_CHAN_DB (126)
#define MHI_DEV_WAKE_DB (127)
#define MHI_MAX_MTU (0xffff)

/*
 * Compose the BW scale setup register value: doorbell channel id,
 * host-enabled bit, and the event ring the device should post requests to.
 * The unsigned casts are required: MHI_BW_SCALE_CHAN_DB (126) shifted left
 * by 25 overflows a signed int, which is undefined behavior.
 */
#define MHI_BW_SCALE_SETUP(er_index) \
	((((unsigned int)MHI_BW_SCALE_CHAN_DB << \
	   BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK) | \
	 ((1U << BW_SCALE_CFG_ENABLED_SHIFT) & BW_SCALE_CFG_ENABLED_MASK) | \
	 (((unsigned int)(er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & \
	  BW_SCALE_CFG_ER_ID_MASK))

/* response doorbell payload: status nibble in bits 11:8, sequence in 7:0 */
#define MHI_BW_SCALE_RESULT(status, seq) \
	(((((unsigned int)(status)) & 0xF) << 8) | (((unsigned int)(seq)) & 0xFF))
#define MHI_BW_SCALE_NACK 0xF

enum MHI_ER_TYPE {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};

/* relative scheduling priority of an event ring's processing */
enum mhi_er_priority {
	MHI_ER_PRIORITY_HIGH,
	MHI_ER_PRIORITY_MEDIUM,
	MHI_ER_PRIORITY_LOW,
};

/* parenthesize the argument so expressions like (&ev) expand correctly */
#define IS_MHI_ER_PRIORITY_LOW(ev) ((ev)->priority >= MHI_ER_PRIORITY_LOW)
#define IS_MHI_ER_PRIORITY_HIGH(ev) ((ev)->priority == MHI_ER_PRIORITY_HIGH)

/* logical content type carried by an event ring */
enum mhi_er_data_type {
	MHI_ER_DATA_ELEMENT_TYPE,
	MHI_ER_CTRL_ELEMENT_TYPE,
	MHI_ER_TSYNC_ELEMENT_TYPE,
	MHI_ER_BW_SCALE_ELEMENT_TYPE,
	/* keep MAX tracking the last real type when adding entries */
	MHI_ER_DATA_TYPE_MAX = MHI_ER_BW_SCALE_ELEMENT_TYPE,
};

enum mhi_ch_ee_mask {
@@ -592,17 +624,19 @@ struct mhi_buf_info {
};

struct mhi_event {
	struct list_head node;
	u32 er_index;
	u32 intmod;
	u32 msi;
	int chan; /* this event ring is dedicated to a channel */
	u32 priority;
	enum mhi_er_priority priority;
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	bool hw_ring;
	bool cl_manage;
	bool offload_ev; /* managed by a device driver */
	bool request_irq; /* has dedicated interrupt handler */
	spinlock_t lock;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	struct tasklet_struct task;
@@ -703,6 +737,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
void mhi_pm_st_worker(struct work_struct *work);
void mhi_fw_load_worker(struct work_struct *work);
void mhi_pm_sys_err_worker(struct work_struct *work);
void mhi_low_priority_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
@@ -715,6 +750,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
				 struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
				 struct mhi_event *mhi_event, u32 event_quota);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum MHI_CMD cmd);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
@@ -756,6 +793,7 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);
int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
			      u32 *offset);
void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
+102 −23
Original line number Diff line number Diff line
@@ -90,7 +90,9 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
		if (ret)
			return ret;

		*offset += next_offset;
		*offset = next_offset;
		if (*offset >= MHI_REG_SIZE)
			return -ENXIO;
	} while (next_offset);

	return -ENXIO;
@@ -264,7 +266,7 @@ static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
	return nr_el;
}

static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}
@@ -1148,25 +1150,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			MHI_VERB(
				 "Received BW_REQ with link speed:0x%x width:0x%x\n",
				 link_info->target_link_speed,
				 link_info->target_link_width);
			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
					     MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_dev_state new_state;
@@ -1352,7 +1335,7 @@ int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,

		MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event");

		sequence = MHI_TRE_GET_EV_SEQ(local_rp);
		sequence = MHI_TRE_GET_EV_TSYNC_SEQ(local_rp);
		remote_time = MHI_TRE_GET_EV_TIME(local_rp);

		do {
@@ -1398,6 +1381,94 @@ int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
	return count;
}

/*
 * mhi_process_bw_scale_ev_ring - service a device bandwidth-scale request
 * posted on a dedicated BW-scale event ring.
 *
 * Only the most recent request on the ring is serviced: the ring is fast
 * forwarded to the last element and older requests are discarded, so
 * @event_quota is intentionally unused. The controller's bw_scale callback
 * is invoked with the requested link speed/width, and the result (0 or
 * MHI_BW_SCALE_NACK) plus the request's sequence number is written back to
 * the BW-scale doorbell so the device can match response to request.
 *
 * Runs in process context (low priority worker) — sleeps on pm_mutex and
 * in the bw_scale callback.
 *
 * Returns 0 on success (or nothing to process), -EIO if register access is
 * not allowed in the current PM state, -EACCES while suspended, or the
 * error from the controller's bw_scale callback.
 */
int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
				 struct mhi_event *mhi_event,
				 u32 event_quota)
{
	struct mhi_tre *dev_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
	int result, ret = 0;

	/* serialize against PM state transitions for the whole operation */
	mutex_lock(&mhi_cntrl->pm_mutex);

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
		MHI_LOG("No EV access, PM_STATE:%s\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		ret = -EIO;
		goto exit_bw_process;
	}

	/*
	 * BW change is not processed during suspend since we're suspending
	 * link, host will process it during resume
	 */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
		ret = -EACCES;
		goto exit_bw_process;
	}

	spin_lock_bh(&mhi_event->lock);
	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	/* nothing new posted by the device since last service */
	if (ev_ring->rp == dev_rp) {
		spin_unlock_bh(&mhi_event->lock);
		goto exit_bw_process;
	}

	/* if rp points to base, we need to wrap it around */
	if (dev_rp == ev_ring->base)
		dev_rp = ev_ring->base + ev_ring->len;
	dev_rp--;	/* step back to the last element the device wrote */

	MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT,
		   "!BW SCALE REQ event");

	link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
	link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
	link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);

	MHI_VERB("Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
		 link_info.sequence_num,
		 link_info.target_link_speed,
		 link_info.target_link_width);

	/* fast forward to currently processed element and recycle er */
	ev_ring->rp = dev_rp;
	ev_ring->wp = dev_rp - 1;
	/* wp stepped before base — wrap to the last element of the ring */
	if (ev_ring->wp < ev_ring->base)
		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
	mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);

	/* ring the event ring doorbell only if register access is allowed */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_event->lock);

	/* hand the request to the controller; may sleep */
	ret = mhi_cntrl->bw_scale(mhi_cntrl, &link_info);
	if (!ret)
		*cur_info = link_info;	/* commit new link state on success */

	result = ret ? MHI_BW_SCALE_NACK : 0;

	/* report ACK/NACK with the request's sequence number to the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
			      MHI_BW_SCALE_RESULT(result,
						  link_info.sequence_num));
	read_unlock_bh(&mhi_cntrl->pm_lock);

exit_bw_process:
	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}

void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
@@ -1478,7 +1549,13 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev)

		if (mhi_dev)
			mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
	} else

		return IRQ_HANDLED;
	}

	if (IS_MHI_ER_PRIORITY_HIGH(mhi_event))
		tasklet_hi_schedule(&mhi_event->task);
	else
		tasklet_schedule(&mhi_event->task);

	return IRQ_HANDLED;
@@ -1548,6 +1625,8 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
	wake_up_all(&mhi_cntrl->state_event);
	MHI_VERB("Exit\n");

	schedule_work(&mhi_cntrl->low_priority_worker);

	return IRQ_WAKE_THREAD;
}

+52 −2
Original line number Diff line number Diff line
@@ -598,7 +598,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
	MHI_LOG("Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;
		tasklet_kill(&mhi_event->task);
	}
@@ -617,6 +617,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
	wake_up_all(&mhi_cntrl->state_event);
	flush_work(&mhi_cntrl->st_worker);
	flush_work(&mhi_cntrl->fw_worker);
	flush_work(&mhi_cntrl->low_priority_worker);

	mutex_lock(&mhi_cntrl->pm_mutex);

@@ -729,6 +730,44 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
	return 0;
}

/*
 * mhi_low_priority_events_pending - schedule the low priority worker if any
 * low priority event ring has unprocessed elements.
 *
 * Compares each ring's host read pointer with the device's read pointer from
 * the event ring context; the first mismatch schedules the worker once and
 * stops scanning.
 */
static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *ev;

	list_for_each_entry(ev, &mhi_cntrl->lp_ev_rings, node) {
		struct mhi_ring *ring = &ev->ring;
		struct mhi_event_ctxt *ctxt =
			&mhi_cntrl->mhi_ctxt->er_ctxt[ev->er_index];

		spin_lock_bh(&ev->lock);
		if (ring->rp != mhi_to_virtual(ring, ctxt->rp)) {
			/* one pending ring is enough; worker scans them all */
			schedule_work(&mhi_cntrl->low_priority_worker);
			spin_unlock_bh(&ev->lock);
			break;
		}
		spin_unlock_bh(&ev->lock);
	}
}

/*
 * mhi_low_priority_worker - process context handler for low priority event
 * rings, allowing their handlers to call sleeping functions.
 *
 * Drains every ring on the controller's lp_ev_rings list, but only while
 * the device is in mission mode.
 */
void mhi_low_priority_worker(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							low_priority_worker);
	struct mhi_event *ev;

	MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
		 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
		 TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/* check low priority event rings and process events */
	list_for_each_entry(ev, &mhi_cntrl->lp_ev_rings, node) {
		if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
			continue;

		ev->process_event(mhi_cntrl, ev, U32_MAX);
	}
}

void mhi_pm_sys_err_worker(struct work_struct *work)
{
	struct mhi_controller *mhi_cntrl = container_of(work,
@@ -1253,6 +1292,14 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
		return -EIO;
	}

	/*
	 * If MHI on host is in suspending/suspended state, we do not process
	 * any low priority requests, for example, bandwidth scaling events
	 * from the device. Check for low priority event rings and handle the
	 * pending events upon resume.
	 */
	mhi_low_priority_events_pending(mhi_cntrl);

	return 0;
}

@@ -1317,12 +1364,15 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
	 */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
		if (!mhi_event->request_irq)
			continue;

		mhi_msi_handlr(0, mhi_event);
	}

	/* schedules worker if any low priority events need to be handled */
	mhi_low_priority_events_pending(mhi_cntrl);

	MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+7 −0
Original line number Diff line number Diff line
@@ -124,10 +124,12 @@ enum mhi_dev_state {
 * struct mhi_link_info - bw requirement
 * target_link_speed - as defined by TLS bits in LinkControl reg
 * target_link_width - as defined by NLW bits in LinkStatus reg
 * sequence_num - used by device to track bw requests sent to host
 */
struct mhi_link_info {
	unsigned int target_link_speed;
	unsigned int target_link_width;
	int sequence_num;
};

#define MHI_VOTE_BUS BIT(0) /* do not disable the bus */
@@ -206,6 +208,7 @@ struct mhi_controller {
	void __iomem *bhi;
	void __iomem *bhie;
	void __iomem *wake_db;
	void __iomem *bw_scale_db;

	/* device topology */
	u32 dev_id;
@@ -248,6 +251,7 @@ struct mhi_controller {
	u32 msi_allocated;
	int *irq; /* interrupt table */
	struct mhi_event *mhi_event;
	struct list_head lp_ev_rings; /* low priority event rings */

	/* cmd rings */
	struct mhi_cmd *mhi_cmd;
@@ -286,6 +290,7 @@ struct mhi_controller {
	struct work_struct st_worker;
	struct work_struct fw_worker;
	struct work_struct syserr_worker;
	struct work_struct low_priority_worker;
	wait_queue_head_t state_event;

	/* shadow functions */
@@ -304,6 +309,8 @@ struct mhi_controller {
	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf);
	void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
	int (*bw_scale)(struct mhi_controller *mhi_cntrl,
			struct mhi_link_info *link_info);

	/* channel to control DTR messaging */
	struct mhi_device *dtr_dev;