Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed328e7b authored by Mohana Basava Tejesh Reddy Mareddy's avatar Mohana Basava Tejesh Reddy Mareddy
Browse files

msm: mhi_dev: Add support for MSI disable



The PCIe endpoint device informs the PCIe root complex/host about the
completion or availability of new work using MSI. In some cases, such
as UEFI, MSI functionality is not supported by the PCIe root
complex/host in use, which can work in polling mode only. When MSI is
not supported, the endpoint is not allowed to raise MSI. This change
queries the root complex/host for MSI support and handles the
unsupported case by polling the MHI state instead of invoking MSIs
towards the host.

Change-Id: Ie9c78f40fe7e9735d3e0104f845e29ac82d76778
Signed-off-by: default avatarMohana Basava Tejesh Reddy Mareddy <quic_mmareddy@quicinc.com>
parent 325681c5
Loading
Loading
Loading
Loading
+64 −8
Original line number Diff line number Diff line
@@ -281,7 +281,11 @@ static int mhi_dev_schedule_msi_ipa(struct mhi_dev *mhi, struct event_req *ereq)
	int rc;

	rc = ep_pcie_get_msi_config(mhi->phandle, &cfg);
	if (rc) {
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
		return 0;
	} else if (rc) {
		mhi_log(MHI_MSG_ERROR, "Error retrieving pcie msi logic\n");
		return rc;
	}
@@ -335,6 +339,24 @@ static void mhi_dev_event_rd_offset_completion_cb(void *req)
	if (ereq->event_rd_dma)
		dma_unmap_single(&mhi_ctx->pdev->dev, ereq->event_rd_dma,
		sizeof(uint64_t), DMA_TO_DEVICE);

	/*
	 * The mhi_dev_cmd_event_msi_cb and mhi_dev_event_msi_cb APIs
	 * add back the flushed events' space to the event buffer and return
	 * the event req back to the list. These are registered in the API
	 * mhi_dev_schedule_msi_ipa and get invoked when MSI triggering is
	 * complete.
	 * In the case of MSI being disabled by the host, these callbacks will
	 * not get invoked as triggering MSI is suppressed from the device side.
	 * Hence, invoke these callbacks as part of this API to ensure we do
	 * not run out of ereq buffer space in this scenario.
	 */
	if (mhi_ctx->msi_disable) {
		if (ereq->is_cmd_cpl)
			mhi_dev_cmd_event_msi_cb(ereq);
		else
			mhi_dev_event_msi_cb(ereq);
	}
}

static void mhi_dev_cmd_event_msi_cb(void *req)
@@ -415,7 +437,11 @@ static int mhi_trigger_msi_edma(struct mhi_dev_ring *ring, u32 idx)

	if (!mhi_ctx->msi_lower) {
		rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
		if (rc) {
		if (rc == -EOPNOTSUPP) {
			mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
			mhi_ctx->msi_disable = true;
			return 0;
		} else if (rc) {
			mhi_log(MHI_MSG_ERROR,
					"Error retrieving pcie msi logic\n");
			return rc;
@@ -1425,7 +1451,10 @@ static int mhi_hwc_init(struct mhi_dev *mhi)

	/* Call IPA HW_ACC Init with MSI Address and db routing info */
	rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc) {
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
	} else if (rc) {
		mhi_log(MHI_MSG_ERROR,
			"Error retrieving pcie msi logic\n");
		return rc;
@@ -1450,6 +1479,7 @@ static int mhi_hwc_init(struct mhi_dev *mhi)
	ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1);
	ipa_init_params.first_er_idx = erdb_cfg.base;
	ipa_init_params.first_ch_idx = HW_CHANNEL_BASE;
	ipa_init_params.disable_msi = mhi_ctx->msi_disable;

	if (mhi_ctx->config_iatu)
		ipa_init_params.mmio_addr =
@@ -1642,7 +1672,10 @@ int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
	struct mhi_addr transfer_addr;

	rc = ep_pcie_get_msi_config(mhi->phandle, &cfg);
	if (rc) {
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
	} else if (rc) {
		mhi_log(MHI_MSG_ERROR, "Error retrieving pcie msi logic\n");
		return rc;
	}
@@ -3759,6 +3792,9 @@ static int mhi_dev_recover(struct mhi_dev *mhi)
		mhi_log(MHI_MSG_VERBOSE, "mhi_state = 0x%X, reset = %d\n",
				state, mhi_reset);

		if (mhi_ctx->msi_disable)
			goto poll_for_reset;

		rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec);
		if (rc)
			return rc;
@@ -3785,7 +3821,7 @@ static int mhi_dev_recover(struct mhi_dev *mhi)
		}

		if (bhi_intvec != 0xffffffff) {
			/* Indicate the host that the device is ready */
			/* Indicate the host that device is ready */
			rc = ep_pcie_trigger_msi(mhi->phandle, bhi_intvec);
			if (rc) {
				mhi_log(MHI_MSG_ERROR, "error sending msi\n");
@@ -3793,6 +3829,7 @@ static int mhi_dev_recover(struct mhi_dev *mhi)
			}
		}

poll_for_reset:
		/* Poll for the host to set the reset bit */
		rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
		if (rc) {
@@ -4313,6 +4350,7 @@ static void mhi_dev_reinit(struct work_struct *work)
static int mhi_dev_resume_mmio_mhi_init(struct mhi_dev *mhi_ctx)
{
	struct platform_device *pdev;
	struct ep_pcie_msi_config cfg;
	int rc = 0;

	/*
@@ -4360,6 +4398,24 @@ static int mhi_dev_resume_mmio_mhi_init(struct mhi_dev *mhi_ctx)
		return -EINVAL;
	}

	/*
	 * Fetching MSI config to read the MSI capability and setting the
	 * msi_disable flag based on it.
	 */
	rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
	if (rc == -EOPNOTSUPP) {
		mhi_log(MHI_MSG_VERBOSE, "MSI is disabled\n");
		mhi_ctx->msi_disable = true;
	} else if (!rc) {
		mhi_ctx->msi_disable = false;
	} else {
		mhi_log(MHI_MSG_ERROR,
			"Error retrieving pcie msi logic\n");
		return rc;
	}

	mhi_log(MHI_MSG_VERBOSE, "msi_disable = %d\n", mhi_ctx->msi_disable);

	rc = mhi_dev_recover(mhi_ctx);
	if (rc) {
		mhi_log(MHI_MSG_ERROR, "get mhi state failed\n");
+1 −0
Original line number Diff line number Diff line
@@ -527,6 +527,7 @@ struct mhi_dev {

	uint32_t			*mmio_backup;
	struct mhi_config		cfg;
	bool				msi_disable;
	u32				msi_data;
	u32				msi_lower;
	spinlock_t			msi_lock;
+1 −1
Original line number Diff line number Diff line
@@ -479,7 +479,7 @@ static int mhi_sm_prepare_resume(void)
	case MHI_DEV_READY_STATE:
		res = ep_pcie_get_msi_config(mhi_sm_ctx->mhi_dev->phandle,
			&cfg);
		if (res) {
		if (res && res != -EOPNOTSUPP) {
			MHI_SM_ERR("Error retrieving pcie msi logic\n");
			goto exit;
		}