Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2ebf0076 authored by Rama Krishna Phani A's avatar Rama Krishna Phani A
Browse files

msm: mhi_dev: Add support to trigger MSI via EDMA



Some usecases of MHI use EDMA instead of IPA DMA.

EP PCIe triggers MSI via CPU. For usecases that use EDMA,
possible NOC congestion can happen as multiple masters
(Controller, CPU) are accessing NOC for data transfers and
triggering MSI that might lead to NOC timeouts during high
throughput data transfers.

Add MSI trigger support for MHI to make sure that data transfers
and MSI (via EDMA) are triggered by a single master (PCIe controller)
for EDMA usecase.

Change-Id: Ie704522847a9a48e943ec78aea5a807d573dee8d
Signed-off-by: default avatarRama Krishna Phani A <rphani@codeaurora.org>
parent 7fd290d4
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -2783,7 +2783,7 @@ int ep_pcie_core_get_msi_config(struct ep_pcie_msi_config *cfg)
					msi->start, 0, msi->end,
					lower, upper);

		if (ep_pcie_dev.active_config) {
		if (ep_pcie_dev.active_config || ep_pcie_dev.pcie_edma) {
			cfg->lower = lower;
			cfg->upper = upper;
		} else {
+78 −8
Original line number Diff line number Diff line
@@ -265,9 +265,11 @@ static void mhi_dev_event_rd_offset_completion_cb(void *req)
	/* rp update in host memory should be flushed before sending an MSI */
	wmb();
	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[ereq->event_ring];
	if (mhi_ctx->use_ipa) {
		rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
		if (rc)
			pr_err("%s: error sending in msi\n", __func__);
	}

	/* Add back the flushed events space to the event buffer */
	ch->evt_buf_wp = ereq->start + ereq->num_events;
@@ -282,6 +284,55 @@ static void mhi_dev_event_rd_offset_completion_cb(void *req)
	spin_unlock_irqrestore(&mhi->lock, flags);
}

/*
 * Completion callback for the EDMA memcpy that writes the MSI data word
 * to the host's MSI address. Nothing to clean up here — the per-ring MSI
 * buffer is long-lived coherent memory — so this only logs the invocation.
 * Runs in the dmaengine callback context.
 */
static void msi_trigger_completion_cb(void *data)
{
	mhi_log(MHI_MSG_VERBOSE,
			"%s invoked\n", __func__);
}

/*
 * mhi_trigger_msi_edma() - raise an MSI toward the host via the EDMA engine
 * @ring: event ring whose coherent msi_buf holds the MSI data word
 * @idx: MSI vector index added to the base MSI data value
 *
 * Instead of letting the CPU write the MSI, queue a 4-byte DMA memcpy of
 * (msi_data + idx) from the ring's coherent buffer to the host MSI address,
 * so data transfers and MSI generation come from a single master (the PCIe
 * controller) and NOC congestion is avoided.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int mhi_trigger_msi_edma(struct mhi_dev_ring *ring, u32 idx)
{
	struct dma_async_tx_descriptor *descriptor;
	struct ep_pcie_msi_config cfg;
	struct msi_buf_cb_data *msi_buf;
	int rc;
	unsigned long flags;

	/* Lazily fetch and cache the host MSI address/data on first use */
	if (!mhi_ctx->msi_lower) {
		rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
		if (rc) {
			pr_err("Error retrieving pcie msi config\n");
			return rc;
		}

		mhi_ctx->msi_data = cfg.data;
		mhi_ctx->msi_lower = cfg.lower;
	}

	mhi_log(MHI_MSG_VERBOSE,
		"Trigger MSI via edma, MSI lower:%x IRQ:%d idx:%d\n",
		mhi_ctx->msi_lower, mhi_ctx->msi_data + idx, idx);

	/* Serialize use of the shared per-ring MSI buffer and DMA channel */
	spin_lock_irqsave(&mhi_ctx->msi_lock, flags);

	msi_buf = &ring->msi_buf;
	msi_buf->buf[0] = (mhi_ctx->msi_data + idx);

	descriptor = dmaengine_prep_dma_memcpy(mhi_ctx->tx_dma_chan,
				(dma_addr_t)(mhi_ctx->msi_lower),
				msi_buf->dma_addr,
				sizeof(u32),
				DMA_PREP_INTERRUPT);
	if (!descriptor) {
		/* prep may return NULL when the channel is out of descriptors */
		spin_unlock_irqrestore(&mhi_ctx->msi_lock, flags);
		pr_err("%s: failed to prep dma memcpy for MSI\n", __func__);
		return -EIO;
	}

	descriptor->callback_param = msi_buf;
	descriptor->callback = msi_trigger_completion_cb;

	/*
	 * The descriptor must be submitted to the channel's pending queue
	 * before issue_pending, otherwise the transfer is never started.
	 */
	dmaengine_submit(descriptor);
	dma_async_issue_pending(mhi_ctx->tx_dma_chan);

	spin_unlock_irqrestore(&mhi_ctx->msi_lock, flags);

	return 0;
}

static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
		struct event_req *ereq, uint32_t evt_len)
{
@@ -302,8 +353,9 @@ static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
		return -EINVAL;
	}

	if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
	ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];

	if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
		rc = mhi_ring_start(ring, ctx, mhi);
		if (rc) {
			mhi_log(MHI_MSG_ERROR,
@@ -369,6 +421,13 @@ static int mhi_dev_send_multiple_tr_events(struct mhi_dev *mhi, int evnt_ring,
	ereq->event_ring = evnt_ring;
	mhi_ctx->write_to_host(mhi, &transfer_addr, ereq, MHI_DEV_DMA_ASYNC);
	mutex_unlock(&ring->event_lock);

	if (mhi_ctx->use_edma) {
		rc = mhi_trigger_msi_edma(ring, ctx->ev.msivec);
		if (rc)
			pr_err("%s: error sending in msi\n", __func__);
	}

	return rc;
}

@@ -751,9 +810,9 @@ void mhi_dev_write_to_host_edma(struct mhi_dev *mhi, struct mhi_addr *transfer,
	}

	mhi_log(MHI_MSG_VERBOSE,
		"device 0x%llx --> host 0x%llx, size %d\n",
		"device 0x%llx --> host 0x%llx, size %d, type = %d\n",
		mhi->cache_dma_handle, host_addr_pa,
		(int) transfer->size);
		(int) transfer->size, tr_type);
	if (tr_type == MHI_DEV_DMA_ASYNC) {
		/*
		 * Event read pointer memory is dma_alloc_coherent memory
@@ -1393,7 +1452,12 @@ int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
	mhi_log(MHI_MSG_VERBOSE, "evnt type :0x%x\n", el->evt_tr_comp.type);
	mhi_log(MHI_MSG_VERBOSE, "evnt chid :0x%x\n", el->evt_tr_comp.chid);

	return ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);
	if (mhi_ctx->use_edma)
		rc = mhi_trigger_msi_edma(ring, ctx->ev.msivec);
	else
		rc = ep_pcie_trigger_msi(mhi_ctx->phandle, ctx->ev.msivec);

	return rc;
}

static int mhi_dev_send_completion_event_async(struct mhi_dev_channel *ch,
@@ -3479,6 +3543,11 @@ static int mhi_deinit(struct mhi_dev *mhi)
			sizeof(union mhi_dev_ring_element_type),
			ring->ring_cache,
			ring->ring_cache_dma_handle);

		if (mhi->use_edma)
			dma_free_coherent(mhi->dev, sizeof(u32),
				ring->msi_buf.buf,
				ring->msi_buf.dma_addr);
	}

	devm_kfree(&pdev->dev, mhi->mmio_backup);
@@ -3527,6 +3596,7 @@ static int mhi_init(struct mhi_dev *mhi)
	}

	spin_lock_init(&mhi->lock);
	spin_lock_init(&mhi->msi_lock);
	mhi->mmio_backup = devm_kzalloc(&pdev->dev,
			MHI_DEV_MMIO_RANGE, GFP_KERNEL);
	if (!mhi->mmio_backup)
@@ -3834,7 +3904,7 @@ static int mhi_edma_init(struct device *dev)
			mhi_ctx->tx_dma_chan);

	mhi_ctx->rx_dma_chan = dma_request_slave_channel(dev, "rx");
	if (IS_ERR_OR_NULL(mhi_ctx->tx_dma_chan)) {
	if (IS_ERR_OR_NULL(mhi_ctx->rx_dma_chan)) {
		pr_err("%s(): request for RX chan failed\n", __func__);
		return -EIO;
	}
+9 −0
Original line number Diff line number Diff line
@@ -369,6 +369,11 @@ enum mhi_dev_transfer_type {
	MHI_DEV_DMA_ASYNC,
};

/*
 * Per-ring coherent buffer used to trigger an MSI via EDMA: the MSI data
 * word is staged in @buf and DMA-copied to the host MSI address.
 */
struct msi_buf_cb_data {
	u32 *buf;		/* CPU pointer to the staged MSI data word */
	dma_addr_t dma_addr;	/* DMA (source) address of @buf */
};

struct mhi_dev_channel;

struct mhi_dev_ring {
@@ -397,6 +402,7 @@ struct mhi_dev_ring {
	union mhi_dev_ring_ctx			*ring_ctx;
	/* ring_ctx_shadow -> tracking ring_ctx in the host */
	union mhi_dev_ring_ctx			*ring_ctx_shadow;
	struct msi_buf_cb_data		msi_buf;
	void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el,
			void *ctx);
@@ -495,6 +501,9 @@ struct mhi_dev {

	uint32_t			*mmio_backup;
	struct mhi_config		cfg;
	u32				msi_data;
	u32				msi_lower;
	spinlock_t			msi_lock;
	bool				mmio_initialized;

	spinlock_t			lock;
+44 −2
Original line number Diff line number Diff line
@@ -26,6 +26,13 @@

#include "mhi.h"

/* Placeholder event request for async host writes that need no follow-up */
static struct event_req dummy_ereq;

/* No-op completion callback paired with dummy_ereq; only logs the call */
static void mhi_dev_event_buf_completion_dummy_cb(void *req)
{
	mhi_log(MHI_MSG_VERBOSE, "%s invoked\n", __func__);
}

static size_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
{
	uint64_t rbase;
@@ -355,8 +362,19 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
		host_addr.virt_addr = element;
		host_addr.size = (ring->ring_size - old_offset) *
			sizeof(union mhi_dev_ring_element_type);

		if (mhi_ctx->use_ipa) {
			mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
				NULL, MHI_DEV_DMA_SYNC);
		} else {
			dummy_ereq.event_type = SEND_EVENT_BUFFER;
			host_addr.phy_addr = 0;
			/* Nothing to do in the callback */
			dummy_ereq.client_cb =
				mhi_dev_event_buf_completion_dummy_cb;
			mhi_ctx->write_to_host(ring->mhi_dev, &host_addr,
					&dummy_ereq, MHI_DEV_DMA_ASYNC);
		}

		/* Copy remaining elements */
		if (MHI_USE_DMA(mhi_ctx))
@@ -374,6 +392,24 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
}
EXPORT_SYMBOL(mhi_dev_add_element);

static int mhi_dev_ring_alloc_msi_buf(struct mhi_dev_ring *ring)
{
	if (ring->msi_buf.buf) {
		mhi_log(MHI_MSG_INFO, "MSI buf already allocated\n");
		return 0;
	}

	ring->msi_buf.buf = dma_alloc_coherent(&ring->mhi_dev->pdev->dev,
				sizeof(u32),
				&ring->msi_buf.dma_addr,
				GFP_KERNEL);

	if (!ring->msi_buf.buf)
		return -ENOMEM;

	return 0;
}

int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
							struct mhi_dev *mhi)
{
@@ -439,6 +475,12 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
			(size_t)ring->ring_ctx->generic.wp);
	ring->wr_offset = wr_offset;

	if (mhi->use_edma) {
		rc = mhi_dev_ring_alloc_msi_buf(ring);
		if (rc)
			return rc;
	}

	return rc;
}
EXPORT_SYMBOL(mhi_ring_start);