Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0c3c8033 authored by Sujeev Dias, committed by Gerrit - the friendly Code Review server
Browse files

mhi_bus: core: add support for pre-allocating buffers for DL channels



Like other transport layers such as rpmsg, add support to
pre-allocate buffers for DL data path. So, client drivers do not
need to drastically change their implementation when switching to
a different transport layer.

CRs-Fixed: 2221013
Change-Id: I2e10089295c003a43d1b6f46c7c877a5321f5bc9
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
parent ee84a2e4
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -52,6 +52,9 @@ Main node properties:
		the data pipe. Not involved in active data transfer.
		BIT(2) : Must switch to doorbell mode whenever MHI M0 state
		transition happens.
		BIT(3) : MHI bus driver pre-allocate buffer for this channel.
		If set, clients are not allowed to queue buffers. Valid only for
		the DL direction.

- mhi,chan-names
  Usage: required
+10 −0
Original line number Diff line number Diff line
@@ -823,6 +823,16 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
		mhi_chan->offload_ch = !!(bit_cfg & MHI_CH_CFG_BIT_OFFLOAD_CH);
		mhi_chan->db_cfg.reset_req =
			!!(bit_cfg & MHI_CH_CFG_BIT_DBMODE_RESET_CH);
		mhi_chan->pre_alloc = !!(bit_cfg & MHI_CH_CFG_BIT_PRE_ALLOC);

		if (mhi_chan->pre_alloc &&
		    (mhi_chan->dir != DMA_FROM_DEVICE ||
		     mhi_chan->xfer_type != MHI_XFER_BUFFER))
			goto error_chan_cfg;

		/* if the MHI host allocates the buffers, the client cannot queue */
		if (mhi_chan->pre_alloc)
			mhi_chan->queue_xfer = mhi_queue_nop;

		ret = of_property_read_string_index(of_node, "mhi,chan-names",
						    i, &mhi_chan->name);
+2 −0
Original line number Diff line number Diff line
@@ -339,6 +339,7 @@ enum MHI_CH_CFG {
#define MHI_CH_CFG_BIT_LPM_NOTIFY BIT(0) /* require LPM notification */
#define MHI_CH_CFG_BIT_OFFLOAD_CH BIT(1) /* satellite mhi devices */
#define MHI_CH_CFG_BIT_DBMODE_RESET_CH BIT(2) /* require db mode to reset */
#define MHI_CH_CFG_BIT_PRE_ALLOC BIT(3) /* host allocate buffers for DL */

enum MHI_EV_CFG {
	MHI_EV_CFG_ELEMENTS = 0,
@@ -565,6 +566,7 @@ struct mhi_chan {
	bool lpm_notify;
	bool configured;
	bool offload_ch;
	bool pre_alloc;
	/* functions that generate the transfer ring elements */
	int (*gen_tre)(struct mhi_controller *, struct mhi_chan *, void *,
		       void *, size_t, enum MHI_FLAGS);
+54 −0
Original line number Diff line number Diff line
@@ -23,6 +23,9 @@
#include <linux/mhi.h>
#include "mhi_internal.h"

static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan);

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base,
			      u32 offset,
@@ -661,6 +664,22 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
				mhi_cntrl->wake_put(mhi_cntrl, false);
				read_unlock_bh(&mhi_cntrl->pm_lock);
			}

			/*
			 * recycle the buffer if buffer is pre-allocated,
			 * if there is error, not much we can do apart from
			 * dropping the packet
			 */
			if (mhi_chan->pre_alloc) {
				if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan,
						  buf_info->cb_buf,
						  buf_info->len, MHI_EOT)) {
					MHI_ERR(
						"Error recycling buffer for chan:%d\n",
						mhi_chan->chan);
					kfree(buf_info->cb_buf);
				}
			}
		};
		break;
	} /* CC_EOT */
@@ -1086,6 +1105,32 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	/* pre allocate buffer for xfer ring */
	if (mhi_chan->pre_alloc) {
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
						       &mhi_chan->tre_ring);

		while (nr_el--) {
			void *buf;

			buf = kmalloc(MHI_MAX_MTU, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto error_pre_alloc;
			}

			ret = mhi_queue_buf(mhi_dev, mhi_chan, buf, MHI_MAX_MTU,
					    MHI_EOT);
			if (ret) {
				MHI_ERR("Chan:%d error queue buffer\n",
					mhi_chan->chan);
				kfree(buf);
				goto error_pre_alloc;
			}
		}
	}

	mutex_unlock(&mhi_chan->mutex);

	MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan);
@@ -1104,6 +1149,12 @@ static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;

error_pre_alloc:
	mutex_unlock(&mhi_chan->mutex);
	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);

	return ret;
}

@@ -1169,6 +1220,9 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
				 buf_info->len, buf_info->dir);
		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		if (mhi_chan->pre_alloc)
			kfree(buf_info->cb_buf);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);