Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5822687e authored by Siddartha Mohanadoss's avatar Siddartha Mohanadoss
Browse files

mhi_dev: mhi: Support memcpy using IPA DMA



Use IPA DMA for control and data transfers. IPA DMA uses GSI to
transfer data between host and device. As part of using IPA
DMA remove support to do iATU translation between host and
device. Pass physical address to IPA DMA for caching control
context and data transfers.

Change-Id: I9d67862b4db86f11e5d67d0d2b5652d78630f3a4
Signed-off-by: default avatarSiddartha Mohanadoss <smohanad@codeaurora.org>
parent 0d88d69d
Loading
Loading
Loading
Loading
+277 −274

File changed.

Preview size limit exceeded, changes collapsed.

+35 −6
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@
#include <linux/msm_ep_pcie.h>
#include <linux/types.h>
#include <linux/ipc_logging.h>
#include <linux/dma-mapping.h>

/* MHI control data structures allotted by the host, including
 * channel context array, event context array, command context and rings */
@@ -271,6 +272,7 @@ struct mhi_config {
#define HW_CHANNEL_END			107
#define MHI_ENV_VALUE			2
#define MHI_MASK_ROWS_CH_EV_DB		4
#define TRB_MAX_DATA_SIZE		4096

/* Possible ring element types */
union mhi_dev_ring_element_type {
@@ -359,9 +361,15 @@ struct mhi_dev_ring {
	enum mhi_dev_ring_type			type;
	enum mhi_dev_ring_state			state;

	/* device virtual address location of the cached host ring ctx data */
	union mhi_dev_ring_element_type		*ring_cache;
	/* Physical address of the cached ring copy on the device side */
	dma_addr_t				ring_cache_dma_handle;
	/* Physical address of the host where we will write/read to/from */
	struct mhi_addr				ring_shadow;
	/* Ring type - cmd, event, transfer ring and its rp/wp... */
	union mhi_dev_ring_ctx			*ring_ctx;
	/* ring_ctx_shadow -> tracking ring_ctx in the host */
	union mhi_dev_ring_ctx			*ring_ctx_shadow;
	void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el,
@@ -462,11 +470,14 @@ struct mhi_dev {
	struct mhi_addr			data_base;
	struct mhi_addr			ch_ctx_shadow;
	struct mhi_dev_ch_ctx		*ch_ctx_cache;
	dma_addr_t			ch_ctx_cache_dma_handle;
	struct mhi_addr			ev_ctx_shadow;
	struct mhi_dev_ch_ctx		*ev_ctx_cache;
	dma_addr_t			ev_ctx_cache_dma_handle;

	struct mhi_addr			cmd_ctx_shadow;
	struct mhi_dev_ch_ctx		*cmd_ctx_cache;
	dma_addr_t			cmd_ctx_cache_dma_handle;
	struct mhi_dev_ring		*ring;
	int				mhi_irq;
	struct mhi_dev_channel		*ch;
@@ -507,6 +518,21 @@ struct mhi_dev {
	u32				device_local_pa_base;
	u32				mhi_ep_msi_num;
	u32				mhi_version;
	void				*dma_cache;
	void				*read_handle;
	void				*write_handle;
	/* Physical scratch buffer for writing control data to the host */
	dma_addr_t			cache_dma_handle;
	/*
	 * Physical scratch buffer address used when picking host data
	 * from the host used in mhi_read()
	 */
	dma_addr_t			read_dma_handle;
	/*
	 * Physical scratch buffer address used when writing to the host
	 * region from device used in mhi_write()
	 */
	dma_addr_t			write_dma_handle;
};

enum mhi_msg_level {
@@ -708,25 +734,25 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
				union mhi_dev_ring_element_type *element);

/**
 * mhi_memcpy_dev2host() - memcpy equivalent API to transfer data
 * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
 *		from device to the host.
 * @dst_pa:	Physical destination address.
 * @src:	Source virtual address.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 */
int mhi_memcpy_dev2host(uint32_t dst_pa, void *src, uint32_t len,
int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
				struct mhi_dev *mhi);

/**
 * mhi_memcpy_host2dev() - memcpy equivalent API to transfer data
 * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
 *		from host to the device.
 * @device:	Destination virtual address on the device.
 * @src_pa:	Source physical address on the host.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 */
int mhi_memcpy_host2dev(void *dst, uint32_t src_pa, uint32_t len,
int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
				struct mhi_dev *mhi);

/**
@@ -736,7 +762,8 @@ int mhi_memcpy_host2dev(void *dst, uint32_t src_pa, uint32_t len,
 * @buf:	Data buffer that needs to be written to the host.
 * @size:	Data buffer size.
 */
void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size);
void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size,
				struct mhi_dev *mhi);

/**
 * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
@@ -745,7 +772,7 @@ void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size);
 * @buf:	Physical (dma_addr_t) address of the device-side buffer to
 *		fill with data read from the host.
 * @size:	Data buffer size.
 */
void mhi_dev_read_from_host(struct mhi_addr *dst, void *buf, size_t size);
void mhi_dev_read_from_host(struct mhi_addr *dst, dma_addr_t buf, size_t size);

/**
 * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
@@ -1092,4 +1119,6 @@ int mhi_pcie_config_db_routing(struct mhi_dev *mhi);
 */
int mhi_uci_init(void);

void mhi_dev_notify_a7_event(struct mhi_dev *mhi);

#endif /* _MHI_H_ */
+45 −28
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@

#include "mhi.h"

static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint32_t p)
static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
{
	uint64_t rbase;

@@ -42,7 +42,7 @@ static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring)
}

/* fetch ring elements from stat->end, take care of wrap-around case */
static int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
					uint32_t start, uint32_t end)
{
	struct mhi_addr host_addr;
@@ -51,19 +51,30 @@ static int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
			+ sizeof(union mhi_dev_ring_element_type) * start;
	host_addr.device_va = ring->ring_shadow.device_va
			+ sizeof(union mhi_dev_ring_element_type) * start;
	host_addr.host_pa = ring->ring_shadow.host_pa
			+ sizeof(union mhi_dev_ring_element_type) * start;
	if (start < end) {
		mhi_dev_read_from_host(&host_addr, &ring->ring_cache[start],
			(end-start) * sizeof(union mhi_dev_ring_element_type));
		mhi_dev_read_from_host(&host_addr,
			(ring->ring_cache_dma_handle +
			sizeof(union mhi_dev_ring_element_type) * start),
			(end-start) *
			sizeof(union mhi_dev_ring_element_type));
	} else if (start > end) {
		/* copy from 'start' to ring end, then ring start to 'end'*/
		mhi_dev_read_from_host(&host_addr, &ring->ring_cache[start],
		mhi_dev_read_from_host(&host_addr,
			(ring->ring_cache_dma_handle +
			sizeof(union mhi_dev_ring_element_type) * start),
			(ring->ring_size-start) *
			sizeof(union mhi_dev_ring_element_type));
		if (end) {
			/* wrapped around */
			host_addr.device_pa = ring->ring_shadow.device_pa;
			host_addr.device_va = ring->ring_shadow.device_va;
			mhi_dev_read_from_host(&host_addr, &ring->ring_cache[0],
			host_addr.host_pa = ring->ring_shadow.host_pa;
			mhi_dev_read_from_host(&host_addr,
				(ring->ring_cache_dma_handle +
				sizeof(union mhi_dev_ring_element_type) *
				start),
				end * sizeof(union mhi_dev_ring_element_type));
		}
	}
@@ -94,10 +105,10 @@ int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset)

	old_offset = ring->wr_offset;

	mhi_log(MHI_MSG_VERBOSE,
	mhi_log(MHI_MSG_ERROR,
			"caching - rng size :%d local ofst:%d new ofst: %d\n",
			(uint32_t) ring->ring_size, old_offset,
			(uint32_t)ring->wr_offset);
			ring->wr_offset);

	/*
	 * copy the elements starting from old_offset to wr_offset
@@ -107,16 +118,16 @@ int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset)
	if (ring->id >= mhi_ctx->ev_ring_start &&
		ring->id < (mhi_ctx->ev_ring_start +
				mhi_ctx->cfg.event_rings)) {
		mhi_log(MHI_MSG_VERBOSE,
		mhi_log(MHI_MSG_ERROR,
				"not caching event ring %d\n", ring->id);
		return 0;
	}

	mhi_log(MHI_MSG_VERBOSE, "caching ring %d, start %d, end %d\n",
	mhi_log(MHI_MSG_ERROR, "caching ring %d, start %d, end %d\n",
			ring->id, old_offset, wr_offset);

	if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) {
		mhi_log(MHI_MSG_VERBOSE,
		mhi_log(MHI_MSG_ERROR,
		"failed to fetch elements for ring %d, start %d, end %d\n",
		ring->id, old_offset, wr_offset);
		return -EINVAL;
@@ -144,7 +155,7 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
			pr_err("%s: CMD DB read failed\n", __func__);
			return rc;
		}
		mhi_log(MHI_MSG_VERBOSE,
		mhi_log(MHI_MSG_ERROR,
			"ring %d wr_offset from db 0x%x\n",
			ring->id, (uint32_t) wr_offset);
		break;
@@ -161,7 +172,7 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
			pr_err("%s: CH DB read failed\n", __func__);
			return rc;
		}
		mhi_log(MHI_MSG_VERBOSE,
		mhi_log(MHI_MSG_ERROR,
			"ring %d wr_offset from db 0x%x\n",
			ring->id, (uint32_t) wr_offset);
		break;
@@ -213,7 +224,7 @@ int mhi_dev_process_ring(struct mhi_dev_ring *ring)

	rc = mhi_dev_update_wr_offset(ring);
	if (rc) {
		mhi_log(MHI_MSG_VERBOSE,
		mhi_log(MHI_MSG_ERROR,
				"Error updating write-offset for ring %d\n",
				ring->id);
		return rc;
@@ -230,23 +241,21 @@ int mhi_dev_process_ring(struct mhi_dev_ring *ring)
	while (ring->rd_offset != ring->wr_offset) {
		rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
		if (rc) {
			mhi_log(MHI_MSG_VERBOSE,
			mhi_log(MHI_MSG_ERROR,
				"Error processing ring (%d) element (%d)\n",
				ring->id, ring->rd_offset);
			return rc;
		}

		mhi_log(MHI_MSG_VERBOSE,
		mhi_log(MHI_MSG_ERROR,
			"Processing ring (%d) rd_offset:%d, wr_offset:%d\n",
			ring->id, ring->rd_offset, ring->wr_offset);

		mhi_dev_ring_inc_index(ring, ring->rd_offset);
	}

	if (ring->rd_offset == ring->wr_offset) {
		return 0;
	} else {
		mhi_log(MHI_MSG_VERBOSE,
	if !(ring->rd_offset == ring->wr_offset) {
		mhi_log(MHI_MSG_ERROR,
				"Error with the rd offset/wr offset\n");
		return -EINVAL;
	}
@@ -284,17 +293,17 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
	 * Write the element, ring_base has to be the
	 * iomap of the ring_base for memcpy
	 */
	host_addr.device_pa = ring->ring_shadow.device_pa +
	host_addr.host_pa = ring->ring_shadow.host_pa +
			sizeof(union mhi_dev_ring_element_type) * old_offset;
	host_addr.device_va = ring->ring_shadow.device_va +
			sizeof(union mhi_dev_ring_element_type) * old_offset;

	mhi_log(MHI_MSG_VERBOSE, "adding element to ring (%d)\n", ring->id);
	mhi_log(MHI_MSG_VERBOSE, "rd_ofset %d\n", ring->rd_offset);
	mhi_log(MHI_MSG_VERBOSE, "type %d\n", element->generic.type);
	mhi_log(MHI_MSG_ERROR, "adding element to ring (%d)\n", ring->id);
	mhi_log(MHI_MSG_ERROR, "rd_ofset %d\n", ring->rd_offset);
	mhi_log(MHI_MSG_ERROR, "type %d\n", element->generic.type);

	mhi_dev_write_to_host(&host_addr, element,
			sizeof(union mhi_dev_ring_element_type));
			sizeof(union mhi_dev_ring_element_type), ring->mhi_dev);

	return 0;
}
@@ -325,15 +334,20 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
	wr_offset = mhi_dev_ring_addr2ofst(ring,
					ring->ring_ctx->generic.wp);

	ring->ring_cache = devm_kzalloc(mhi->dev,
	ring->ring_cache = dma_alloc_coherent(mhi->dev,
			ring->ring_size *
			sizeof(union mhi_dev_ring_element_type), GFP_KERNEL);
			sizeof(union mhi_dev_ring_element_type),
			&ring->ring_cache_dma_handle,
			GFP_KERNEL);
	if (!ring->ring_cache)
		return -ENOMEM;

	offset = (uint32_t)(ring->ring_ctx->generic.rbase -
					mhi->ctrl_base.host_pa);

	ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset;
	ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset;
	ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset;

	if (ring->type == RING_TYPE_ER)
		ring->ring_ctx_shadow =
@@ -348,13 +362,16 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
		(union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
		(ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx));


	ring->ring_ctx_shadow = ring->ring_ctx;

	if (ring->type != RING_TYPE_ER) {
		rc = mhi_dev_cache_ring(ring, wr_offset);
		if (rc)
			return rc;
	}

	mhi_log(MHI_MSG_VERBOSE, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n",
	mhi_log(MHI_MSG_ERROR, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n",
			(uint32_t)ring->ring_ctx->generic.rbase,
			(uint32_t)ring->ring_ctx->generic.rp,
			(uint32_t)ring->ring_ctx->generic.wp);
+11 −0
Original line number Diff line number Diff line
@@ -151,6 +151,9 @@ static inline const char *mhi_sm_pcie_event_str(enum ep_pcie_event event)
	case EP_PCIE_EVENT_PM_D0:
		str = "EP_PCIE_PM_D0_EVENT";
		break;
	case EP_PCIE_EVENT_MHI_A7:
		str = "EP_PCIE_MHI_A7";
		break;
	default:
		str = "INVALID_PCIE_EVENT";
		break;
@@ -409,6 +412,9 @@ static bool mhi_sm_is_legal_pcie_event_on_state(enum mhi_dev_state curr_mstate,
		res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
			curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE);
		break;
	case EP_PCIE_EVENT_MHI_A7:
		res = true;
		break;
	default:
		MHI_SM_ERR("Invalid ep_pcie event, received: %s\n",
			mhi_sm_pcie_event_str(event));
@@ -1143,6 +1149,11 @@ void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify)
		MHI_SM_ERR("got %s, ERROR occurred\n",
			mhi_sm_pcie_event_str(event));
		break;
	case EP_PCIE_EVENT_MHI_A7:
		ep_pcie_mask_irq_event(mhi_sm_ctx->mhi_dev->phandle,
				EP_PCIE_INT_EVT_MHI_A7, false);
		mhi_dev_notify_a7_event(mhi_sm_ctx->mhi_dev);
		goto exit;
	default:
		MHI_SM_ERR("Invalid ep_pcie event, received 0x%x event\n",
			event);
+8 −5
Original line number Diff line number Diff line
@@ -33,7 +33,6 @@
#define MHI_MAX_NR_OF_CLIENTS		23
#define MHI_SOFTWARE_CLIENT_START	0
#define MHI_SOFTWARE_CLIENT_LIMIT	(MHI_MAX_SOFTWARE_CHANNELS/2)
#define TRB_MAX_DATA_SIZE		4096
#define MHI_UCI_IPC_LOG_PAGES		(100)

#define MAX_NR_TRBS_PER_CHAN		1
@@ -250,6 +249,7 @@ static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
static int open_client_mhi_channels(struct uci_client *uci_client)
{
	int rc = 0;

	uci_log(UCI_DBG_DBG,
			"Starting channels %d %d.\n",
			uci_client->out_chan,
@@ -342,6 +342,7 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
	int rc = 0;
	int in_chan = 0;
	u32 buf_size = 0;

	in_chan = iminor(mhi_inode) + 1;
	nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs;
	buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size;
@@ -539,8 +540,9 @@ static ssize_t mhi_uci_client_write(struct file *file,
	if (file == NULL || buf == NULL ||
			!count || file->private_data == NULL)
		return -EINVAL;
	else

	uci_handle = file->private_data;

	if (atomic_read(&uci_ctxt.mhi_disabled)) {
		uci_log(UCI_DBG_ERROR,
			"Client %d attempted to write while MHI is disabled\n",
@@ -679,7 +681,7 @@ static long mhi_uci_client_ioctl(struct file *file, unsigned cmd,

	if (file == NULL || file->private_data == NULL)
		return -EINVAL;
	else

	uci_handle = file->private_data;

	uci_log(UCI_DBG_DBG, "Received command %d for client:%d\n",
@@ -734,6 +736,7 @@ int mhi_uci_init(void)
	int ret_val = 0;
	struct uci_client *mhi_client = NULL;
	s32 r = 0;

	mhi_uci_ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
						"mhi-uci", 0);
	if (mhi_uci_ipc_log == NULL) {