Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ed86e7c8 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mhi: core: Add mdmcalifornium as a supported MDM by MHI"

parents c154f0af 2e68c884
Loading
Loading
Loading
Loading
+10 −2
Original line number Diff line number Diff line
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -121,11 +121,19 @@ static ssize_t bhi_write(struct file *file,
	mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);

	for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
		u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;

		err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
		errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
		errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
		errdbg3 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG3);
		tx_db_val = mhi_reg_read_field(bhi_ctxt->bhi_base,
						BHI_STATUS,
						BHI_STATUS_MASK,
						BHI_STATUS_SHIFT);
		mhi_log(MHI_MSG_CRITICAL, "BHI STATUS 0x%x\n", tx_db_val);
		mhi_log(MHI_MSG_CRITICAL,
		"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
			tx_db_val, err, errdbg1, errdbg2, errdbg3);
		if (BHI_STATUS_SUCCESS != tx_db_val)
			mhi_log(MHI_MSG_CRITICAL,
				"Incorrect BHI status: %d retry: %d\n",
+7 −1
Original line number Diff line number Diff line
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,8 @@ static DEFINE_PCI_DEVICE_TABLE(mhi_pcie_device_id) = {
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ MHI_PCIE_VENDOR_ID, MHI_PCIE_DEVICE_ID_ZIRC,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ MHI_PCIE_VENDOR_ID, MHI_PCIE_DEVICE_ID_9x55,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ 0, },
};

@@ -319,6 +321,10 @@ DECLARE_PCI_FIXUP_HEADER(MHI_PCIE_VENDOR_ID,
		MHI_PCIE_DEVICE_ID_9x35,
		mhi_msm_fixup);

DECLARE_PCI_FIXUP_HEADER(MHI_PCIE_VENDOR_ID,
		MHI_PCIE_DEVICE_ID_9x55,
		mhi_msm_fixup);

DECLARE_PCI_FIXUP_HEADER(MHI_PCIE_VENDOR_ID,
		MHI_PCIE_DEVICE_ID_ZIRC,
		mhi_msm_fixup);
+4 −76
Original line number Diff line number Diff line
@@ -71,9 +71,8 @@ ev_mutex_free:

size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int i, r;
	int i = 0;
	size_t mhi_dev_mem = 0;
	struct mhi_chan_info chan_info;

	/* Calculate size needed for contexts */
	mhi_dev_mem += (MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt)) +
@@ -90,23 +89,6 @@ size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
		mhi_dev_mem += (sizeof(union mhi_event_pkt) *
				mhi_dev_ctxt->ev_ring_props[i].nr_desc);

	/* Calculate size needed for xfer TREs and bounce buffers */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		if (VALID_CHAN_NR(i)) {
			r = get_chan_props(mhi_dev_ctxt, i, &chan_info);
			if (r)
				continue;
			/* Add size of TREs */
			mhi_dev_mem += (sizeof(union mhi_xfer_pkt) *
					chan_info.max_desc);
			/* Add bounce buffer size */
			if (mhi_dev_ctxt->flags.bb_enabled) {
				mhi_log(MHI_MSG_INFO,
					"Enabling BB list, chan %d\n", i);
				/*mhi_dev_mem += (MAX_BOUNCE_BUF_SIZE *
						chan_info.max_desc); */
			}
		}
	mhi_log(MHI_MSG_INFO, "Final bytes for MHI device space %zd\n",
				mhi_dev_mem);
	return mhi_dev_mem;
@@ -201,20 +183,6 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
	return 0;
}


/*
 * enable_bb_ctxt - Allocate the bounce-buffer bookkeeping ring for a channel.
 * @bb_ctxt: ring to initialize; read/write/ack pointers all start at base.
 * @nr_el: number of mhi_buf_info elements to allocate (one per descriptor).
 *
 * Returns 0 on success, -ENOMEM if the backing allocation fails. On failure
 * the ring is left zero-sized so a stale len/el_size cannot make an
 * unallocated ring look valid to later users.
 */
static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
{
	bb_ctxt->el_size = sizeof(struct mhi_buf_info);
	bb_ctxt->len     = bb_ctxt->el_size * nr_el;
	bb_ctxt->base    = kzalloc(bb_ctxt->len, GFP_KERNEL);
	if (!bb_ctxt->base) {
		/* Scrub size fields so callers can't misread the empty ring */
		bb_ctxt->el_size = 0;
		bb_ctxt->len = 0;
		return -ENOMEM;
	}
	bb_ctxt->wp     = bb_ctxt->base;
	bb_ctxt->rp     = bb_ctxt->base;
	bb_ctxt->ack_rp = bb_ctxt->base;
	return 0;
}

/*
 * The device can have severe addressing limitations, and in this case
 * the MHI driver may be restricted on where memory can be allocated.
@@ -311,7 +279,7 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
					calculate_mhi_space(mhi_dev_ctxt);

	mhi_dev_ctxt->dev_space.dev_mem_start =
		dma_alloc_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
		dma_alloc_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
				    mhi_dev_ctxt->dev_space.dev_mem_len,
				   &mhi_dev_ctxt->dev_space.dma_dev_mem_start,
				    GFP_KERNEL);
@@ -392,49 +360,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
				(u64)dma_dev_mem_start + mhi_mem_index);
		mhi_mem_index += ring_len;
	}

	/* Initialize both the local and device xfer contexts */
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		if (VALID_CHAN_NR(i)) {
			struct mhi_chan_info chan_info;

			r = get_chan_props(mhi_dev_ctxt, i, &chan_info);
			if (r)
				continue;
			mhi_log(MHI_MSG_INFO, "Initializing chan ctxt %d\n", i);
			ring_len = (sizeof(union mhi_xfer_pkt) *
							chan_info.max_desc);
			init_dev_chan_ctxt(
				&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i],
				dma_dev_mem_start + mhi_mem_index,
				ring_len, chan_info.ev_ring);
			/* TODO: May not need to do this. It would be best for
			 *	the client to set it during chan open */
			mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i].
						mhi_chan_type = (i % 2) + 1;
			init_local_chan_ctxt(
				&mhi_dev_ctxt->mhi_local_chan_ctxt[i],
				dev_mem_start + mhi_mem_index,
				ring_len);
			/* TODO: May not need to do this. It would be best for
			 *	the client to set it during chan open */
			mhi_dev_ctxt->mhi_local_chan_ctxt[i].dir = (i % 2) + 1;
			/* Add size of TREs */
			mhi_mem_index += ring_len;
			if (mhi_dev_ctxt->flags.bb_enabled) {
				r = enable_bb_ctxt(
						&mhi_dev_ctxt->chan_bb_list[i],
						chan_info.max_desc);
				if (r)
					goto error_during_bb_list;
			}
		}
	return 0;

error_during_bb_list:
	for (; i >= 0; --i)
		kfree(mhi_dev_ctxt->chan_bb_list[i].base);
	dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
	dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
			   mhi_dev_ctxt->dev_space.dev_mem_len,
			   mhi_dev_ctxt->dev_space.dev_mem_start,
			   mhi_dev_ctxt->dev_space.dma_dev_mem_start);
@@ -629,7 +557,7 @@ error_during_thread_init:
	kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
	kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
error_wq_init:
	dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
	dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
		   mhi_dev_ctxt->dev_space.dev_mem_len,
		   mhi_dev_ctxt->dev_space.dev_mem_start,
		   mhi_dev_ctxt->dev_space.dma_dev_mem_start);
+13 −2
Original line number Diff line number Diff line
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,8 @@
#define MHI_RPM_AUTOSUSPEND_TMR_VAL_MS 1000
#define MAX_BUF_SIZE 32

#define HW_EVENT_RINGS_ALLOCATED 2

#define PRIMARY_CMD_RING 0
#define MHI_WORK_Q_MAX_SIZE 128

@@ -44,13 +46,14 @@
#define MHI_PCIE_VENDOR_ID 0x17CB
#define MHI_PCIE_DEVICE_ID_9x35 0x0300
#define MHI_PCIE_DEVICE_ID_ZIRC 0x0301
#define MHI_PCIE_DEVICE_ID_9x55 0x0302
#define TRB_MAX_DATA_SIZE 0x1000


#define MHI_DATA_SEG_WINDOW_START_ADDR 0x0ULL
#define MHI_DATA_SEG_WINDOW_END_ADDR 0x3E800000ULL

#define MHI_M2_DEBOUNCE_TMR_MS 10

#define MHI_XFER_DB_INTERVAL 8
#define MHI_EV_DB_INTERVAL 1

@@ -92,6 +95,14 @@
#define MHI_THREAD_SLEEP_TIMEOUT_MS 20
#define MHI_RESUME_WAKE_RETRIES 20

#define IS_HW_EV_RING(_mhi_dev_ctxt, _EV_INDEX) (_EV_INDEX >= \
				((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
				HW_EVENT_RINGS_ALLOCATED))

#define IS_SW_EV_RING(_mhi_dev_ctxt, _EV_INDEX) (_EV_INDEX < \
				((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
				HW_EVENT_RINGS_ALLOCATED))

/* Debugging Capabilities*/
#define MHI_DBG_MAX_EVENT_HISTORY 10

+97 −6
Original line number Diff line number Diff line
@@ -29,6 +29,19 @@
#include "mhi_macros.h"
#include "mhi_trace.h"

/*
 * enable_bb_ctxt - Allocate the bounce-buffer bookkeeping ring for a channel.
 * @bb_ctxt: ring to initialize; read/write/ack pointers all start at base.
 * @nr_el: number of mhi_buf_info elements to allocate (one per descriptor).
 *
 * Returns 0 on success, -ENOMEM if the backing allocation fails. On failure
 * the ring is left zero-sized so a stale len/el_size cannot make an
 * unallocated ring look valid to later users.
 */
static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
{
	bb_ctxt->el_size = sizeof(struct mhi_buf_info);
	bb_ctxt->len     = bb_ctxt->el_size * nr_el;
	bb_ctxt->base    = kzalloc(bb_ctxt->len, GFP_KERNEL);
	if (!bb_ctxt->base) {
		/* Scrub size fields so callers can't misread the empty ring */
		bb_ctxt->el_size = 0;
		bb_ctxt->len = 0;
		return -ENOMEM;
	}
	bb_ctxt->wp     = bb_ctxt->base;
	bb_ctxt->rp     = bb_ctxt->base;
	bb_ctxt->ack_rp = bb_ctxt->base;
	return 0;
}

static void mhi_write_db(struct mhi_device_ctxt *mhi_dev_ctxt,
		  void __iomem *io_addr_lower,
		  uintptr_t chan, u64 val)
@@ -188,6 +201,69 @@ int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
	return r;
}

/*
 * mhi_release_chan_ctxt - Free a channel's TRE ring and reset its context.
 * @mhi_dev_ctxt: device context owning the DMA device used for the free.
 * @cc_list: device-visible channel context holding the ring's DMA address.
 * @ring: local ring whose base/len describe the coherent allocation.
 *
 * Returns 0 on success, -EINVAL if either context pointer is missing.
 */
int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
			  struct mhi_chan_ctxt *cc_list,
			  struct mhi_ring *ring)
{
	struct device *dma_dev;

	if (!cc_list || !ring)
		return -EINVAL;

	dma_dev = &mhi_dev_ctxt->dev_info->pcie_device->dev;

	/* Return the coherent ring memory to the DMA pool... */
	dma_free_coherent(dma_dev, ring->len, ring->base,
			  cc_list->mhi_trb_ring_base_addr);
	/* ...then zero the contexts so the channel reads as disabled */
	mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
			   MHI_CHAN_STATE_DISABLED);
	return 0;
}

/*
 * free_tre_ring - Release the transfer ring backing a client's channel.
 * @client_handle: open client whose chan_info identifies the channel.
 *
 * Failures from the underlying release are logged but not propagated,
 * matching the void return expected by the close path.
 */
void free_tre_ring(struct mhi_client_handle *client_handle)
{
	struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	int chan = client_handle->chan_info.chan_nr;
	struct mhi_chan_ctxt *chan_ctxt =
		&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
	int ret;

	ret = mhi_release_chan_ctxt(mhi_dev_ctxt, chan_ctxt,
				    &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]);
	if (ret)
		mhi_log(MHI_MSG_ERROR,
		"Failed to release chan %d ret %d\n", chan, ret);
}

/*
 * populate_tre_ring - Allocate and register a channel's TRE transfer ring.
 * @client_handle: open client; chan_info supplies channel number, descriptor
 *                 count, direction flags, and associated event ring.
 *
 * Allocates a coherent DMA buffer of max_desc transfer packets against the
 * PCIe device, then initializes both the device-visible channel context and
 * the local ring state, marking the channel ENABLED.
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int populate_tre_ring(struct mhi_client_handle *client_handle)
{
	dma_addr_t ring_dma_addr;
	void *ring_local_addr;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
	u32 chan = client_handle->chan_info.chan_nr;
	u32 nr_desc = client_handle->chan_info.max_desc;

	mhi_log(MHI_MSG_INFO,
		"Entered chan %d requested desc %d\n", chan, nr_desc);

	chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
	/* One coherent allocation holds every TRE for this channel */
	ring_local_addr = dma_alloc_coherent(
				&mhi_dev_ctxt->dev_info->pcie_device->dev,
				 nr_desc * sizeof(union mhi_xfer_pkt),
				 &ring_dma_addr, GFP_KERNEL);

	if (ring_local_addr == NULL)
		return -ENOMEM;

	/* Publish both device (DMA) and host (virtual) views of the ring;
	 * direction comes from the channel's flag bits via GET_CHAN_PROPS. */
	mhi_init_chan_ctxt(chan_ctxt, ring_dma_addr,
			   (uintptr_t)ring_local_addr,
			   nr_desc,
			   GET_CHAN_PROPS(CHAN_DIR,
				client_handle->chan_info.flags),
			   client_handle->chan_info.ev_ring,
			   &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
			   MHI_CHAN_STATE_ENABLED);
	mhi_log(MHI_MSG_INFO, "Exited\n");
	return 0;
}

enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
@@ -217,10 +293,24 @@ enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle)
			chan, mhi_dev_ctxt->dev_exec_env);
		return MHI_STATUS_DEVICE_NOT_READY;
	}

	r = populate_tre_ring(client_handle);
	if (r) {
		mhi_log(MHI_MSG_ERROR,
			"Failed to initialize tre ring chan %d ret %d\n",
			chan, r);
		return r;
	}
	client_handle->event_ring_index =
		mhi_dev_ctxt->dev_space.ring_ctxt.
				cc_list[chan].mhi_event_ring_index;
	r = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
			client_handle->chan_info.max_desc);
	if (r) {
		mhi_log(MHI_MSG_ERROR,
			"Failed to initialize bb ctxt chan %d ret %d\n",
			chan, r);
		return MHI_STATUS_ERROR;
	}

	client_handle->msi_vec =
		mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
@@ -333,6 +423,7 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
	}

	mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
	free_tre_ring(client_handle);
	mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
	client_handle->chan_status = 0;
}
@@ -480,7 +571,7 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
				bb_info->dir);
		mhi_log(MHI_MSG_RAW, "Allocating BB, chan %d\n", chan);
		bb_info->bb_v_addr = dma_alloc_coherent(
				&mhi_dev_ctxt->dev_info->plat_dev->dev,
				&mhi_dev_ctxt->dev_info->pcie_device->dev,
				bb_info->buf_len,
				&bb_info->bb_p_addr,
				GFP_ATOMIC);
@@ -510,7 +601,7 @@ static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
				 bb->bb_p_addr, bb->buf_len, bb->dir);
	else
		/* This buffer was bounced */
		dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
		dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
				  bb->buf_len,
				  bb->bb_v_addr,
				  bb->bb_p_addr);
@@ -1013,7 +1104,7 @@ enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,

		/* Get the TRB this event points to */
		local_ev_trb_loc = (void *)mhi_p2v_addr(mhi_dev_ctxt,
					MHI_RING_TYPE_EVENT_RING, event_id,
					MHI_RING_TYPE_XFER_RING, chan,
					phy_ev_trb_loc);
		local_trb_loc = (union mhi_xfer_pkt *)local_chan_ctxt->rp;

@@ -1148,12 +1239,12 @@ enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
		added_xfer_pkt->data_tx_pkt =
				*(struct mhi_tx_pkt *)removed_xfer_pkt;
	} else if (MHI_RING_TYPE_EVENT_RING == ring_type) {

		spinlock_t *lock;
		unsigned long flags;

		if (ring_index >= mhi_dev_ctxt->mmio_info.nr_event_rings)
			return MHI_STATUS_ERROR;

		lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
		spin_lock_irqsave(lock, flags);
		db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
@@ -1586,7 +1677,7 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
	/* Event Doorbell and Polling mode Disabled */
	} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
		/* Only ring for software channel */
		if (IS_SOFTWARE_CHANNEL(chan) ||
		if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
		    !mhi_dev_ctxt->flags.uldl_enabled) {
			mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
			mhi_dev_ctxt->flags.db_mode[chan] = 0;
Loading