Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4c75f382 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: mhi_dev: mhi dev net driver performance improvement"

parents 9752be10 bd86be08
Loading
Loading
Loading
Loading
+446 −124

File changed.

Preview size limit exceeded, changes collapsed.

+83 −26
Original line number Diff line number Diff line
@@ -274,6 +274,13 @@ struct mhi_config {
#define MHI_MASK_ROWS_CH_EV_DB		4
#define TRB_MAX_DATA_SIZE		8192
#define MHI_CTRL_STATE			25
#define IPA_DMA_SYNC                    1
#define IPA_DMA_ASYNC                   0

/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS			50
/* maximum event requests */
#define MHI_MAX_EVT_REQ			50

/* Possible ring element types */
union mhi_dev_ring_element_type {
@@ -358,6 +365,16 @@ enum mhi_ctrl_info {
	MHI_STATE_INVAL,
};

/*
 * How a transfer-completion event is delivered to the host.
 * NOTE(review): semantics inferred from the enumerator names — confirm
 * against the completion-event send path:
 * SEND_EVENT_BUFFER presumably sends the populated event buffer,
 * SEND_EVENT_RD_OFFSET presumably sends only the updated read offset.
 */
enum mhi_dev_tr_compl_evt_type {
	SEND_EVENT_BUFFER,
	SEND_EVENT_RD_OFFSET,
};

/*
 * Transfer mode for device<->host copies (see mhi_dev_write_to_host()):
 * MHI_DEV_DMA_SYNC blocks until the copy completes; MHI_DEV_DMA_ASYNC
 * completes later and is paired with a struct event_req context
 * (mhi_dev_add_element() passes ASYNC with an ereq, SYNC with NULL).
 */
enum mhi_dev_transfer_type {
	MHI_DEV_DMA_SYNC,
	MHI_DEV_DMA_ASYNC,
};

struct mhi_dev_channel;

struct mhi_dev_ring {
@@ -433,14 +450,30 @@ struct mhi_dev_client {
	uint32_t			nr_iov;
};

/*
 * Tracks one in-flight ring-cache read.  NOTE(review): not referenced
 * anywhere in this view; 'done' is presumably signalled when the cached
 * ring data arrives and 'context' identifies the requester — confirm
 * against the ring caching path.
 */
struct ring_cache_req {
	struct completion	*done;
	void			*context;
};

/*
 * Descriptor for one asynchronously-sent batch of completion events.
 * Handed to mhi_dev_write_to_host() for MHI_DEV_DMA_ASYNC transfers
 * (see mhi_dev_add_element()); client_cb fires when the send completes.
 */
struct event_req {
	union mhi_dev_ring_element_type *tr_events;	/* batched completion events */
	u32			num_events;	/* valid entries in tr_events */
	dma_addr_t		dma;		/* DMA address of tr_events */
	u32			dma_len;	/* length of the DMA mapping */
	dma_addr_t		event_rd_dma;	/* DMA addr for rd-offset update — TODO confirm */
	void			*context;	/* opaque owner context */
	enum mhi_dev_tr_compl_evt_type event_type;	/* buffer vs rd-offset send */
	u32			event_ring;	/* target event ring id — presumably; verify */
	void			(*client_cb)(void *req);	/* async-send completion callback */
	struct list_head	list;		/* free/pending list linkage */
};

struct mhi_dev_channel {
	struct list_head		list;
	struct list_head		clients;
	/* synchronization for changing channel state,
	 * adding/removing clients, mhi_dev callbacks, etc
	 */
	spinlock_t			lock;

	struct mhi_dev_ring		*ring;

	enum mhi_dev_channel_state	state;
@@ -450,6 +483,9 @@ struct mhi_dev_channel {
	/* client which the current inbound/outbound message is for */
	struct mhi_dev_client		*active_client;

	struct list_head		event_req_buffers;
	struct event_req		*curr_ereq;

	/* current TRE being processed */
	uint64_t			tre_loc;
	/* current TRE size */
@@ -476,6 +512,7 @@ struct mhi_dev {
	struct mhi_config		cfg;
	bool				mmio_initialized;

	spinlock_t			lock;
	/* Host control base information */
	struct mhi_host_addr		host_addr;
	struct mhi_addr			ctrl_base;
@@ -502,6 +539,7 @@ struct mhi_dev {

	/* Scheduler work */
	struct work_struct		chdb_ctrl_work;

	struct mutex			mhi_lock;
	struct mutex			mhi_event_lock;

@@ -528,6 +566,7 @@ struct mhi_dev {
	u32                             ifc_id;
	struct ep_pcie_hw               *phandle;
	struct work_struct		pcie_event;
	struct ep_pcie_msi_config	msi_cfg;

	atomic_t			write_active;
	atomic_t			is_suspended;
@@ -566,6 +605,23 @@ struct mhi_dev {
	bool				mhi_int;
};

/*
 * Per-transfer request used by client drivers (e.g. mhi_dev_net) for
 * channel reads/writes.  Requests are pre-allocated into free pools
 * (rx_buffers / wr_req_buffers in the net driver) and recycled from the
 * completion callback.
 */
struct mhi_req {
	u32                             chan;	/* channel id */
	u32                             mode;	/* IPA_DMA_SYNC or IPA_DMA_ASYNC */
	u32				chain;	/* part of a chained packet — TODO confirm */
	void                            *buf;	/* client data buffer (e.g. skb->data) */
	dma_addr_t                      dma;	/* DMA address of buf */
	u32                             snd_cmpl;	/* 1 = request completion event for this xfer */
	void                            *context;	/* opaque owner data (net driver stores the skb) */
	size_t                          len;	/* requested transfer length */
	size_t                          actual_len;	/* bytes actually transferred */
	uint32_t                        rd_offset;	/* ring read offset — presumably; verify */
	struct mhi_dev_client           *client;	/* owning channel client handle */
	struct list_head                list;	/* free/pending pool linkage */
	union mhi_dev_ring_element_type *el;	/* associated TRE — TODO confirm */
	void (*client_cb)(void *req);	/* invoked with this req on completion */
};

enum mhi_msg_level {
	MHI_MSG_VERBOSE = 0x0,
	MHI_MSG_INFO = 0x1,
@@ -674,24 +730,21 @@ int mhi_dev_close_channel(struct mhi_dev_client *handle_client);

/**
 * mhi_dev_read_channel() - Channel read for a given client
 * @handle_client:	Client Handle issued during mhi_dev_open_channel
 * @buf: Pointer to the buffer used by the MHI core to copy the data received
 *	 from the Host.
 * @buf_size: Size of the buffer pointer.
 * @chain : Indicates if the received data is part of a chained packet.
 * @mreq:       mreq is the client argument which includes meta info
 *              like write data location, buffer len, read offset, mode,
 *              chain and client call back function which will be invoked
 *              when data read is completed.
 */
int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
				void *buf, uint32_t buf_size, uint32_t *chain);
int mhi_dev_read_channel(struct mhi_req *mreq);

/**
 * mhi_dev_write_channel() - Channel write for a given software client.
 * @handle_client:	Client Handle issued during mhi_dev_open_channel
 * @buf: Pointer to the buffer used by the MHI core to copy the data from the
 *	 device to the host.
 * @buf_size: Size of the buffer pointer.
 * @wreq	wreq is the client argument which includes meta info like
 *              client handle, read data location, buffer length, mode,
 *              and client call back function which will free the packet.
 *              when data write is completed.
 */
int mhi_dev_write_channel(struct mhi_dev_client *handle_client, void *buf,
							uint32_t buf_size);
int mhi_dev_write_channel(struct mhi_req *wreq);

/**
 * mhi_dev_channel_isempty() - Checks if there is any pending TRE's to process.
@@ -764,8 +817,8 @@ int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);
 * @element:	Transfer ring element to be copied to the host memory.
 */
int mhi_dev_add_element(struct mhi_dev_ring *ring,
				union mhi_dev_ring_element_type *element);

				union mhi_dev_ring_element_type *element,
				struct event_req *ereq, int evt_offset);
/**
 * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
 *		from device to the host.
@@ -773,9 +826,10 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
 * @src:	Source virtual address.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 * @req:        mhi_req structure
 */
int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
				struct mhi_dev *mhi);
				struct mhi_dev *mhi, struct mhi_req *req);

/**
 * mhi_transfer_host_to_dev() - memcpy equivalent API to transfer data
@@ -784,9 +838,10 @@ int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
 * @src_pa:	Source physical address.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 * @req:        mhi_req structure
 */
int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
				struct mhi_dev *mhi);
				struct mhi_dev *mhi, struct mhi_req *mreq);

/**
 * mhi_dev_write_to_host() - Transfer data from device to host.
@@ -795,9 +850,8 @@ int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
 * @buf:	Data buffer that needs to be written to the host.
 * @size:	Data buffer size.
 */
void mhi_dev_write_to_host(struct mhi_dev *mhi,
				struct mhi_addr *mhi_transfer);

void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *mhi_transfer,
		struct event_req *ereq, enum mhi_dev_transfer_type type);
/**
 * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
 *		from host to device.
@@ -881,6 +935,7 @@ int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
 * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
 * @dev:	MHI device structure.
 */

int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);

/**
@@ -1157,9 +1212,11 @@ int mhi_pcie_config_db_routing(struct mhi_dev *mhi);
int mhi_uci_init(void);

/**
 * mhi_dev_net_interface_init() - Enable Network stack interface for MHI device
 *		which exposes the virtual network interface.
 **/
 * mhi_dev_net_interface_init() - Initializes the mhi device network interface
 *		which exposes the virtual network interface (mhi_dev_net0).
 *		data packets will transfer between MHI host interface (mhi_swip)
 *		and mhi_dev_net interface using software path
 */
int mhi_dev_net_interface_init(void);

void mhi_dev_notify_a7_event(struct mhi_dev *mhi);
+215 −111
Original line number Diff line number Diff line
@@ -30,8 +30,10 @@

#define MHI_NET_DRIVER_NAME  "mhi_dev_net_drv"
#define MHI_NET_DEV_NAME     "mhi_dev_net%d"
#define MHI_NET_DEFAULT_MTU   4000
#define MHI_NET_DEFAULT_MTU   8192
#define MHI_NET_IPC_PAGES     (100)
#define MHI_MAX_RX_REQ        (128)
#define MHI_MAX_TX_REQ        (128)

enum mhi_dev_net_dbg_lvl {
	MHI_VERBOSE = 0x1,
@@ -44,7 +46,7 @@ enum mhi_dev_net_dbg_lvl {
};

static enum mhi_dev_net_dbg_lvl mhi_net_msg_lvl = MHI_CRITICAL;
static enum mhi_dev_net_dbg_lvl mhi_net_ipc_log_lvl = MHI_INFO;
static enum mhi_dev_net_dbg_lvl mhi_net_ipc_log_lvl = MHI_VERBOSE;
static void *mhi_net_ipc_log;

enum mhi_chan_dir {
@@ -92,16 +94,16 @@ struct mhi_dev_net_client {
	struct workqueue_struct *pending_pckt_wq;
	struct work_struct       xmit_work;
	/*Read data from host work queue*/
	struct workqueue_struct *read_data_wq;
	struct work_struct       dev_read_wrk;
	atomic_t pckt_queue_count;
	atomic_t  rx_enabled;
	atomic_t  tx_enabled;
	struct net_device *dev;
	struct sk_buff_head tx_buffers;
	struct list_head rx_buffers;
	struct list_head wr_req_buffers;
	struct mhi_dev_net_ctxt *net_ctxt;
	/*To check write channel is empty or not*/
	spinlock_t write_chan_lock;
	spinlock_t wrt_lock;
	spinlock_t rd_lock;
	struct mutex in_chan_lock;
	struct mutex out_chan_lock;
};
@@ -115,17 +117,6 @@ struct mhi_dev_net_ctxt {
static struct mhi_dev_net_ctxt mhi_net_ctxt;
static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *);

/*
 * Work handler that drains inbound (host -> device) traffic by invoking
 * the client read.  NOTE(review): this handler is deleted by this
 * change; the event notifier now calls mhi_dev_net_client_read()
 * directly instead of bouncing through read_data_wq.
 */
static void mhi_dev_net_rx_scheduler(struct work_struct *work)
{
	struct mhi_dev_net_client *mhi_dev_net_client = container_of(work,
			struct mhi_dev_net_client, dev_read_wrk);

	/* container_of() of a queued work item cannot yield NULL; the
	 * check is purely defensive.
	 */
	if (mhi_dev_net_client)
		mhi_dev_net_client_read(mhi_dev_net_client);
	else
		mhi_dev_net_log(MHI_CRITICAL, "mhi_dev_net client is NULL\n");
}

static int mhi_dev_net_init_ch_attributes(struct mhi_dev_net_ctxt *mhi_ctxt)
{
	u32 channel = 0;
@@ -149,43 +140,61 @@ static int mhi_dev_net_init_ch_attributes(struct mhi_dev_net_ctxt *mhi_ctxt)
	return 0;
}

static void process_queue_packets(struct work_struct *work)
static void mhi_dev_net_process_queue_packets(struct work_struct *work)
{
	u32 xfer_data = 0;
	ktime_t start_time;

	struct mhi_dev_net_client *mhi_net_client = container_of(work,
	struct mhi_dev_net_client *client = container_of(work,
			struct mhi_dev_net_client, xmit_work);
	if (mhi_dev_channel_isempty(mhi_net_client->in_handle)) {
		netif_stop_queue(mhi_net_client->dev);
	unsigned long flags = 0;
	int xfer_data = 0;
	struct sk_buff *skb = NULL;
	struct mhi_req *wreq = NULL;

	if (mhi_dev_channel_isempty(client->in_handle)) {
		mhi_dev_net_log(MHI_INFO, "%s stop network xmmit\n", __func__);
		netif_stop_queue(client->dev);
		return;
	}
	while (!skb_queue_empty(&(mhi_net_client->tx_buffers))) {
		struct sk_buff *skb =
			skb_dequeue(&(mhi_net_client->tx_buffers));
		atomic_dec(&mhi_net_client->pckt_queue_count);
	while (!((skb_queue_empty(&client->tx_buffers)) ||
			(list_empty(&client->wr_req_buffers)))) {
		spin_lock_irqsave(&client->wrt_lock, flags);
		skb = skb_dequeue(&(client->tx_buffers));
		if (!skb) {
			mhi_dev_net_log(MHI_CRITICAL,
					"skb dequeue returned NULL\n");
			mhi_dev_net_log(MHI_INFO,
					"SKB is NULL from dequeue\n");
			spin_unlock_irqrestore(&client->wrt_lock, flags);
			return;
		}
		start_time = ktime_get();
		xfer_data =
			mhi_dev_write_channel(mhi_net_client->in_handle,
					skb->data, skb->len);
		if (xfer_data != skb->len) {
			pr_err("Failed to write skb len %d xfered data %d\n",
					skb->len, xfer_data);
		wreq = container_of(client->wr_req_buffers.next,
				struct mhi_req, list);
		list_del_init(&wreq->list);

		wreq->client = client->in_handle;
		wreq->context = skb;
		wreq->buf = skb->data;
		wreq->len = skb->len;
		wreq->chan = client->in_chan;
		wreq->mode = IPA_DMA_ASYNC;
		if (skb_queue_empty(&client->tx_buffers) ||
				list_empty(&client->wr_req_buffers)) {
			wreq->snd_cmpl = 1;
		} else
			wreq->snd_cmpl = 0;
		spin_unlock_irqrestore(&client->wrt_lock, flags);
		xfer_data = mhi_dev_write_channel(wreq);
		if (xfer_data <= 0) {
			pr_err("%s(): Failed to write skb len %d\n",
					__func__, skb->len);
			kfree_skb(skb);
			return;
		}
		mhi_net_client->dev->stats.tx_packets++;
		mhi_dev_net_log(MHI_VERBOSE, "write_chan time = %lld\n",
			ktime_to_us(ktime_sub(ktime_get(), start_time)));
		kfree_skb(skb);
		/* Check if free buffers availability */
		if (mhi_dev_channel_isempty(mhi_net_client->in_handle)) {
			netif_stop_queue(mhi_net_client->dev);
		client->dev->stats.tx_packets++;

		/* Check if free buffers are available*/
		if (mhi_dev_channel_isempty(client->in_handle)) {
			mhi_dev_net_log(MHI_INFO,
					"%s buffers are full stop xmit\n",
					__func__);
			netif_stop_queue(client->dev);
			break;
		}
	} /* While TX queue is not empty */
@@ -193,26 +202,17 @@ static void process_queue_packets(struct work_struct *work)

static void mhi_dev_net_event_notifier(struct mhi_dev_client_cb_reason *reason)
{
	struct mhi_dev_net_client *mhi_handle = NULL;
	struct mhi_dev_net_client *client_handle = mhi_net_ctxt.client_handle;

	if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
		mhi_handle = mhi_net_ctxt.client_handle;
		mhi_dev_net_log(MHI_VERBOSE,
				"recived TRE available event for chan %d\n",
				mhi_handle->in_chan);
		if (reason->ch_id % 2) {
			spin_lock(&mhi_handle->write_chan_lock);
			if (netif_queue_stopped(mhi_handle->dev)) {
				if (atomic_read(&mhi_handle->pckt_queue_count))
					queue_work(mhi_handle->pending_pckt_wq,
							&mhi_handle->xmit_work);
				else
					netif_wake_queue(mhi_handle->dev);
			}
			spin_unlock(&mhi_handle->write_chan_lock);
			if (netif_queue_stopped(client_handle->dev)) {
				netif_wake_queue(client_handle->dev);
				queue_work(client_handle->pending_pckt_wq,
						&client_handle->xmit_work);
			}
		} else
			queue_work(mhi_handle->read_data_wq,
					&mhi_handle->dev_read_wrk);
			mhi_dev_net_client_read(client_handle);
	}
}

@@ -235,61 +235,144 @@ static __be16 mhi_dev_net_eth_type_trans(struct sk_buff *skb)
	return protocol;
}

/*
 * Completion callback for an async RX request: hand the filled skb to
 * the network stack and recycle the request into the free RX pool.
 */
static void mhi_dev_net_read_completion_cb(void *req)
{
	struct mhi_dev_net_client *net_handle =
		mhi_net_ctxt.client_handle;
	struct mhi_req *mreq =
		(struct mhi_req *)req;
	struct sk_buff *skb = mreq->context;
	unsigned long   flags;

	/*
	 * Fix: commit the received bytes with skb_put() only.  skb_put()
	 * itself advances skb->len, so the previous explicit
	 * "skb->len = mreq->actual_len" before skb_put() double-counted
	 * the length and left skb->tail inconsistent with skb->len.
	 */
	skb_put(skb, mreq->actual_len);
	skb->protocol =
		mhi_dev_net_eth_type_trans(skb);
	net_handle->dev->stats.rx_packets++;
	skb->dev = net_handle->dev;
	netif_rx(skb);
	/* Return the request to the free pool for the next read. */
	spin_lock_irqsave(&net_handle->rd_lock, flags);
	list_add_tail(&mreq->list, &net_handle->rx_buffers);
	spin_unlock_irqrestore(&net_handle->rd_lock, flags);
}

static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *mhi_handle)
{
	int bytes_avail = 0;
	int ret_val = 0;
	u32 chan = 0;
	uint32_t buf_size = TRB_MAX_DATA_SIZE;
	uint32_t chained = 0;
	struct mhi_dev_client *client_handle = NULL;
	struct sk_buff *skb_buff;
	ktime_t start_time;
	struct mhi_req *req;
	struct sk_buff *skb;
	unsigned long   flags;

	client_handle = mhi_handle->out_handle;
	chan = mhi_handle->out_chan;
	if (!atomic_read(&mhi_handle->rx_enabled))
		return -EPERM;
	do {
		start_time = ktime_get();
		skb_buff = alloc_skb(MHI_NET_DEFAULT_MTU, GFP_ATOMIC);
		if (!skb_buff) {
			mhi_dev_net_log(MHI_ERROR,
				"Error while allocating skb\n");
			return -ENOMEM;
	while (1) {
		spin_lock_irqsave(&mhi_handle->rd_lock, flags);
		if (list_empty(&mhi_handle->rx_buffers)) {
			spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
			break;
		}

		req = container_of(mhi_handle->rx_buffers.next,
				struct mhi_req, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
		skb = alloc_skb(MHI_NET_DEFAULT_MTU, GFP_ATOMIC);
		if (skb == NULL) {
			pr_err("%s(): skb alloc failed\n", __func__);
			spin_lock_irqsave(&mhi_handle->rd_lock, flags);
			list_add_tail(&req->list, &mhi_handle->rx_buffers);
			spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
			ret_val = -ENOMEM;
			return ret_val;
		}
		bytes_avail = mhi_dev_read_channel(client_handle,
				skb_buff->data,
				buf_size, &chained);
		mhi_dev_net_log(MHI_VERBOSE,
				"dev_read_channel time = %lld\n",
			ktime_to_us(ktime_sub(ktime_get(), start_time)));

		req->client = client_handle;
		req->chan = chan;
		req->buf = skb->data;
		req->len = MHI_NET_DEFAULT_MTU;
		req->context = skb;
		req->mode = IPA_DMA_ASYNC;
		bytes_avail = mhi_dev_read_channel(req);

		if (bytes_avail < 0) {
			pr_err("Failed to read chan %d bytes_avail = %d\n",
					chan, bytes_avail);
			spin_lock_irqsave(&mhi_handle->rd_lock, flags);
			kfree_skb(skb);
			list_add_tail(&req->list, &mhi_handle->rx_buffers);
			spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
			ret_val = -EIO;
			break;
			return 0;
		}
		/* no data to send to network stack, break */
		if (!bytes_avail)
			break;

		skb_buff->len = bytes_avail;
		mhi_dev_net_log(MHI_VERBOSE, "reading frm chan %d buff size %d",
				chan, buf_size);
		mhi_dev_net_log(MHI_VERBOSE, "bytes_read %d chained %d",
				bytes_avail, chained);
		skb_buff->protocol =
			mhi_dev_net_eth_type_trans(skb_buff);
		skb_put(skb_buff, bytes_avail);
		mhi_handle->dev->stats.rx_packets++;
		skb_buff->dev = mhi_handle->dev;
		start_time = ktime_get();
		netif_rx(skb_buff);
	} while (1);
		if (!bytes_avail) {
			spin_lock_irqsave(&mhi_handle->rd_lock, flags);
			kfree_skb(skb);
			list_add_tail(&req->list, &mhi_handle->rx_buffers);
			spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
			return 0;
		}
	}
	/* coming out while only in case of no data or error */
	kfree_skb(skb_buff);
	 return ret_val;

}

/*
 * Completion callback for an async TX request: the transfer has
 * consumed the skb, so free it and recycle the write request back
 * into the free TX pool.
 */
static void mhi_dev_net_write_completion_cb(void *req)
{
	struct mhi_req *wreq = (struct mhi_req *)req;
	struct mhi_dev_net_client *net_client = mhi_net_ctxt.client_handle;
	struct sk_buff *skb = wreq->context;
	unsigned long flags;

	kfree_skb(skb);

	spin_lock_irqsave(&net_client->wrt_lock, flags);
	list_add_tail(&wreq->list, &net_client->wr_req_buffers);
	spin_unlock_irqrestore(&net_client->wrt_lock, flags);
}

static int mhi_dev_net_alloc_write_reqs(struct mhi_dev_net_client *client)
{
	int nreq = 0, rc = 0;
	struct mhi_req *wreq;

	while (nreq < MHI_MAX_TX_REQ) {
		wreq = kzalloc(sizeof(struct mhi_req), GFP_ATOMIC);
		if (!wreq)
			return -ENOMEM;
		wreq->client_cb =  mhi_dev_net_write_completion_cb;
		list_add_tail(&wreq->list, &client->wr_req_buffers);
		nreq++;
	}
	mhi_dev_net_log(MHI_INFO,
			"mhi write reqs allocation success\n");
	return rc;

}

static int mhi_dev_net_alloc_read_reqs(struct mhi_dev_net_client *client)
{
	int nreq = 0, rc = 0;
	struct mhi_req *mreq;

	while (nreq < MHI_MAX_RX_REQ) {
		mreq = kzalloc(sizeof(struct mhi_req), GFP_ATOMIC);
		if (!mreq)
			return -ENOMEM;
		mreq->len =  TRB_MAX_DATA_SIZE;
		mreq->client_cb =  mhi_dev_net_read_completion_cb;
		list_add_tail(&mreq->list, &client->rx_buffers);
		nreq++;
	}
	mhi_dev_net_log(MHI_INFO,
			"mhi read reqs allocation success\n");
	return rc;

}

static int mhi_dev_net_open(struct net_device *dev)
@@ -297,31 +380,33 @@ static int mhi_dev_net_open(struct net_device *dev)
	struct mhi_dev_net_client *mhi_dev_net_ptr =
		*(struct mhi_dev_net_client **)netdev_priv(dev);
	mhi_dev_net_log(MHI_INFO,
			"%s mhi_net_dev interface is up for IN %d OUT %d\n",
			__func__, mhi_dev_net_ptr->out_chan,
			"mhi_net_dev interface is up for IN %d OUT %d\n",
			mhi_dev_net_ptr->out_chan,
			mhi_dev_net_ptr->in_chan);
	netif_start_queue(dev);
	return 0;
}

static int mhi_dev_net_xmit(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t mhi_dev_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mhi_dev_net_client *mhi_dev_net_ptr =
			*(struct mhi_dev_net_client **)netdev_priv(dev);
	unsigned long flags;

	mhi_dev_net_log(MHI_VERBOSE, "SKB received\n");
	if (skb->len <= 0) {
		mhi_dev_net_log(MHI_ERROR,
				"Invalid skb received freeing skb\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	spin_lock_irqsave(&mhi_dev_net_ptr->wrt_lock, flags);
	skb_queue_tail(&(mhi_dev_net_ptr->tx_buffers), skb);
	atomic_inc(&mhi_dev_net_ptr->pckt_queue_count);
	spin_unlock_irqrestore(&mhi_dev_net_ptr->wrt_lock, flags);

	queue_work(mhi_dev_net_ptr->pending_pckt_wq,
			&mhi_dev_net_ptr->xmit_work);
	mhi_dev_net_log(MHI_VERBOSE, "Exiting from transmit function\n");
	return 0;

	return NETDEV_TX_OK;
}

static int mhi_dev_net_stop(struct net_device *dev)
@@ -408,6 +493,8 @@ static int mhi_dev_net_open_channels(struct mhi_dev_net_client *client)
{
	int rc = 0;
	int ret = 0;
	struct list_head *cp, *q;
	struct mhi_req *mreq;

	mhi_dev_net_log(MHI_DBG, "opening OUT %d IN %d channels\n",
			client->out_chan,
@@ -442,6 +529,21 @@ static int mhi_dev_net_open_channels(struct mhi_dev_net_client *client)
	mutex_unlock(&client->out_chan_lock);
	mhi_dev_net_log(MHI_INFO, "IN %d, OUT %d channels are opened",
			client->in_chan, client->out_chan);

	INIT_LIST_HEAD(&client->rx_buffers);
	INIT_LIST_HEAD(&client->wr_req_buffers);
	/* pre allocate read request buffer */

	ret = mhi_dev_net_alloc_read_reqs(client);
	if (ret) {
		pr_err("failed to allocate rx req buffers\n");
		goto rx_req_failed;
	}
	ret = mhi_dev_net_alloc_write_reqs(client);
	if (ret) {
		pr_err("failed to allocate write req buffers\n");
		goto tx_req_failed;
	}
	if (atomic_read(&client->tx_enabled)) {
		ret = mhi_dev_net_enable_iface(client);
		if (ret < 0)
@@ -449,10 +551,15 @@ static int mhi_dev_net_open_channels(struct mhi_dev_net_client *client)
					"failed to enable mhi_dev_net iface\n");
	}
	return ret;
tx_req_failed:
	/*
	 * Bug fix: the original "list_for_each_safe(cp, q, ...);" had a
	 * stray ';' terminating the loop, so the intended body executed
	 * exactly once after the loop finished — with cp pointing at the
	 * list head itself.  That leaked every RX request and then
	 * list_del()/kfree()'d a pointer derived from &client->rx_buffers
	 * (memory inside the client struct).  Iterate with a real body.
	 */
	list_for_each_safe(cp, q, &client->rx_buffers) {
		mreq = list_entry(cp, struct mhi_req, list);
		list_del(cp);
		kfree(mreq);
	}
rx_req_failed:
	mhi_dev_close_channel(client->in_handle);
handle_in_err:
	mhi_dev_close_channel(client->out_handle);
	mutex_unlock(&client->in_chan_lock);
	mutex_unlock(&client->out_chan_lock);
handle_not_rdy_err:
	mutex_unlock(&client->in_chan_lock);
	mutex_unlock(&client->out_chan_lock);
@@ -488,7 +595,8 @@ static int mhi_dev_net_rgstr_client(struct mhi_dev_net_client *client, int idx)
	client->in_chan = idx + 1;
	mutex_init(&client->in_chan_lock);
	mutex_init(&client->out_chan_lock);
	spin_lock_init(&client->write_chan_lock);
	spin_lock_init(&client->wrt_lock);
	spin_lock_init(&client->rd_lock);
	mhi_dev_net_log(MHI_INFO, "Registering out %d, In %d channels\n",
			client->out_chan, client->in_chan);

@@ -517,12 +625,8 @@ int mhi_dev_net_interface_init(void)
	/*Process pending packet work queue*/
	mhi_net_client->pending_pckt_wq =
		create_singlethread_workqueue("pending_xmit_pckt_wq");
	INIT_WORK(&mhi_net_client->xmit_work, process_queue_packets);

	/* read data from host when event trigger */
	mhi_net_client->read_data_wq =
		create_singlethread_workqueue("dev_read_from_host_wq");
	INIT_WORK(&mhi_net_client->dev_read_wrk, mhi_dev_net_rx_scheduler);
	INIT_WORK(&mhi_net_client->xmit_work,
			mhi_dev_net_process_queue_packets);

	mhi_dev_net_log(MHI_INFO,
			"Registering for MHI transfer events from host\n");
+24 −9
Original line number Diff line number Diff line
@@ -79,7 +79,6 @@ int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
			mhi_dev_read_from_host(ring->mhi_dev, &host_addr);
		}
	}

	return 0;
}

@@ -266,10 +265,12 @@ int mhi_dev_process_ring(struct mhi_dev_ring *ring)
EXPORT_SYMBOL(mhi_dev_process_ring);

int mhi_dev_add_element(struct mhi_dev_ring *ring,
				union mhi_dev_ring_element_type *element)
				union mhi_dev_ring_element_type *element,
				struct event_req *ereq, int evt_offset)
{
	uint32_t old_offset = 0;
	struct mhi_addr host_addr;
	uint32_t num_elem = 0;

	if (!ring || !element) {
		pr_err("%s: Invalid context\n", __func__);
@@ -285,6 +286,13 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,

	old_offset = ring->rd_offset;

	if (evt_offset) {
		num_elem = evt_offset /
			(sizeof(union mhi_dev_ring_element_type));
		ring->rd_offset += num_elem;
		if (ring->rd_offset >= ring->ring_size)
			ring->rd_offset -= ring->ring_size;
	} else
		mhi_dev_ring_inc_index(ring, ring->rd_offset);

	ring->ring_ctx->generic.rp = (ring->rd_offset *
@@ -303,14 +311,22 @@ int mhi_dev_add_element(struct mhi_dev_ring *ring,
			sizeof(union mhi_dev_ring_element_type) * old_offset;

	host_addr.virt_addr = element;

	if (evt_offset)
		host_addr.size = evt_offset;
	else
		host_addr.size = sizeof(union mhi_dev_ring_element_type);

	mhi_log(MHI_MSG_VERBOSE, "adding element to ring (%d)\n", ring->id);
	mhi_log(MHI_MSG_VERBOSE, "rd_ofset %d\n", ring->rd_offset);
	mhi_log(MHI_MSG_VERBOSE, "type %d\n", element->generic.type);

	mhi_dev_write_to_host(ring->mhi_dev, &host_addr);

	if (ereq)
		mhi_dev_write_to_host(ring->mhi_dev, &host_addr,
				ereq, MHI_DEV_DMA_ASYNC);
	else
		mhi_dev_write_to_host(ring->mhi_dev, &host_addr,
				NULL, MHI_DEV_DMA_SYNC);
	return 0;
}
EXPORT_SYMBOL(mhi_dev_add_element);
@@ -368,7 +384,6 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
		(union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
		(ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx));


	ring->ring_ctx_shadow = ring->ring_ctx;

	if (ring->type != RING_TYPE_ER) {
+42 −34

File changed.

Preview size limit exceeded, changes collapsed.