Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 455fd0af authored by Jyothi Kumar Seerapu
Browse files

msm: mhi_dev: Clean up logs in mhi-dev driver



Replace pr_err logs with mhi log support present in mhi-dev driver.
Add separate buffer logging support for MHI IPC error logs.

Change-Id: I10e39fabc8574ccfc507a8b1d34d25e9ab78265a
Signed-off-by: Jyothi Kumar Seerapu <quic_jseerapu@quicinc.com>
parent 3f1349ab
Loading
Loading
Loading
Loading
+222 −135

File changed.

Preview size limit exceeded, changes collapsed.

+7 −1
Original line number Diff line number Diff line
@@ -673,7 +673,9 @@ enum mhi_msg_level {
extern uint32_t bhi_imgtxdb;
extern enum mhi_msg_level mhi_msg_lvl;
extern enum mhi_msg_level mhi_ipc_msg_lvl;
extern enum mhi_msg_level mhi_ipc_err_msg_lvl;
extern void *mhi_ipc_log;
extern void *mhi_ipc_err_log;

#define mhi_log(_msg_lvl, _msg, ...) do { \
	if (_msg_lvl >= mhi_msg_lvl) { \
@@ -684,6 +686,10 @@ extern void *mhi_ipc_log;
		ipc_log_string(mhi_ipc_log,                     \
		"[0x%x %s] " _msg, bhi_imgtxdb, __func__, ##__VA_ARGS__); \
	} \
	if (mhi_ipc_err_log && (_msg_lvl >= mhi_ipc_err_msg_lvl)) { \
		ipc_log_string(mhi_ipc_err_log,                     \
		"[0x%x %s] " _msg, bhi_imgtxdb, __func__, ##__VA_ARGS__); \
	} \
} while (0)


+17 −12
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ enum mhi_dev_net_dbg_lvl {
	MSG_NET_reserved = 0x80000000
};

static enum mhi_dev_net_dbg_lvl mhi_net_msg_lvl = MHI_CRITICAL;
static enum mhi_dev_net_dbg_lvl mhi_net_msg_lvl = MHI_ERROR;
static enum mhi_dev_net_dbg_lvl mhi_net_ipc_log_lvl = MHI_VERBOSE;
static void *mhi_net_ipc_log;

@@ -144,7 +144,7 @@ static void mhi_dev_net_process_queue_packets(struct work_struct *work)
	struct mhi_req *wreq = NULL;

	if (mhi_dev_channel_isempty(client->in_handle)) {
		mhi_dev_net_log(MHI_INFO, "%s stop network xmmit\n", __func__);
		mhi_dev_net_log(MHI_INFO, "stop network xmmit\n");
		netif_stop_queue(client->dev);
		return;
	}
@@ -176,8 +176,9 @@ static void mhi_dev_net_process_queue_packets(struct work_struct *work)
		spin_unlock_irqrestore(&client->wrt_lock, flags);
		xfer_data = mhi_dev_write_channel(wreq);
		if (xfer_data <= 0) {
			pr_err("%s(): Failed to write skb len %d\n",
					__func__, skb->len);
			mhi_dev_net_log(MHI_ERROR,
					"Failed to write skb len %d\n",
					 skb->len);
			kfree_skb(skb);
			return;
		}
@@ -186,8 +187,7 @@ static void mhi_dev_net_process_queue_packets(struct work_struct *work)
		/* Check if free buffers are available*/
		if (mhi_dev_channel_isempty(client->in_handle)) {
			mhi_dev_net_log(MHI_INFO,
					"%s buffers are full stop xmit\n",
					__func__);
					"buffers are full stop xmit\n");
			netif_stop_queue(client->dev);
			break;
		}
@@ -281,7 +281,7 @@ static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *mhi_handle)
		spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
		skb = alloc_skb(MHI_NET_DEFAULT_MTU, GFP_KERNEL);
		if (skb == NULL) {
			pr_err("%s(): skb alloc failed\n", __func__);
			mhi_dev_net_log(MHI_ERROR, "skb alloc failed\n");
			spin_lock_irqsave(&mhi_handle->rd_lock, flags);
			list_add_tail(&req->list, &mhi_handle->rx_buffers);
			spin_unlock_irqrestore(&mhi_handle->rd_lock, flags);
@@ -298,7 +298,8 @@ static ssize_t mhi_dev_net_client_read(struct mhi_dev_net_client *mhi_handle)
		bytes_avail = mhi_dev_read_channel(req);

		if (bytes_avail < 0) {
			pr_err("Failed to read chan %d bytes_avail = %d\n",
			mhi_dev_net_log(MHI_ERROR,
					"Failed to read chan %d bytes_avail = %d\n",
					chan, bytes_avail);
			spin_lock_irqsave(&mhi_handle->rd_lock, flags);
			kfree_skb(skb);
@@ -473,7 +474,8 @@ static int mhi_dev_net_enable_iface(struct mhi_dev_net_client *mhi_dev_net_ptr)
			mhi_dev_net_ether_setup :
			mhi_dev_net_rawip_setup);
	if (!netdev) {
		pr_err("Failed to allocate netdev for mhi_dev_net\n");
		mhi_dev_net_log(MHI_ERROR,
			"Failed to allocate netdev for mhi_dev_net\n");
		goto net_dev_alloc_fail;
	}

@@ -488,7 +490,8 @@ static int mhi_dev_net_enable_iface(struct mhi_dev_net_client *mhi_dev_net_ptr)
	*mhi_dev_net_ctxt = mhi_dev_net_ptr;
	ret = register_netdev(mhi_dev_net_ptr->dev);
	if (ret) {
		pr_err("Failed to register mhi_dev_net device\n");
		mhi_dev_net_log(MHI_ERROR,
				"Failed to register mhi_dev_net device\n");
		goto net_dev_reg_fail;
	}
	mhi_dev_net_log(MHI_INFO, "Successfully registred mhi_dev_net\n");
@@ -546,12 +549,14 @@ static int mhi_dev_net_open_chan_create_netif(struct mhi_dev_net_client *client)

	ret = mhi_dev_net_alloc_read_reqs(client);
	if (ret) {
		pr_err("failed to allocate rx req buffers\n");
		mhi_dev_net_log(MHI_ERROR,
			"failed to allocate rx req buffers\n");
		goto rx_req_failed;
	}
	ret = mhi_dev_net_alloc_write_reqs(client);
	if (ret) {
		pr_err("failed to allocate write req buffers\n");
		mhi_dev_net_log(MHI_ERROR,
			"failed to allocate write req buffers\n");
		goto tx_req_failed;
	}
	if (atomic_read(&client->tx_enabled)) {
+2 −2
Original line number Diff line number Diff line
@@ -101,7 +101,7 @@ static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev,
	chid_idx = chdb_id/32;

	if (chid_idx >= MHI_MASK_ROWS_CH_EV_DB) {
		pr_err("Invalid channel id:%d\n", chid_idx);
		mhi_log(MHI_MSG_ERROR, "Invalid channel id:%d\n", chid_idx);
		return -EINVAL;
	}

@@ -633,7 +633,7 @@ int mhi_dev_restore_mmio(struct mhi_dev *dev)
			rc = mhi_dev_mmio_write(dev, MHI_CHDB_INT_MASK_A7_n(i),
							dev->chdb[i].mask);
			if (rc) {
				mhi_log(MHI_MSG_VERBOSE,
				mhi_log(MHI_MSG_ERROR,
					"Error writing enable for A7\n");
				return rc;
			}
+5 −18
Original line number Diff line number Diff line
@@ -143,7 +143,7 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
	case RING_TYPE_CMD:
		rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset);
		if (rc) {
			pr_err("%s: CMD DB read failed\n", __func__);
			mhi_log(MHI_MSG_ERROR, "CMD DB read failed\n");
			return rc;
		}
		mhi_log(MHI_MSG_VERBOSE,
@@ -153,14 +153,14 @@ int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
	case RING_TYPE_ER:
		rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset);
		if (rc) {
			pr_err("%s: EVT DB read failed\n", __func__);
			mhi_log(MHI_MSG_ERROR, "EVT DB read failed\n");
			return rc;
		}
		break;
	case RING_TYPE_CH:
		rc = mhi_dev_mmio_get_ch_db(ring, &wr_offset);
		if (rc) {
			pr_err("%s: CH DB read failed\n", __func__);
			mhi_log(MHI_MSG_ERROR, "CH DB read failed\n");
			return rc;
		}
		mhi_log(MHI_MSG_VERBOSE,
@@ -237,7 +237,7 @@ int mhi_dev_process_ring(struct mhi_dev_ring *ring)
		/* notify the clients that there are elements in the ring */
		rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
		if (rc)
			pr_err("Error fetching elements\n");
			mhi_log(MHI_MSG_ERROR, "Error fetching elements\n");
		return rc;
	}
	mhi_log(MHI_MSG_VERBOSE,
@@ -490,19 +490,6 @@ int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
	ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset;
	ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset;

	if (ring->type == RING_TYPE_ER)
		ring->ring_ctx_shadow =
		(union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va +
			(ring->id - mhi->ev_ring_start) *
			sizeof(union mhi_dev_ring_ctx));
	else if (ring->type == RING_TYPE_CMD)
		ring->ring_ctx_shadow =
		(union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va;
	else if (ring->type == RING_TYPE_CH)
		ring->ring_ctx_shadow =
		(union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
		(ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx));

	ring->ring_ctx_shadow = ring->ring_ctx;

	if (ring->type != RING_TYPE_ER || ring->type != RING_TYPE_CH) {
@@ -574,7 +561,7 @@ void mhi_ring_set_state(struct mhi_dev_ring *ring,
		return;

	if (state > RING_STATE_PENDING) {
		pr_err("%s: Invalid ring state\n", __func__);
		mhi_log(MHI_MSG_ERROR, "Invalid ring state\n");
		return;
	}

Loading