Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c75e07f6 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: npu: clean up debug messages"

parents 308be22b 0ddb7728
Loading
Loading
Loading
Loading
+27 −19
Original line number Diff line number Diff line
@@ -17,18 +17,6 @@
 * Function Definitions - Debug
 * -------------------------------------------------------------------------
 */
/*
 * npu_dump_debug_timeout_stats - log NPU firmware job-counter debug registers.
 *
 * Reads three firmware debug registers via REGR() and prints each value at
 * info level as a post-mortem aid.  NOTE(review): presumably called from
 * npu_dump_debug_info() on a command timeout — confirm against caller.
 *
 * @npu_dev: NPU device whose registers are read.
 */
static void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
{
	uint32_t reg_val;

	/* Number of jobs the firmware has started executing. */
	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
	NPU_INFO("fw jobs execute started count = %d\n", reg_val);
	/* Number of jobs the firmware has finished executing. */
	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
	NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
	/* Opaque firmware debug word (ACO parser state, per the log text). */
	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
	NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
}

void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr)
{
	int32_t *ptr = (int32_t *)cmd_ptr;
@@ -50,7 +38,7 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
		target_que * sizeof(struct hfi_queue_header);
	int32_t *ptr = (int32_t *)&queue;
	size_t content_off;
	uint32_t *content;
	uint32_t *content, content_size;
	int i;

	MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
@@ -58,21 +46,42 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)

	NPU_ERR("DUMP IPC queue %d:\n", target_que);
	NPU_ERR("Header size %d:\n", HFI_QUEUE_HEADER_SIZE);
	NPU_ERR("Content size %d:\n", queue.qhdr_q_size);
	NPU_ERR("============QUEUE HEADER=============\n");
	for (i = 0; i < HFI_QUEUE_HEADER_SIZE/4; i++)
		NPU_ERR("%x\n", ptr[i]);

	content_off = (size_t)IPC_ADDR + queue.qhdr_start_offset;
	content = kzalloc(queue.qhdr_q_size, GFP_KERNEL);
	content_off = (size_t)(IPC_ADDR + queue.qhdr_start_offset +
		queue.qhdr_read_idx);
	if (queue.qhdr_write_idx >= queue.qhdr_read_idx)
		content_size = queue.qhdr_write_idx - queue.qhdr_read_idx;
	else
		content_size = queue.qhdr_q_size - queue.qhdr_read_idx +
			queue.qhdr_write_idx;

	NPU_ERR("Content size %d:\n", content_size);
	if (content_size == 0)
		return;

	content = kzalloc(content_size, GFP_KERNEL);
	if (!content) {
		NPU_ERR("failed to allocate IPC queue content buffer\n");
		return;
	}

	MEMR(npu_dev, (void *)content_off, content, queue.qhdr_q_size);
	if (queue.qhdr_write_idx >= queue.qhdr_read_idx) {
		MEMR(npu_dev, (void *)content_off, content, content_size);
	} else {
		MEMR(npu_dev, (void *)content_off, content,
			queue.qhdr_q_size - queue.qhdr_read_idx);

		MEMR(npu_dev, (void *)((size_t)IPC_ADDR +
			queue.qhdr_start_offset),
			(void *)((size_t)content + queue.qhdr_q_size -
			queue.qhdr_read_idx), queue.qhdr_write_idx);
	}

	NPU_ERR("============QUEUE CONTENT=============\n");
	for (i = 0; i < queue.qhdr_q_size/4; i++)
	for (i = 0; i < content_size/4; i++)
		NPU_ERR("%x\n", content[i]);

	NPU_ERR("DUMP IPC queue %d END\n", target_que);
@@ -110,7 +119,6 @@ void npu_dump_debug_info(struct npu_device *npu_dev)
		return;
	}

	npu_dump_debug_timeout_stats(npu_dev);
	npu_dump_dbg_registers(npu_dev);
	npu_dump_all_ipc_queue(npu_dev);
}
+16 −37
Original line number Diff line number Diff line
@@ -339,6 +339,7 @@ int npu_enable_core_power(struct npu_device *npu_dev)
	int ret = 0;

	mutex_lock(&npu_dev->dev_lock);
	NPU_DBG("Enable core power %d\n", pwr->pwr_vote_num);
	if (!pwr->pwr_vote_num) {
		ret = npu_enable_regulators(npu_dev);
		if (ret)
@@ -370,6 +371,7 @@ void npu_disable_core_power(struct npu_device *npu_dev)
	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;

	mutex_lock(&npu_dev->dev_lock);
	NPU_DBG("Disable core power %d\n", pwr->pwr_vote_num);
	if (!pwr->pwr_vote_num) {
		mutex_unlock(&npu_dev->dev_lock);
		return;
@@ -581,7 +583,6 @@ static void npu_suspend_devbw(struct npu_device *npu_dev)

	if (pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
		for (i = 0; i < pwr->devbw_num; i++) {
			NPU_DBG("Suspend devbw%d\n", i);
			ret = devfreq_suspend_devbw(pwr->devbw[i]);
			if (ret)
				NPU_ERR("devfreq_suspend_devbw failed rc:%d\n",
@@ -598,7 +599,6 @@ static void npu_resume_devbw(struct npu_device *npu_dev)

	if (!pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
		for (i = 0; i < pwr->devbw_num; i++) {
			NPU_DBG("Resume devbw%d\n", i);
			ret = devfreq_resume_devbw(pwr->devbw[i]);
			if (ret)
				NPU_ERR("devfreq_resume_devbw failed rc:%d\n",
@@ -675,10 +675,7 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
				continue;
		}

		NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);

		if (core_clks[i].reset) {
			NPU_DBG("Deassert %s\n", core_clks[i].clk_name);
			rc = reset_control_deassert(core_clks[i].reset);
			if (rc)
				NPU_WARN("deassert %s reset failed\n",
@@ -695,9 +692,6 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
		if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
			continue;

		NPU_DBG("setting rate of clock %s to %ld\n",
			core_clks[i].clk_name, pwrlevel->clk_freq[i]);

		rc = clk_set_rate(core_clks[i].clk,
			pwrlevel->clk_freq[i]);
		/* not fatal error, keep using previous clk rate */
@@ -718,11 +712,9 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
				if (npu_is_post_clock(core_clks[i].clk_name))
					continue;
			}
			NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
			clk_disable_unprepare(core_clks[i].clk);

			if (core_clks[i].reset) {
				NPU_DBG("Assert %s\n", core_clks[i].clk_name);
				rc = reset_control_assert(core_clks[i].reset);
				if (rc)
					NPU_WARN("assert %s reset failed\n",
@@ -750,9 +742,6 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)

		/* set clock rate to 0 before disabling it */
		if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
			NPU_DBG("setting rate of clock %s to 0\n",
				core_clks[i].clk_name);

			rc = clk_set_rate(core_clks[i].clk, 0);
			if (rc) {
				NPU_ERR("clk_set_rate %s to 0 failed\n",
@@ -760,11 +749,9 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
			}
		}

		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
		clk_disable_unprepare(core_clks[i].clk);

		if (core_clks[i].reset) {
			NPU_DBG("Assert %s\n", core_clks[i].clk_name);
			rc = reset_control_assert(core_clks[i].reset);
			if (rc)
				NPU_WARN("assert %s reset failed\n",
@@ -838,8 +825,6 @@ static int npu_enable_regulators(struct npu_device *npu_dev)
					regulators[i].regulator_name);
				break;
			}
			NPU_DBG("regulator %s enabled\n",
				regulators[i].regulator_name);
		}
	}

@@ -859,11 +844,9 @@ static void npu_disable_regulators(struct npu_device *npu_dev)
	struct npu_regulator *regulators = npu_dev->regulators;

	if (host_ctx->power_vote_num > 0) {
		for (i = 0; i < npu_dev->regulator_num; i++) {
		for (i = 0; i < npu_dev->regulator_num; i++)
			regulator_disable(regulators[i].regulator);
			NPU_DBG("regulator %s disabled\n",
				regulators[i].regulator_name);
		}

		host_ctx->power_vote_num--;
	}
}
@@ -895,13 +878,12 @@ int npu_enable_irq(struct npu_device *npu_dev)
	reg_val |= RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE;
	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_ENABLE(0),
		reg_val);
	for (i = 0; i < NPU_MAX_IRQ; i++) {
		if (npu_dev->irq[i].irq != 0) {
	for (i = 0; i < NPU_MAX_IRQ; i++)
		if (npu_dev->irq[i].irq != 0)
			enable_irq(npu_dev->irq[i].irq);
			NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
		}
	}

	npu_dev->irq_enabled = true;
	NPU_DBG("irq enabled\n");

	return 0;
}
@@ -916,12 +898,9 @@ void npu_disable_irq(struct npu_device *npu_dev)
		return;
	}

	for (i = 0; i < NPU_MAX_IRQ; i++) {
		if (npu_dev->irq[i].irq != 0) {
	for (i = 0; i < NPU_MAX_IRQ; i++)
		if (npu_dev->irq[i].irq != 0)
			disable_irq(npu_dev->irq[i].irq);
			NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
		}
	}

	reg_val = npu_cc_reg_read(npu_dev,
		NPU_CC_NPU_MASTERn_GENERAL_IRQ_OWNER(0));
@@ -936,6 +915,7 @@ void npu_disable_irq(struct npu_device *npu_dev)
	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_CLEAR(0),
		RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE);
	npu_dev->irq_enabled = false;
	NPU_DBG("irq disabled\n");
}

/* -------------------------------------------------------------------------
@@ -981,12 +961,13 @@ int npu_enable_sys_cache(struct npu_device *npu_dev)
		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);

		NPU_DBG("prior to activate sys cache\n");
		rc = llcc_slice_activate(npu_dev->sys_cache);
		if (rc)
		if (rc) {
			NPU_ERR("failed to activate sys cache\n");
		else
			NPU_DBG("sys cache activated\n");
			llcc_slice_putd(npu_dev->sys_cache);
			npu_dev->sys_cache = NULL;
			rc = 0;
		}
	}

	return rc;
@@ -1640,8 +1621,6 @@ int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
	bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
	bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;

	NPU_INFO("BW MBps: AB: %d IB: %d\n", new_ab, new_ib);

	ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
	if (ret) {
		NPU_ERR("bandwidth request failed (%d)\n", ret);
+0 −5
Original line number Diff line number Diff line
@@ -29,11 +29,6 @@
/* Data value for debug */
#define REG_NPU_FW_DEBUG_DATA       NPU_GPR13

/* Started job count */
#define REG_FW_JOB_CNT_START        NPU_GPR14
/* Finished job count */
#define REG_FW_JOB_CNT_END          NPU_GPR15

/* NPU FW Control/Status Register */
/* bit fields definitions in CTRL STATUS REG */
#define FW_CTRL_STATUS_IPC_READY_BIT            0
+1 −3
Original line number Diff line number Diff line
@@ -166,9 +166,7 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
			status = INTERRUPT_RAISE_NPU(npu_dev);
	}

	if (status == 0)
		NPU_DBG("Cmd Msg put on Command Queue - SUCCESSS\n");
	else
	if (status)
		NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");

	return status;
+33 −49
Original line number Diff line number Diff line
@@ -99,7 +99,6 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
	}

	/* Keep reading ctrl status until NPU is ready */
	NPU_DBG("waiting for status ready from fw\n");
	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
		ret = -EPERM;
@@ -629,7 +628,6 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
	struct npu_device *npu_dev = (struct npu_device *)ptr;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;

	NPU_DBG("NPU ipc irq %d\n", irq);
	INTERRUPT_ACK(npu_dev, irq);

	/* Check that the event thread currently is running */
@@ -645,23 +643,17 @@ irqreturn_t npu_general_intr_hdlr(int irq, void *ptr)
	struct npu_device *npu_dev = (struct npu_device *)ptr;
	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;

	NPU_DBG("NPU general irq %d\n", irq);

	reg_val = npu_cc_reg_read(npu_dev,
		NPU_CC_NPU_MASTERn_GENERAL_IRQ_STATUS(0));
	NPU_DBG("GENERAL_IRQ_STATUS %x\n", reg_val);
	reg_val &= (RSC_SHUTDOWN_REQ_IRQ_STATUS | RSC_BRINGUP_REQ_IRQ_STATUS);
	ack_val = npu_cc_reg_read(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL);

	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS) {
		NPU_DBG("Send SHUTDOWN ACK\n");
	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS)
		ack_val |= Q6SS_RSC_SHUTDOWN_ACK_EN;
	}

	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS) {
		NPU_DBG("Send BRINGUP ACK\n");
	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS)
		ack_val |= Q6SS_RSC_BRINGUP_ACK_EN;
	}

	npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, ack_val);
	npu_cc_reg_write(npu_dev,
@@ -773,7 +765,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
	}

	/* Keep reading ctrl status until NPU is ready */
	NPU_DBG("waiting for status ready from fw\n");
	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
		NPU_ERR("wait for fw status ready timedout\n");
@@ -1180,14 +1171,6 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
		NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
			exe_rsp_pkt->header.status);
		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
		NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
			exe_rsp_pkt->stats.e2e_ipc_tick_count);
		NPU_DBG("aco_load_time: %d (in tick count)\n",
			exe_rsp_pkt->stats.aco_load_tick_count);
		NPU_DBG("aco_execute_time: %d (in tick count)\n",
			exe_rsp_pkt->stats.aco_execution_tick_count);
		NPU_DBG("total_num_layers: %d\n",
			exe_rsp_pkt->stats.exe_stats.total_num_layers);

		network = get_network_by_hdl(host_ctx, NULL,
			exe_rsp_pkt->network_hdl);
@@ -1509,13 +1492,13 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
		NPU_ERR("Another cmd is pending\n");
		ret = -EBUSY;
	} else {
		NPU_DBG("Send cmd %d network id %lld\n",
			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
			network->id);
		network->cmd_async = async;
		network->cmd_ret_status = 0;
		network->cmd_pending = true;
		network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
		NPU_DBG("Send cmd %d network id %llx trans id %d\n",
			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
			network->id, network->trans_id);
		ret = npu_host_ipc_send_cmd(npu_dev,
			IPC_QUEUE_APPS_EXEC, cmd_ptr);
		if (ret)
@@ -1591,7 +1574,7 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
			network++;
		}
	}
	pr_debug("max perf mode for networks: %d\n", max_perf_mode);
	NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);

	return max_perf_mode;
}
@@ -1664,8 +1647,6 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
		goto error_free_network;
	}

	NPU_DBG("network address %llx\n", network->phy_add);

	ret = set_perf_mode(npu_dev);
	if (ret) {
		NPU_ERR("set_perf_mode failed\n");
@@ -1689,7 +1670,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
	reinit_completion(&network->cmd_done);
	ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
	if (ret) {
		NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
		NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
		goto error_free_network;
	}

@@ -1702,19 +1683,20 @@ int32_t npu_host_load_network_v2(struct npu_client *client,

	mutex_lock(&host_ctx->lock);

	if (!ret) {
		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
		npu_dump_debug_info(npu_dev);
		ret = -ETIMEDOUT;
		goto error_load_network;
	}

	if (network->fw_error) {
		ret = -EIO;
		NPU_ERR("fw is in error state during load_v2 network\n");
		goto error_free_network;
	}

	if (!ret) {
		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
			network->id, network->trans_id);
		npu_dump_debug_info(npu_dev);
		ret = -ETIMEDOUT;
		goto error_load_network;
	}

	ret = network->cmd_ret_status;
	if (ret)
		goto error_free_network;
@@ -1817,21 +1799,23 @@ int32_t npu_host_unload_network(struct npu_client *client,

	mutex_lock(&host_ctx->lock);

	if (network->fw_error) {
		ret = -EIO;
		NPU_ERR("fw is in error state during unload network\n");
		goto free_network;
	}

	if (!ret) {
		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out\n");
		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
			network->id, network->trans_id);
		npu_dump_debug_info(npu_dev);
		network->cmd_pending = false;
		ret = -ETIMEDOUT;
		goto free_network;
	}

	if (network->fw_error) {
		ret = -EIO;
		NPU_ERR("fw is in error state during unload network\n");
	} else {
	ret = network->cmd_ret_status;
	NPU_DBG("unload network status %d\n", ret);
	}

free_network:
	/*
@@ -1952,21 +1936,21 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);

	mutex_lock(&host_ctx->lock);
	if (network->fw_error) {
		ret = -EIO;
		NPU_ERR("fw is in error state during execute_v2 network\n");
		goto free_exec_packet;
	}

	if (!ret) {
		NPU_ERR("npu: %x NPU_IPC_CMD_EXECUTE_V2 time out\n",
			network->id);
		NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
			network->id, network->trans_id);
		npu_dump_debug_info(npu_dev);
		network->cmd_pending = false;
		ret = -ETIMEDOUT;
		goto free_exec_packet;
	}

	if (network->fw_error) {
		ret = -EIO;
		NPU_ERR("fw is in error state during execute_v2 network\n");
		goto free_exec_packet;
	}

	ret = network->cmd_ret_status;
	if (!ret) {
		exec_ioctl->stats_buf_size = network->stats_buf_size;