drivers/media/platform/msm/npu_v2/npu_dbg.c +34 −19

@@ -24,18 +24,6 @@
  * Function Definitions - Debug
  * -------------------------------------------------------------------------
  */
-static void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
-{
-	uint32_t reg_val;
-
-	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
-	NPU_INFO("fw jobs execute started count = %d\n", reg_val);
-	reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
-	NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
-	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
-	NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
-}
-
 void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr)
 {
 	int32_t *ptr = (int32_t *)cmd_ptr;

@@ -57,7 +45,7 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
 		target_que * sizeof(struct hfi_queue_header);
 	int32_t *ptr = (int32_t *)&queue;
 	size_t content_off;
-	uint32_t *content;
+	uint32_t *content, content_size;
 	int i;
 
 	MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,

@@ -65,21 +53,42 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
 	NPU_ERR("DUMP IPC queue %d:\n", target_que);
 	NPU_ERR("Header size %d:\n", HFI_QUEUE_HEADER_SIZE);
-	NPU_ERR("Content size %d:\n", queue.qhdr_q_size);
 	NPU_ERR("============QUEUE HEADER=============\n");
 	for (i = 0; i < HFI_QUEUE_HEADER_SIZE/4; i++)
 		NPU_ERR("%x\n", ptr[i]);
 
-	content_off = (size_t)IPC_ADDR + queue.qhdr_start_offset;
-	content = kzalloc(queue.qhdr_q_size, GFP_KERNEL);
+	content_off = (size_t)(IPC_ADDR + queue.qhdr_start_offset +
+		queue.qhdr_read_idx);
+	if (queue.qhdr_write_idx >= queue.qhdr_read_idx)
+		content_size = queue.qhdr_write_idx - queue.qhdr_read_idx;
+	else
+		content_size = queue.qhdr_q_size - queue.qhdr_read_idx +
+			queue.qhdr_write_idx;
+
+	NPU_ERR("Content size %d:\n", content_size);
+	if (content_size == 0)
+		return;
+
+	content = kzalloc(content_size, GFP_KERNEL);
 	if (!content) {
 		NPU_ERR("failed to allocate IPC queue content buffer\n");
 		return;
 	}
 
-	MEMR(npu_dev, (void *)content_off, content, queue.qhdr_q_size);
+	if (queue.qhdr_write_idx >= queue.qhdr_read_idx) {
+		MEMR(npu_dev, (void *)content_off, content, content_size);
+	} else {
+		MEMR(npu_dev, (void *)content_off, content,
+			queue.qhdr_q_size - queue.qhdr_read_idx);
+		MEMR(npu_dev, (void *)((size_t)IPC_ADDR +
+			queue.qhdr_start_offset),
+			(void *)((size_t)content + queue.qhdr_q_size -
+			queue.qhdr_read_idx), queue.qhdr_write_idx);
+	}
+
 	NPU_ERR("============QUEUE CONTENT=============\n");
-	for (i = 0; i < queue.qhdr_q_size/4; i++)
+	for (i = 0; i < content_size/4; i++)
 		NPU_ERR("%x\n", content[i]);
 
 	NPU_ERR("DUMP IPC queue %d END\n", target_que);

@@ -110,7 +119,13 @@ static void npu_dump_all_ipc_queue(struct npu_device *npu_dev)
 void npu_dump_debug_info(struct npu_device *npu_dev)
 {
-	npu_dump_debug_timeout_stats(npu_dev);
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+
+	if (host_ctx->fw_state != FW_ENABLED) {
+		NPU_WARN("NPU is disabled\n");
+		return;
+	}
+
 	npu_dump_dbg_registers(npu_dev);
 	npu_dump_all_ipc_queue(npu_dev);
 }
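The rework above dumps only the occupied portion of the IPC ring buffer, splitting the copy in two when the write index has wrapped behind the read index. Below is a minimal user-space C sketch of the same index arithmetic; plain memcpy stands in for the driver's MEMR accessor and struct demo_queue is a made-up stand-in for the HFI queue header, not the real layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified ring-buffer header: a stand-in for struct hfi_queue_header. */
struct demo_queue {
    uint32_t q_size;     /* total size of the content area, in bytes */
    uint32_t read_idx;   /* byte offset of the next unread byte */
    uint32_t write_idx;  /* byte offset of the next free byte */
    uint8_t *base;       /* start of the content area */
};

/* Copy only the occupied bytes, handling wrap-around with two copies. */
static uint32_t dump_occupied(const struct demo_queue *q, uint8_t *out)
{
    uint32_t content_size;

    if (q->write_idx >= q->read_idx)
        content_size = q->write_idx - q->read_idx;
    else
        content_size = q->q_size - q->read_idx + q->write_idx;

    if (content_size == 0)
        return 0;

    if (q->write_idx >= q->read_idx) {
        memcpy(out, q->base + q->read_idx, content_size);
    } else {
        /* tail segment: read_idx up to the end of the buffer */
        memcpy(out, q->base + q->read_idx, q->q_size - q->read_idx);
        /* head segment: start of the buffer up to write_idx */
        memcpy(out + q->q_size - q->read_idx, q->base, q->write_idx);
    }
    return content_size;
}

int main(void)
{
    uint8_t ring[16], out[16];
    struct demo_queue q = { sizeof(ring), 12, 4, ring };
    uint32_t i, n;

    for (i = 0; i < sizeof(ring); i++)
        ring[i] = (uint8_t)i;

    n = dump_occupied(&q, out);   /* wraps: copies bytes 12..15, then 0..3 */
    for (i = 0; i < n; i++)
        printf("%02x\n", out[i]);
    return 0;
}

The second memcpy corresponds to the second MEMR call in the patch, which picks up the head of the buffer after the wrap.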
drivers/media/platform/msm/npu_v2/npu_dev.c +34 −43

@@ -345,27 +345,30 @@ int npu_enable_core_power(struct npu_device *npu_dev)
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 	int ret = 0;
 
+	mutex_lock(&npu_dev->dev_lock);
+	NPU_DBG("Enable core power %d\n", pwr->pwr_vote_num);
 	if (!pwr->pwr_vote_num) {
 		ret = npu_enable_regulators(npu_dev);
 		if (ret)
-			return ret;
+			goto fail;
 
 		ret = npu_set_bw(npu_dev, 100, 100);
 		if (ret) {
 			npu_disable_regulators(npu_dev);
-			return ret;
+			goto fail;
 		}
 
 		ret = npu_enable_core_clocks(npu_dev);
 		if (ret) {
 			npu_set_bw(npu_dev, 0, 0);
 			npu_disable_regulators(npu_dev);
-			pwr->pwr_vote_num = 0;
-			return ret;
+			goto fail;
 		}
 		npu_resume_devbw(npu_dev);
 	}
 	pwr->pwr_vote_num++;
+fail:
+	mutex_unlock(&npu_dev->dev_lock);
 	return ret;
 }

@@ -374,8 +377,13 @@ void npu_disable_core_power(struct npu_device *npu_dev)
 {
 	struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
 
-	if (!pwr->pwr_vote_num)
+	mutex_lock(&npu_dev->dev_lock);
+	NPU_DBG("Disable core power %d\n", pwr->pwr_vote_num);
+	if (!pwr->pwr_vote_num) {
+		mutex_unlock(&npu_dev->dev_lock);
 		return;
+	}
 
 	pwr->pwr_vote_num--;
 	if (!pwr->pwr_vote_num) {
 		npu_suspend_devbw(npu_dev);

@@ -388,6 +396,7 @@ void npu_disable_core_power(struct npu_device *npu_dev)
 		NPU_DBG("setting back to power level=%d\n",
 			pwr->active_pwrlevel);
 	}
+	mutex_unlock(&npu_dev->dev_lock);
 }
 
 static int npu_enable_core_clocks(struct npu_device *npu_dev)
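The core-power paths now serialize the vote count under dev_lock and funnel every error through a single unlock. A small pthread-based sketch of that refcount-under-a-mutex pattern follows; enable_hw and disable_hw are placeholders, not the driver's regulator, bandwidth, or clock calls.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int pwr_vote_num;

/* Placeholders for the real bring-up/tear-down steps. */
static int enable_hw(void)   { return 0; }
static void disable_hw(void) { }

/* First vote powers the block; later votes only bump the count.
 * Every exit path goes through the same unlock, mirroring the goto-fail rework. */
static int core_power_get(void)
{
    int ret = 0;

    pthread_mutex_lock(&dev_lock);
    if (!pwr_vote_num) {
        ret = enable_hw();
        if (ret)
            goto fail;
    }
    pwr_vote_num++;
fail:
    pthread_mutex_unlock(&dev_lock);
    return ret;
}

/* Last vote powers the block back down; extra puts are ignored. */
static void core_power_put(void)
{
    pthread_mutex_lock(&dev_lock);
    if (!pwr_vote_num) {
        pthread_mutex_unlock(&dev_lock);
        return;
    }
    if (!--pwr_vote_num)
        disable_hw();
    pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
    if (core_power_get() == 0) {
        printf("votes after get: %d\n", pwr_vote_num);
        core_power_put();
        printf("votes after put: %d\n", pwr_vote_num);
    }
    return 0;
}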
@@ -581,7 +590,6 @@ static void npu_suspend_devbw(struct npu_device *npu_dev)
 	if (pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
 		for (i = 0; i < pwr->devbw_num; i++) {
-			NPU_DBG("Suspend devbw%d\n", i);
 			ret = devfreq_suspend_devbw(pwr->devbw[i]);
 			if (ret)
 				NPU_ERR("devfreq_suspend_devbw failed rc:%d\n",

@@ -598,7 +606,6 @@ static void npu_resume_devbw(struct npu_device *npu_dev)
 	if (!pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
 		for (i = 0; i < pwr->devbw_num; i++) {
-			NPU_DBG("Resume devbw%d\n", i);
 			ret = devfreq_resume_devbw(pwr->devbw[i]);
 			if (ret)
 				NPU_ERR("devfreq_resume_devbw failed rc:%d\n",

@@ -675,10 +682,7 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 			continue;
 		}
 
-		NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
-
 		if (core_clks[i].reset) {
-			NPU_DBG("Deassert %s\n", core_clks[i].clk_name);
 			rc = reset_control_deassert(core_clks[i].reset);
 			if (rc)
 				NPU_WARN("deassert %s reset failed\n",

@@ -695,9 +699,6 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 		if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
 			continue;
 
-		NPU_DBG("setting rate of clock %s to %ld\n",
-			core_clks[i].clk_name, pwrlevel->clk_freq[i]);
-
 		rc = clk_set_rate(core_clks[i].clk, pwrlevel->clk_freq[i]);
 		/* not fatal error, keep using previous clk rate */

@@ -718,11 +719,9 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
 			if (npu_is_post_clock(core_clks[i].clk_name))
 				continue;
 		}
-		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
 		if (core_clks[i].reset) {
-			NPU_DBG("Assert %s\n", core_clks[i].clk_name);
 			rc = reset_control_assert(core_clks[i].reset);
 			if (rc)
 				NPU_WARN("assert %s reset failed\n",

@@ -750,9 +749,6 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 		/* set clock rate to 0 before disabling it */
 		if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
-			NPU_DBG("setting rate of clock %s to 0\n",
-				core_clks[i].clk_name);
-
 			rc = clk_set_rate(core_clks[i].clk, 0);
 			if (rc) {
 				NPU_ERR("clk_set_rate %s to 0 failed\n",

@@ -760,11 +756,9 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
 			}
 		}
 
-		NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
 		clk_disable_unprepare(core_clks[i].clk);
 		if (core_clks[i].reset) {
-			NPU_DBG("Assert %s\n", core_clks[i].clk_name);
 			rc = reset_control_assert(core_clks[i].reset);
 			if (rc)
 				NPU_WARN("assert %s reset failed\n",

@@ -838,11 +832,15 @@ static int npu_enable_regulators(struct npu_device *npu_dev)
 					regulators[i].regulator_name);
 				break;
 			}
-			NPU_DBG("regulator %s enabled\n",
-				regulators[i].regulator_name);
 		}
 	}
+
+	if (rc) {
+		for (i--; i >= 0; i--)
+			regulator_disable(regulators[i].regulator);
+	} else {
 		host_ctx->power_vote_num++;
+	}
 	return rc;
 }

@@ -853,11 +851,8 @@ static void npu_disable_regulators(struct npu_device *npu_dev)
 	struct npu_regulator *regulators = npu_dev->regulators;
 
 	if (host_ctx->power_vote_num > 0) {
-		for (i = 0; i < npu_dev->regulator_num; i++) {
+		for (i = 0; i < npu_dev->regulator_num; i++)
 			regulator_disable(regulators[i].regulator);
-			NPU_DBG("regulator %s disabled\n",
-				regulators[i].regulator_name);
-		}
+
 		host_ctx->power_vote_num--;
 	}
 }
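The regulator bring-up above now unwinds on failure: if one regulator refuses to enable, the ones already enabled are switched back off in reverse order instead of being left on. A generic C sketch of that unwind idiom, with enable_one and disable_one as stand-ins for regulator_enable and regulator_disable:

#include <stdio.h>

#define NUM_RES 4

/* Stand-ins for regulator_enable()/regulator_disable(). */
static int enable_one(int i)
{
    /* Pretend the third resource fails, to exercise the rollback. */
    if (i == 2)
        return -1;
    printf("enabled %d\n", i);
    return 0;
}

static void disable_one(int i)
{
    printf("disabled %d\n", i);
}

/* Enable resources in order; on failure, disable the already-enabled
 * ones in reverse order so nothing is left powered. */
static int enable_all(void)
{
    int i, rc = 0;

    for (i = 0; i < NUM_RES; i++) {
        rc = enable_one(i);
        if (rc)
            break;
    }

    if (rc) {
        for (i--; i >= 0; i--)
            disable_one(i);
    }
    return rc;
}

int main(void)
{
    return enable_all() ? 1 : 0;
}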
@@ -889,13 +884,12 @@ int npu_enable_irq(struct npu_device *npu_dev)
 	reg_val |= RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE;
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_ENABLE(0),
 		reg_val);
-	for (i = 0; i < NPU_MAX_IRQ; i++) {
-		if (npu_dev->irq[i].irq != 0) {
+	for (i = 0; i < NPU_MAX_IRQ; i++)
+		if (npu_dev->irq[i].irq != 0)
 			enable_irq(npu_dev->irq[i].irq);
-			NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
-		}
-	}
+
 	npu_dev->irq_enabled = true;
+	NPU_DBG("irq enabled\n");
 
 	return 0;
 }

@@ -910,12 +904,9 @@ void npu_disable_irq(struct npu_device *npu_dev)
 		return;
 	}
 
-	for (i = 0; i < NPU_MAX_IRQ; i++) {
-		if (npu_dev->irq[i].irq != 0) {
+	for (i = 0; i < NPU_MAX_IRQ; i++)
+		if (npu_dev->irq[i].irq != 0)
 			disable_irq(npu_dev->irq[i].irq);
-			NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
-		}
-	}
 
 	reg_val = npu_cc_reg_read(npu_dev,
 		NPU_CC_NPU_MASTERn_GENERAL_IRQ_OWNER(0));

@@ -930,6 +921,7 @@ void npu_disable_irq(struct npu_device *npu_dev)
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_CLEAR(0),
 		RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE);
 	npu_dev->irq_enabled = false;
+	NPU_DBG("irq disabled\n");
 }
 
 /* -------------------------------------------------------------------------

@@ -975,12 +967,13 @@ int npu_enable_sys_cache(struct npu_device *npu_dev)
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
 		REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
 
-		NPU_DBG("prior to activate sys cache\n");
 		rc = llcc_slice_activate(npu_dev->sys_cache);
-		if (rc)
+		if (rc) {
 			NPU_ERR("failed to activate sys cache\n");
-		else
-			NPU_DBG("sys cache activated\n");
+			llcc_slice_putd(npu_dev->sys_cache);
+			npu_dev->sys_cache = NULL;
+			rc = 0;
+		}
 	}
 
 	return rc;

@@ -1634,8 +1627,6 @@ int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
 	bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
 	bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;
 
-	NPU_INFO("BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
-
 	ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
 	if (ret) {
 		NPU_ERR("bandwidth request failed (%d)\n", ret);

drivers/media/platform/msm/npu_v2/npu_firmware.h +0 −5

@@ -36,11 +36,6 @@
 /* Data value for debug */
 #define REG_NPU_FW_DEBUG_DATA		NPU_GPR13
 
-/* Started job count */
-#define REG_FW_JOB_CNT_START		NPU_GPR14
-/* Finished job count */
-#define REG_FW_JOB_CNT_END		NPU_GPR15
-
 /* NPU FW Control/Status Register */
 /* bit fields definitions in CTRL STATUS REG */
 #define FW_CTRL_STATUS_IPC_READY_BIT		0

drivers/media/platform/msm/npu_v2/npu_host_ipc.c +1 −3

@@ -173,9 +173,7 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
 		status = INTERRUPT_RAISE_NPU(npu_dev);
 	}
 
-	if (status == 0)
-		NPU_DBG("Cmd Msg put on Command Queue - SUCCESSS\n");
-	else
+	if (status)
 		NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");
 
 	return status;

drivers/media/platform/msm/npu_v2/npu_mgr.c +53 −61

@@ -106,7 +106,6 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
 	}
 
 	/* Keep reading ctrl status until NPU is ready */
-	NPU_DBG("waiting for status ready from fw\n");
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
 		ret = -EPERM;

@@ -201,7 +200,6 @@ int load_fw(struct npu_device *npu_dev)
 int unload_fw(struct npu_device *npu_dev)
 {
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
-	int ret = 0;
 
 	if (host_ctx->auto_pil_disable) {
 		NPU_WARN("auto pil is disabled\n");

@@ -219,16 +217,7 @@ int unload_fw(struct npu_device *npu_dev)
 		return -EBUSY;
 	}
 
-	/* vote minimum bandwidth before unload npu fw via PIL */
-	ret = npu_set_bw(npu_dev, 100, 100);
-	if (ret) {
-		NPU_ERR("Can't update bandwidth\n");
-		mutex_unlock(&host_ctx->lock);
-		return ret;
-	}
-
 	subsystem_put_local(host_ctx->subsystem_handle);
-	npu_set_bw(npu_dev, 0, 0);
 	host_ctx->fw_state = FW_UNLOADED;
 	NPU_DBG("fw is unloaded\n");
 	mutex_unlock(&host_ctx->lock);

@@ -538,9 +527,18 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
 			npu_disable_core_power(npu_dev);
 			npu_notify_aop(npu_dev, false);
 		}
+
+		/* vote minimum bandwidth before unload npu fw via PIL */
+		ret = npu_set_bw(npu_dev, 100, 100);
+		if (ret)
+			NPU_WARN("Can't update bandwidth\n");
 		break;
 	}
+	case SUBSYS_AFTER_SHUTDOWN:
+		ret = npu_set_bw(npu_dev, 0, 0);
+		if (ret)
+			NPU_WARN("Can't update bandwidth\n");
+		break;
 	default:
 		NPU_DBG("Ignoring event\n");

@@ -637,7 +635,6 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	NPU_DBG("NPU ipc irq %d\n", irq);
 	INTERRUPT_ACK(npu_dev, irq);
 
 	/* Check that the event thread currently is running */

@@ -653,23 +650,17 @@ irqreturn_t npu_general_intr_hdlr(int irq, void *ptr)
 	struct npu_device *npu_dev = (struct npu_device *)ptr;
 	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
 
-	NPU_DBG("NPU general irq %d\n", irq);
 	reg_val = npu_cc_reg_read(npu_dev,
 		NPU_CC_NPU_MASTERn_GENERAL_IRQ_STATUS(0));
-	NPU_DBG("GENERAL_IRQ_STATUS %x\n", reg_val);
 	reg_val &= (RSC_SHUTDOWN_REQ_IRQ_STATUS | RSC_BRINGUP_REQ_IRQ_STATUS);
 	ack_val = npu_cc_reg_read(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL);
 
-	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS) {
-		NPU_DBG("Send SHUTDOWN ACK\n");
+	if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS)
 		ack_val |= Q6SS_RSC_SHUTDOWN_ACK_EN;
-	}
-	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS) {
-		NPU_DBG("Send BRINGUP ACK\n");
+	if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS)
 		ack_val |= Q6SS_RSC_BRINGUP_ACK_EN;
-	}
 
 	npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, ack_val);
 	npu_cc_reg_write(npu_dev,
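The general interrupt handler keeps its read-status / build-ack / write-back shape, just without the per-bit debug prints. A self-contained sketch of that pattern, using made-up bit names and plain variables in place of the NPU_CC register accessors (none of these names come from the driver):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout; the real RSC/Q6SS definitions live in the driver. */
#define REQ_SHUTDOWN   (1u << 0)
#define REQ_BRINGUP    (1u << 1)
#define ACK_SHUTDOWN   (1u << 4)
#define ACK_BRINGUP    (1u << 5)

/* Simulated registers instead of npu_cc_reg_read()/npu_cc_reg_write(). */
static uint32_t irq_status_reg = REQ_SHUTDOWN;
static uint32_t rsc_ctrl_reg;

static void general_irq(void)
{
    uint32_t reg_val, ack_val;

    reg_val = irq_status_reg;
    reg_val &= (REQ_SHUTDOWN | REQ_BRINGUP);   /* only the requests we handle */

    ack_val = rsc_ctrl_reg;
    if (reg_val & REQ_SHUTDOWN)
        ack_val |= ACK_SHUTDOWN;
    if (reg_val & REQ_BRINGUP)
        ack_val |= ACK_BRINGUP;

    rsc_ctrl_reg = ack_val;        /* write the ack back */
    irq_status_reg &= ~reg_val;    /* clear what was acknowledged */
}

int main(void)
{
    general_irq();
    printf("ctrl=0x%x status=0x%x\n", rsc_ctrl_reg, irq_status_reg);
    return 0;
}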
@@ -739,6 +730,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	if (host_ctx->wdg_irq_sts) {
 		NPU_INFO("watchdog irq triggered\n");
+		npu_dump_debug_info(npu_dev);
 		fw_alive = false;
 	}

@@ -780,7 +772,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
 	}
 
 	/* Keep reading ctrl status until NPU is ready */
-	NPU_DBG("waiting for status ready from fw\n");
 	if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
 		FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
 		NPU_ERR("wait for fw status ready timedout\n");

@@ -868,8 +859,10 @@ static void npu_disable_fw_work(struct work_struct *work)
 	npu_dev = container_of(host_ctx, struct npu_device, host_ctx);
 
 	mutex_lock(&host_ctx->lock);
+	if (host_ctx->bridge_mbox_pwr_on) {
 		disable_fw_nolock(npu_dev);
 		host_ctx->bridge_mbox_pwr_on = false;
+	}
 	mutex_unlock(&host_ctx->lock);
 	NPU_DBG("Exit disable fw work\n");
 }

@@ -904,6 +897,12 @@ static void npu_bridge_mbox_work(struct work_struct *work)
 		return;
 	}
 
+	if ((host_ctx->wdg_irq_sts != 0) || (host_ctx->err_irq_sts != 0)) {
+		NPU_WARN("SSR is triggered, skip this time\n");
+		mutex_unlock(&host_ctx->lock);
+		return;
+	}
+
 	/* queue or modify delayed work to disable fw */
 	mod_delayed_work(host_ctx->wq, &host_ctx->disable_fw_work,
 		NPU_MBOX_IDLE_TIMEOUT);

@@ -1179,14 +1178,6 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
 		NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
 			exe_rsp_pkt->header.status);
 		NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
-		NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.e2e_ipc_tick_count);
-		NPU_DBG("aco_load_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.aco_load_tick_count);
-		NPU_DBG("aco_execute_time: %d (in tick count)\n",
-			exe_rsp_pkt->stats.aco_execution_tick_count);
-		NPU_DBG("total_num_layers: %d\n",
-			exe_rsp_pkt->stats.exe_stats.total_num_layers);
 
 		network = get_network_by_hdl(host_ctx, NULL,
 			exe_rsp_pkt->network_hdl);

@@ -1509,13 +1500,13 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
 		NPU_ERR("Another cmd is pending\n");
 		ret = -EBUSY;
 	} else {
-		NPU_DBG("Send cmd %d network id %lld\n",
-			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
-			network->id);
 		network->cmd_async = async;
 		network->cmd_ret_status = 0;
 		network->cmd_pending = true;
 		network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
+		NPU_DBG("Send cmd %d network id %llx trans id %d\n",
+			((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
+			network->id, network->trans_id);
 		ret = npu_host_ipc_send_cmd(npu_dev,
 			IPC_QUEUE_APPS_EXEC, cmd_ptr);
 		if (ret)

@@ -1591,7 +1582,7 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
 			network++;
 		}
 	}
-	pr_debug("max perf mode for networks: %d\n", max_perf_mode);
+	NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);
 
 	return max_perf_mode;
 }

@@ -1664,8 +1655,6 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 		goto error_free_network;
 	}
 
-	NPU_DBG("network address %llx\n", network->phy_add);
-
 	ret = set_perf_mode(npu_dev);
 	if (ret) {
 		NPU_ERR("set_perf_mode failed\n");

@@ -1689,7 +1678,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	reinit_completion(&network->cmd_done);
 	ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
 	if (ret) {
-		NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
+		NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
 		goto error_free_network;
 	}

@@ -1702,19 +1691,20 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
 	mutex_lock(&host_ctx->lock);
-	if (!ret) {
-		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
-		npu_dump_debug_info(npu_dev);
-		ret = -ETIMEDOUT;
-		goto error_load_network;
-	}
-
 	if (network->fw_error) {
 		ret = -EIO;
 		NPU_ERR("fw is in error state during load_v2 network\n");
 		goto error_free_network;
 	}
 
+	if (!ret) {
+		NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
+			network->id, network->trans_id);
+		npu_dump_debug_info(npu_dev);
+		ret = -ETIMEDOUT;
+		goto error_load_network;
+	}
+
 	ret = network->cmd_ret_status;
 	if (ret)
 		goto error_free_network;
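After the completion wait, each command path resolves its result from the same three inputs: the firmware error flag, the wait return value, and the status carried in the response. A compact sketch of that decision order as used in the load path above; resolve_cmd_result and its argument names are made up for illustration (the unload and execute paths check the timeout before the firmware error):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for network->fw_error, the wait_for_completion_* return value,
 * and network->cmd_ret_status. */
static int resolve_cmd_result(bool fw_error, long wait_ret, int cmd_ret_status)
{
    if (fw_error)
        return -EIO;        /* firmware already reported a fatal error */
    if (!wait_ret)
        return -ETIMEDOUT;  /* completion never arrived in time */
    return cmd_ret_status;  /* firmware answered; use its status */
}

int main(void)
{
    printf("%d\n", resolve_cmd_result(false, 0, 0));    /* -ETIMEDOUT */
    printf("%d\n", resolve_cmd_result(true, 1, 0));     /* -EIO */
    printf("%d\n", resolve_cmd_result(false, 1, -22));  /* firmware status */
    return 0;
}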
@@ -1817,21 +1807,23 @@ int32_t npu_host_unload_network(struct npu_client *client,
 	mutex_lock(&host_ctx->lock);
-	if (network->fw_error) {
-		ret = -EIO;
-		NPU_ERR("fw is in error state during unload network\n");
-		goto free_network;
-	}
-
 	if (!ret) {
-		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out\n");
+		NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
+			network->id, network->trans_id);
 		npu_dump_debug_info(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_network;
 	}
 
+	if (network->fw_error) {
+		ret = -EIO;
+		NPU_ERR("fw is in error state during unload network\n");
+	} else {
 		ret = network->cmd_ret_status;
 		NPU_DBG("unload network status %d\n", ret);
+	}
 
 free_network:
 	/*

@@ -1952,21 +1944,21 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
 		NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
 
 	mutex_lock(&host_ctx->lock);
-	if (network->fw_error) {
-		ret = -EIO;
-		NPU_ERR("fw is in error state during execute_v2 network\n");
-		goto free_exec_packet;
-	}
-
 	if (!ret) {
-		NPU_ERR("npu: %x NPU_IPC_CMD_EXECUTE_V2 time out\n",
-			network->id);
+		NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
+			network->id, network->trans_id);
 		npu_dump_debug_info(npu_dev);
 		network->cmd_pending = false;
 		ret = -ETIMEDOUT;
 		goto free_exec_packet;
 	}
 
+	if (network->fw_error) {
+		ret = -EIO;
+		NPU_ERR("fw is in error state during execute_v2 network\n");
+		goto free_exec_packet;
+	}
+
 	ret = network->cmd_ret_status;
 	if (!ret) {
 		exec_ioctl->stats_buf_size = network->stats_buf_size;