drivers/scsi/ufs/ufshcd.c  +21 −2

@@ -1523,6 +1523,7 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	hba->ufs_stats.clk_hold.ts = ktime_get();
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);

@@ -1627,6 +1628,7 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev),
 				hba->clk_gating.state);
+	hba->ufs_stats.clk_rel.ts = ktime_get();
 	hrtimer_start(&hba->clk_gating.gate_hrtimer,
 			ms_to_ktime(hba->clk_gating.delay_ms),

@@ -2073,8 +2075,10 @@ static void ufshcd_hibern8_exit_work(struct work_struct *work)
 	/* Exit from hibern8 */
 	if (ufshcd_is_link_hibern8(hba)) {
+		hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
 		ufshcd_hold(hba, false);
 		ret = ufshcd_uic_hibern8_exit(hba);
+		hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
 		ufshcd_release(hba, false);
 		if (!ret) {
 			spin_lock_irqsave(hba->host->host_lock, flags);

@@ -2500,6 +2504,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	int ret;
 	unsigned long flags;
 
+	hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
 	ufshcd_hold_all(hba);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);

@@ -2513,6 +2518,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	mutex_unlock(&hba->uic_cmd_mutex);
 	ufshcd_release_all(hba);
+	hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
 
 	ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_UIC, 0, &ret);

@@ -2999,6 +3005,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out;
 	}
 
+	hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
 	err = ufshcd_hold(hba, true);
 	if (err) {
 		err = SCSI_MLQUEUE_HOST_BUSY;

@@ -3013,6 +3020,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	if (err) {
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		err = SCSI_MLQUEUE_HOST_BUSY;
+		hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
 		ufshcd_release(hba, true);
 		goto out;
 	}

@@ -4392,8 +4400,10 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
+	hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
 	ufshcd_hold_all(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
 	ufshcd_release_all(hba);
 out:
 	return ret;

@@ -5580,6 +5590,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
+			hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
 			__ufshcd_release(hba, false);
 			__ufshcd_hibern8_release(hba, false);
 			if (cmd->request) {

@@ -6101,6 +6112,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 	if (unlikely((hba->clk_gating.state != CLKS_ON) &&
 	    ufshcd_is_auto_hibern8_supported(hba))) {
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
 		ufshcd_hold(hba, false);
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		clks_enabled = true;

@@ -6245,8 +6257,10 @@ static void ufshcd_err_handler(struct work_struct *work)
 	hba->silence_err_logs = false;
 
-	if (clks_enabled)
+	if (clks_enabled) {
 		__ufshcd_release(hba, false);
+		hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
+	}
 out:
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);

@@ -6482,7 +6496,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	hba->ufs_stats.last_intr_status = intr_status;
+	hba->ufs_stats.last_intr_ts = ktime_get();
 
 	/*
 	 * There could be max of hba->nutrs reqs in flight and in worst case
 	 * if the reqs get finished 1 by 1 after the interrupt status is

@@ -6561,6 +6576,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
 	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(host->host_lock, flags);

@@ -6618,6 +6634,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	clear_bit(free_slot, &hba->tm_condition);
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
+	hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
 	ufshcd_release_all(hba);
 
 	return err;

@@ -9635,6 +9652,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 	int ret = 0;
 
 	/* let's not get into low power until clock scaling is completed */
+	hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 	ufshcd_hold_all(hba);
 
 	ret = ufshcd_clock_scaling_prepare(hba);

@@ -9698,6 +9716,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 clk_scaling_unprepare:
 	ufshcd_clock_scaling_unprepare(hba);
 out:
+	hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
 	ufshcd_release_all(hba);
 	return ret;
 }

drivers/scsi/ufs/ufshcd.h  +20 −0

@@ -584,6 +584,22 @@ struct ufshcd_req_stat {
 };
 #endif
 
+enum ufshcd_ctx {
+	QUEUE_CMD,
+	ERR_HNDLR_WORK,
+	H8_EXIT_WORK,
+	UIC_CMD_SEND,
+	PWRCTL_CMD_SEND,
+	TM_CMD_SEND,
+	XFR_REQ_COMPL,
+	CLK_SCALE_WORK,
+};
+
+struct ufshcd_clk_ctx {
+	ktime_t ts;
+	enum ufshcd_ctx ctx;
+};
+
 /**
  * struct ufs_stats - keeps usage/err statistics
  * @enabled: enable tag stats for debugfs

@@ -612,6 +628,10 @@ struct ufs_stats {
 	int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
 #endif
 	u32 last_intr_status;
+	ktime_t last_intr_ts;
+	struct ufshcd_clk_ctx clk_hold;
+	struct ufshcd_clk_ctx clk_rel;
 	u32 hibern8_exit_cnt;
 	ktime_t last_hibern8_exit_tstamp;
 	struct ufs_uic_err_reg_hist pa_err;
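Taken together, the patch threads a (context, timestamp) pair through every clock hold/release path, so the last gating event can always be attributed to a specific caller. The diff only records the data; below is a minimal sketch of how it might be surfaced when debugging clock-gating issues. The helper names ufshcd_ctx_to_str() and ufshcd_print_clk_ctx() are hypothetical and not part of this change; dev_err(), ktime_to_us() and the fields referenced are the real kernel APIs and the fields added above, assuming this compiles in ufshcd.c with the ufshcd.h additions in scope.

/*
 * Hypothetical debug helpers (not part of this patch): translate the
 * recorded enum ufshcd_ctx into a readable name and dump the last
 * clock hold/release site together with its timestamp.
 */
static const char *ufshcd_ctx_to_str(enum ufshcd_ctx ctx)
{
	switch (ctx) {
	case QUEUE_CMD:		return "queuecommand";
	case ERR_HNDLR_WORK:	return "err_handler_work";
	case H8_EXIT_WORK:	return "hibern8_exit_work";
	case UIC_CMD_SEND:	return "uic_cmd_send";
	case PWRCTL_CMD_SEND:	return "pwrctl_cmd_send";
	case TM_CMD_SEND:	return "tm_cmd_send";
	case XFR_REQ_COMPL:	return "xfer_req_compl";
	case CLK_SCALE_WORK:	return "clk_scale_work";
	}
	return "unknown";
}

static void ufshcd_print_clk_ctx(struct ufs_hba *hba)
{
	struct ufs_stats *st = &hba->ufs_stats;

	dev_err(hba->dev, "last clk hold:    ctx=%s, ts=%lld us\n",
		ufshcd_ctx_to_str(st->clk_hold.ctx),
		(long long)ktime_to_us(st->clk_hold.ts));
	dev_err(hba->dev, "last clk release: ctx=%s, ts=%lld us\n",
		ufshcd_ctx_to_str(st->clk_rel.ctx),
		(long long)ktime_to_us(st->clk_rel.ts));
}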
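The raw ktime_t values are most useful as deltas ("how long ago was the last ungate, gate, or interrupt"), which is how such stats are typically printed from error paths. A follow-up sketch, again with a hypothetical function name; ktime_us_delta() is the standard kernel helper:

/*
 * Hypothetical: report the age of the last hold/release/interrupt
 * relative to now, using the ts fields recorded by this patch.
 */
static void ufshcd_print_clk_gating_age(struct ufs_hba *hba)
{
	ktime_t now = ktime_get();

	dev_err(hba->dev,
		"clks last held %lld us ago, released %lld us ago, last irq %lld us ago\n",
		(long long)ktime_us_delta(now, hba->ufs_stats.clk_hold.ts),
		(long long)ktime_us_delta(now, hba->ufs_stats.clk_rel.ts),
		(long long)ktime_us_delta(now, hba->ufs_stats.last_intr_ts));
}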