
Commit 2dc93cbe authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "scsi: ufs: make error handling bit faster"

parents 02854244 c6029bb6
+108 −47
@@ -4650,31 +4650,18 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
 }
 
 /**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
+ * @completed_reqs: requests to complete
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+					unsigned long completed_reqs)
 {
 	struct ufshcd_lrb *lrbp;
 	struct scsi_cmnd *cmd;
-	unsigned long completed_reqs;
-	u32 tr_doorbell;
 	int result;
 	int index;
 
-	/* Resetting interrupt aggregation counters first and reading the
-	 * DOOR_BELL afterward allows us to handle all the completed requests.
-	 * In order to prevent other interrupts starvation the DB is read once
-	 * after reset. The down side of this solution is the possibility of
-	 * false interrupt if device completes another request after resetting
-	 * aggregation and before reading the DB.
-	 */
-	if (ufshcd_is_intr_aggr_allowed(hba))
-		ufshcd_reset_intr_aggr(hba);
-
-	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-
 	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
 		lrbp = &hba->lrb[index];
 		cmd = lrbp->cmd;
@@ -4710,6 +4697,31 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	wake_up(&hba->dev_cmd.tag_wq);
 }
 
+/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+	unsigned long completed_reqs;
+	u32 tr_doorbell;
+
+	/* Resetting interrupt aggregation counters first and reading the
+	 * DOOR_BELL afterward allows us to handle all the completed requests.
+	 * In order to prevent other interrupts starvation the DB is read once
+	 * after reset. The down side of this solution is the possibility of
+	 * false interrupt if device completes another request after resetting
+	 * aggregation and before reading the DB.
+	 */
+	if (ufshcd_is_intr_aggr_allowed(hba))
+		ufshcd_reset_intr_aggr(hba);
+
+	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+	__ufshcd_transfer_req_compl(hba, completed_reqs);
+}
+
 /**
  * ufshcd_disable_ee - disable exception event
  * @hba: per-adapter instance
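
The split above is mechanical: the doorbell read stays in the wrapper, while the completion loop now takes an explicit bit mask, so callers such as the error handler can complete a mask of their own choosing. A doorbell bit is set while a request is outstanding in hardware and clears when it finishes, so XOR-ing the register against outstanding_reqs isolates the completed tags. A standalone sketch of that arithmetic (not part of the patch; the register values are made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long outstanding_reqs = 0xBUL; /* tags 0, 1, 3 issued */
		unsigned long tr_doorbell = 0x9UL;      /* tags 0, 3 still pending in h/w */
		unsigned long completed_reqs = tr_doorbell ^ outstanding_reqs;

		/* prints 0x2: only tag 1 has completed */
		printf("completed mask: 0x%lx\n", completed_reqs);
		return 0;
	}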
@@ -4976,6 +4988,12 @@ out:
 	return;
 }
 
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+	ufshcd_transfer_req_compl(hba);
+	ufshcd_tmc_handler(hba);
+}
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
  * @work: pointer to work structure
@@ -4984,11 +5002,11 @@ static void ufshcd_err_handler(struct work_struct *work)
 {
 	struct ufs_hba *hba;
 	unsigned long flags;
-	u32 err_xfer = 0;
-	u32 err_tm = 0;
+	bool err_xfer = false, err_tm = false;
 	int err = 0;
 	int tag;
 	int crypto_engine_err = 0;
+	bool needs_reset = false;
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
@@ -4996,14 +5014,15 @@ static void ufshcd_err_handler(struct work_struct *work)
 	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
 		goto out;
-	}
 
 	hba->ufshcd_state = UFSHCD_STATE_RESET;
 	ufshcd_set_eh_in_progress(hba);
 
+	/* Complete requests that have door-bell cleared by h/w */
+	ufshcd_complete_requests(hba);
+
 	/*
 	 * Dump controller state before resetting. Transfer requests state
 	 * will be dump as part of the request completion.
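
The early-exit rewrite above uses the common kernel idiom of funneling every return path through one label that drops the lock (the out: label now performs the spin_unlock, as a later hunk shows), rather than unlocking in each branch. A minimal user-space sketch of the same shape, with hypothetical names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int in_reset;

	static void handler(void)
	{
		pthread_mutex_lock(&lock);
		if (in_reset)
			goto out;	/* no per-branch unlock needed */

		in_reset = 1;		/* real work happens under the lock */
	out:
		pthread_mutex_unlock(&lock);	/* one unlock serves every path */
	}

	int main(void)
	{
		handler();
		handler();	/* second call takes the early-exit path */
		printf("in_reset = %d\n", in_reset);
		return 0;
	}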
@@ -5016,35 +5035,53 @@ static void ufshcd_err_handler(struct work_struct *work)
 		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
 	}
 
-	/* Complete requests that have door-bell cleared by h/w */
-	ufshcd_transfer_req_compl(hba);
-	ufshcd_tmc_handler(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (hba->vops && hba->vops->crypto_engine_get_err)
+		crypto_engine_err = hba->vops->crypto_engine_get_err(hba);
 
+	if ((hba->saved_err & INT_FATAL_ERRORS) || crypto_engine_err ||
+	    ((hba->saved_err & UIC_ERROR) &&
+	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR))))
+		needs_reset = true;
+
+	/*
+	 * if host reset is required then skip clearing the pending
+	 * transfers forcefully because they will automatically get
+	 * cleared after link startup.
+	 */
+	if (needs_reset)
+		goto skip_pending_xfer_clear;
+
+	/* release lock as clear command might sleep */
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	/* Clear pending transfer requests */
-	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
-		if (ufshcd_clear_cmd(hba, tag))
-			err_xfer |= 1 << tag;
+	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+		if (ufshcd_clear_cmd(hba, tag)) {
+			err_xfer = true;
+			goto lock_skip_pending_xfer_clear;
+		}
+	}
 
 	/* Clear pending task management requests */
-	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
-		if (ufshcd_clear_tm_cmd(hba, tag))
-			err_tm |= 1 << tag;
+	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+		if (ufshcd_clear_tm_cmd(hba, tag)) {
+			err_tm = true;
+			goto lock_skip_pending_xfer_clear;
+		}
+	}
 
-	/* Complete the requests that are cleared by s/w */
+lock_skip_pending_xfer_clear:
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_transfer_req_compl(hba);
-	ufshcd_tmc_handler(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	if (hba->vops && hba->vops->crypto_engine_get_err)
-		crypto_engine_err = hba->vops->crypto_engine_get_err(hba);
+	/* Complete the requests that are cleared by s/w */
+	ufshcd_complete_requests(hba);
+
+	if (err_xfer || err_tm)
+		needs_reset = true;
+
+skip_pending_xfer_clear:
 	/* Fatal errors need reset */
-	if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
-			((hba->saved_err & UIC_ERROR) &&
-			 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR)) ||
-			 crypto_engine_err) {
+	if (needs_reset) {
+		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
 
 		if (hba->saved_err & INT_FATAL_ERRORS || crypto_engine_err)
 			UFSHCD_UPDATE_ERROR_STATS(hba,
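
Two behavioral changes land in this hunk: err_xfer and err_tm shrink from per-tag bitmasks to plain booleans, and the clear loops now bail out at the first tag that cannot be cleared, because a single failure already commits the handler to a full reset and clearing the remaining tags is wasted work. A standalone sketch of that stop-at-first-failure pattern, where clear_one() is a made-up stand-in for ufshcd_clear_cmd()/ufshcd_clear_tm_cmd():

	#include <stdbool.h>
	#include <stdio.h>

	static bool clear_one(int tag)
	{
		return tag != 2;	/* pretend tag 2 cannot be cleared */
	}

	int main(void)
	{
		bool err_xfer = false;

		for (int tag = 0; tag < 8; tag++) {
			if (!clear_one(tag)) {
				/* one failure already forces a host reset, so
				 * clearing the remaining tags is pointless */
				err_xfer = true;
				break;	/* the patch uses goto to a relock label */
			}
		}
		printf("needs reset: %d\n", err_xfer);	/* prints 1 */
		return 0;
	}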
@@ -5058,7 +5095,20 @@ static void ufshcd_err_handler(struct work_struct *work)
 			UFSHCD_UPDATE_ERROR_STATS(hba,
 						  UFS_ERR_CLEAR_PEND_XFER_TM);
 
+		/*
+		 * ufshcd_reset_and_restore() does the link reinitialization
+		 * which will need atleast one empty doorbell slot to send the
+		 * device management commands (NOP and query commands).
+		 * If there is no slot empty at this moment then free up last
+		 * slot forcefully.
+		 */
+		if (hba->outstanding_reqs == max_doorbells)
+			__ufshcd_transfer_req_compl(hba,
+						    (1UL << (hba->nutrs - 1)));
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
 		err = ufshcd_reset_and_restore(hba);
+		spin_lock_irqsave(hba->host->host_lock, flags);
 		if (err) {
 			dev_err(hba->dev, "%s: reset and restore failed\n",
 					__func__);
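
The block added above protects ufshcd_reset_and_restore(): re-initializing the link requires sending device management commands (NOP and query commands), which need at least one free transfer-request slot, so when every doorbell bit is set the handler force-completes the highest tag. A worked example of the mask arithmetic, assuming a hypothetical 32-slot controller and a 64-bit unsigned long:

	#include <stdio.h>

	int main(void)
	{
		int nutrs = 32;					/* hypothetical slot count */
		unsigned long max_doorbells = (1UL << nutrs) - 1;	/* 0xffffffff */
		unsigned long outstanding_reqs = max_doorbells;	/* every slot busy */

		if (outstanding_reqs == max_doorbells) {
			/* force-complete the last tag (bit 31) so a device
			 * management command can be issued during recovery */
			unsigned long freed = 1UL << (nutrs - 1);
			printf("force-complete mask: 0x%lx\n", freed);	/* 0x80000000 */
		}
		return 0;
	}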
@@ -5074,9 +5124,17 @@ static void ufshcd_err_handler(struct work_struct *work)
 		if (hba->vops && hba->vops->crypto_engine_reset_err)
 			hba->vops->crypto_engine_reset_err(hba);
 	}
-	ufshcd_clear_eh_in_progress(hba);
 
+	if (!needs_reset) {
+		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+		if (hba->saved_err || hba->saved_uic_err)
+			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+			    __func__, hba->saved_err, hba->saved_uic_err);
+	}
+
+	ufshcd_clear_eh_in_progress(hba);
 out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	scsi_unblock_requests(hba->host);
 	ufshcd_release_all(hba);
 	pm_runtime_put_sync(hba->dev);
@@ -5166,15 +5224,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 	}
 
 	if (queue_eh_work) {
+		/*
+		 * update the transfer error masks to sticky bits, let's do this
+		 * irrespective of current ufshcd_state.
+		 */
+		hba->saved_err |= hba->errors;
+		hba->saved_uic_err |= hba->uic_error;
+
 		/* handle fatal errors only when link is functional */
 		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 			/* block commands from scsi mid-layer */
 			scsi_block_requests(hba->host);
 
-			/* transfer error masks to sticky bits */
-			hba->saved_err |= hba->errors;
-			hba->saved_uic_err |= hba->uic_error;
-
 			hba->ufshcd_state = UFSHCD_STATE_ERROR;
 
 			schedule_work(&hba->eh_work);
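
The last hunk moves the saved_err/saved_uic_err accumulation out of the UFSHCD_STATE_OPERATIONAL check, so error bits raised while a previous recovery is still running are OR-ed into the sticky masks instead of being dropped. A standalone sketch of that latching behavior (values invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned int saved_err = 0;
		unsigned int irq_errors[] = { 0x4, 0x10 };	/* two back-to-back IRQs */

		for (int i = 0; i < 2; i++)
			saved_err |= irq_errors[i];	/* latched regardless of state */

		printf("saved_err: 0x%x\n", saved_err);	/* 0x14: both errors kept */
		return 0;
	}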