Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4835df40 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "scsi: ufs: add reference counting for scsi block requests"

parents 638296b1 0c16dccd
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -1217,11 +1217,11 @@ static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
	int ret;

	pm_runtime_get_sync(hba->dev);
	scsi_block_requests(hba->host);
	ufshcd_scsi_block_requests(hba);
	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
	if (!ret)
		ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
	scsi_unblock_requests(hba->host);
	ufshcd_scsi_unblock_requests(hba);
	pm_runtime_put_sync(hba->dev);

	return ret;
@@ -1312,7 +1312,7 @@ static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)
	attr_id = peer ? hba->debugfs_files.dme_peer_attr_id :
			 hba->debugfs_files.dme_local_attr_id;
	pm_runtime_get_sync(hba->dev);
	scsi_block_requests(hba->host);
	ufshcd_scsi_block_requests(hba);
	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
	if (!ret) {
		if ((attr_id >= MPHY_RX_ATTR_ADDR_START)
@@ -1324,7 +1324,7 @@ static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)

		ret = read_func(hba, attr_sel, &read_val);
	}
	scsi_unblock_requests(hba->host);
	ufshcd_scsi_unblock_requests(hba);
	pm_runtime_put_sync(hba->dev);

	if (!ret)
+1 −1
Original line number Diff line number Diff line
@@ -732,7 +732,7 @@ static int ufs_qcom_crypto_engine_eh(struct ufs_hba *hba)
			 * Host reset will be handled in a separate workqueue
			 * and will be triggered from ufshcd_check_errors.
			 */
			scsi_block_requests(hba->host);
			ufshcd_scsi_block_requests(hba);

			ufshcd_abort_outstanding_transfer_requests(hba,
					DID_TARGET_FAILURE);
+41 −11
Original line number Diff line number Diff line
@@ -397,6 +397,36 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
	}
}

/*
 * ufshcd_scsi_unblock_requests - drop one reference on the SCSI block count
 * @hba: per-adapter instance
 *
 * Counterpart of ufshcd_scsi_block_requests(). Decrements
 * scsi_block_reqs_cnt under host_lock; only when the last holder
 * releases (count reaches zero) is the SCSI mid-layer actually
 * unblocked. scsi_unblock_requests() is called outside the lock.
 */
void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
	bool last_ref;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	last_ref = (--hba->scsi_block_reqs_cnt == 0);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (last_ref)
		scsi_unblock_requests(hba->host);
}
EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);

/*
 * __ufshcd_scsi_block_requests - take one reference on the SCSI block count
 * @hba: per-adapter instance
 *
 * Caller must hold hba->host->host_lock (see ufshcd_scsi_block_requests()
 * for the locking wrapper). The SCSI mid-layer is blocked only on the
 * 0 -> 1 transition; further holders just bump the count.
 */
static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	int prev_cnt = hba->scsi_block_reqs_cnt++;

	if (prev_cnt == 0)
		scsi_block_requests(hba->host);
}

/*
 * ufshcd_scsi_block_requests - block SCSI requests with reference counting
 * @hba: per-adapter instance
 *
 * Locking wrapper around __ufshcd_scsi_block_requests(): acquires
 * host_lock with IRQs disabled so the count update and the 0 -> 1
 * blocking decision are atomic with respect to other holders.
 */
void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	__ufshcd_scsi_block_requests(hba);
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
}
EXPORT_SYMBOL(ufshcd_scsi_block_requests);

/* replace non-printable or non-ASCII characters with spaces */
static inline void ufshcd_remove_non_printable(char *val)
{
@@ -1157,7 +1187,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
unblock_reqs:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	scsi_unblock_requests(hba->host);
	ufshcd_scsi_unblock_requests(hba);
}

/**
@@ -1198,7 +1228,7 @@ start:
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		__ufshcd_scsi_block_requests(hba);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
			hba->clk_gating.state);
@@ -1471,7 +1501,7 @@ start:
		 * work and exit hibern8.
		 */
	case HIBERN8_ENTERED:
		scsi_block_requests(hba->host);
		__ufshcd_scsi_block_requests(hba);
		hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
			hba->hibern8_on_idle.state);
@@ -1634,7 +1664,7 @@ static void ufshcd_hibern8_exit_work(struct work_struct *work)
		}
	}
unblock_reqs:
	scsi_unblock_requests(hba->host);
	ufshcd_scsi_unblock_requests(hba);
}

static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
@@ -5136,7 +5166,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	scsi_block_requests(hba->host);
	ufshcd_scsi_block_requests(hba);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5150,7 +5180,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
		ufshcd_bkops_exception_event_handler(hba);

out:
	scsi_unblock_requests(hba->host);
	ufshcd_scsi_unblock_requests(hba);
	pm_runtime_put_sync(hba->dev);
	return;
}
@@ -5413,7 +5443,7 @@ skip_err_handling:
	ufshcd_clear_eh_in_progress(hba);
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	scsi_unblock_requests(hba->host);
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release_all(hba);
	pm_runtime_put_sync(hba->dev);
}
@@ -5519,7 +5549,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);
			__ufshcd_scsi_block_requests(hba);

			hba->ufshcd_state = UFSHCD_STATE_ERROR;
			schedule_work(&hba->eh_work);
@@ -8261,12 +8291,12 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	scsi_block_requests(hba->host);
	ufshcd_scsi_block_requests(hba);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		scsi_unblock_requests(hba->host);
		ufshcd_scsi_unblock_requests(hba);
	}

	return ret;
@@ -8275,7 +8305,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	scsi_unblock_requests(hba->host);
	ufshcd_scsi_unblock_requests(hba);
}

/**
+6 −0
Original line number Diff line number Diff line
@@ -647,6 +647,7 @@ struct ufs_stats {
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *  device is known or not.
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 */
struct ufs_hba {
	void __iomem *mmio_base;
@@ -843,6 +844,8 @@ struct ufs_hba {

	/* If set, don't gate device ref_clk during clock gating */
	bool no_ref_clk_gating;

	int scsi_block_reqs_cnt;
};

/* Returns true if clocks can be gated. Otherwise false */
@@ -1037,6 +1040,9 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
		int result);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

void ufshcd_scsi_block_requests(struct ufs_hba *hba);
void ufshcd_scsi_unblock_requests(struct ufs_hba *hba);

/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{