Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5ccba63a authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "scsi: ufs: Porting UFS driver's changes from msm-4.14"

parents 5c5f67ae e0c627d0
Loading
Loading
Loading
Loading
+69 −13
Original line number Original line Diff line number Diff line
@@ -22,6 +22,7 @@
#include "ufs-debugfs.h"
#include "ufs-debugfs.h"
#include "unipro.h"
#include "unipro.h"
#include "ufshci.h"
#include "ufshci.h"
#include "ufshcd.h"


enum field_width {
enum field_width {
	BYTE	= 1,
	BYTE	= 1,
@@ -1074,9 +1075,7 @@ static int ufsdbg_power_mode_show(struct seq_file *file, void *data)
		"L - number of lanes\n"
		"L - number of lanes\n"
		"M - power mode:\n"
		"M - power mode:\n"
		"\t1 = fast mode\n"
		"\t1 = fast mode\n"
		"\t2 = slow mode\n"
		"\t4 = fast-auto mode\n"
		"\t4 = fast-auto mode\n"
		"\t5 = slow-auto mode\n"
		"first letter is for RX, second letter is for TX.\n\n");
		"first letter is for RX, second letter is for TX.\n\n");


	return 0;
	return 0;
@@ -1084,16 +1083,14 @@ static int ufsdbg_power_mode_show(struct seq_file *file, void *data)


static bool ufsdbg_power_mode_validate(struct ufs_pa_layer_attr *pwr_mode)
static bool ufsdbg_power_mode_validate(struct ufs_pa_layer_attr *pwr_mode)
{
{
	if (pwr_mode->gear_rx < UFS_PWM_G1 || pwr_mode->gear_rx > UFS_PWM_G7 ||
	if (pwr_mode->gear_rx < UFS_HS_G1 || pwr_mode->gear_rx > UFS_HS_G3 ||
	    pwr_mode->gear_tx < UFS_PWM_G1 || pwr_mode->gear_tx > UFS_PWM_G7 ||
	    pwr_mode->gear_tx < UFS_HS_G1 || pwr_mode->gear_tx > UFS_HS_G3 ||
	    pwr_mode->lane_rx < 1 || pwr_mode->lane_rx > 2 ||
	    pwr_mode->lane_rx < 1 || pwr_mode->lane_rx > 2 ||
	    pwr_mode->lane_tx < 1 || pwr_mode->lane_tx > 2 ||
	    pwr_mode->lane_tx < 1 || pwr_mode->lane_tx > 2 ||
	    (pwr_mode->pwr_rx != FAST_MODE && pwr_mode->pwr_rx != SLOW_MODE &&
	    (pwr_mode->pwr_rx != FAST_MODE &&
	     pwr_mode->pwr_rx != FASTAUTO_MODE &&
	     pwr_mode->pwr_rx != FASTAUTO_MODE) ||
	     pwr_mode->pwr_rx != SLOWAUTO_MODE) ||
	    (pwr_mode->pwr_tx != FAST_MODE &&
	    (pwr_mode->pwr_tx != FAST_MODE && pwr_mode->pwr_tx != SLOW_MODE &&
	     pwr_mode->pwr_tx != FASTAUTO_MODE)) {
	     pwr_mode->pwr_tx != FASTAUTO_MODE &&
	     pwr_mode->pwr_tx != SLOWAUTO_MODE)) {
		pr_err("%s: power parameters are not valid\n", __func__);
		pr_err("%s: power parameters are not valid\n", __func__);
		return false;
		return false;
	}
	}
@@ -1199,14 +1196,67 @@ static int ufsdbg_cfg_pwr_param(struct ufs_hba *hba,
static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode)
		struct ufs_pa_layer_attr *desired_pwr_mode)
{
{
	int ret;
	int ret = 0;
	bool scale_up = false;
	u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);


	pm_runtime_get_sync(hba->dev);
	pm_runtime_get_sync(hba->dev);
	/* let's not get into low power until clock scaling is completed */
	hba->ufs_stats.clk_hold.ctx = DBGFS_CFG_PWR_MODE;
	ufshcd_hold(hba, false);
	ufshcd_scsi_block_requests(hba);
	ufshcd_scsi_block_requests(hba);
	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
	down_write(&hba->lock);
	if (!ret)
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		goto out;
	}

	/* Gear scaling needs to be taken care of along with clk scaling */
	if (desired_pwr_mode->gear_tx != hba->pwr_info.gear_tx ||
	    desired_pwr_mode->gear_rx != hba->pwr_info.gear_rx) {

		if (desired_pwr_mode->gear_tx > scale_down_gear ||
		    desired_pwr_mode->gear_rx > scale_down_gear)
			scale_up = true;

		if (!scale_up) {
			ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
			ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
			if (ret)
				goto out;
		}

		/*
		 * If auto hibern8 is supported then put the link in
		 * hibern8 manually, this is to avoid auto hibern8
		 * racing during clock frequency scaling sequence.
		 */
		if (ufshcd_is_auto_hibern8_supported(hba) &&
		    hba->hibern8_on_idle.is_enabled) {
			ret = ufshcd_uic_hibern8_enter(hba);
			if (ret)
				goto out;
		}

		ret = ufshcd_scale_clks(hba, scale_up);
		if (ret)
			goto out;

		if (ufshcd_is_auto_hibern8_supported(hba) &&
		    hba->hibern8_on_idle.is_enabled)
			ret = ufshcd_uic_hibern8_exit(hba);

		if (scale_up) {
			ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
			if (ret)
				ufshcd_scale_clks(hba, false);
		}
	} else {
		ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
	}
out:
	up_write(&hba->lock);
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_release(hba, false);
	pm_runtime_put_sync(hba->dev);
	pm_runtime_put_sync(hba->dev);


	return ret;
	return ret;
@@ -1718,6 +1768,12 @@ void ufsdbg_add_debugfs(struct ufs_hba *hba)
		goto err;
		goto err;
	}
	}


	if (!debugfs_create_bool("crash_on_err",
		0600, hba->debugfs_files.debugfs_root,
		&hba->crash_on_err))
		goto err;


	ufsdbg_setup_fault_injection(hba);
	ufsdbg_setup_fault_injection(hba);


	ufshcd_vops_add_debugfs(hba, hba->debugfs_files.debugfs_root);
	ufshcd_vops_add_debugfs(hba, hba->debugfs_files.debugfs_root);
+37 −8
Original line number Original line Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-License-Identifier: GPL-2.0-only
/*
/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * it under the terms of the GNU General Public License version 2 and
@@ -226,14 +226,17 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
	}
	}


	qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
	qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
	if (!ice_workqueue) {
		ice_workqueue = alloc_workqueue("ice-set-key",
		ice_workqueue = alloc_workqueue("ice-set-key",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
		if (!ice_workqueue) {
		if (!ice_workqueue) {
			dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
			dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
			__func__);
			__func__);
			err = -ENOMEM;
			goto out;
			goto out;
		}
		}
		INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
		INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
	}


out:
out:
	return err;
	return err;
@@ -286,6 +289,17 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
			 * propagate so it will be re-queued.
			 * propagate so it will be re-queued.
			 */
			 */
			if (err == -EAGAIN) {
			if (err == -EAGAIN) {
				if (!ice_workqueue) {
					spin_unlock_irqrestore(
					&qcom_host->ice_work_lock,
					flags);

					dev_err(qcom_host->hba->dev,
						"%s: error %d workqueue NULL\n",
						__func__, err);
					return -EINVAL;
				}

				dev_dbg(qcom_host->hba->dev,
				dev_dbg(qcom_host->hba->dev,
					"%s: scheduling task for ice setup\n",
					"%s: scheduling task for ice setup\n",
					__func__);
					__func__);
@@ -369,8 +383,13 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
		return -EINVAL;
		return -EINVAL;
	}
	}


	if (qcom_host->hw_ver.major == 0x3) {
	if (qcom_host->hw_ver.major >= 0x3) {
		/* nothing to do here for version 0x3, exit silently */
		/*
		 * ICE 3.0 crypto sequences were changed,
		 * CTRL_INFO register no longer exists
		 * and doesn't need to be configured.
		 * The configuration is done via utrd.
		 */
		return 0;
		return 0;
	}
	}


@@ -405,6 +424,16 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
			 * propagate so it will be re-queued.
			 * propagate so it will be re-queued.
			 */
			 */
			if (err == -EAGAIN) {
			if (err == -EAGAIN) {
				if (!ice_workqueue) {
					spin_unlock_irqrestore(
					&qcom_host->ice_work_lock,
					flags);

					dev_err(qcom_host->hba->dev,
						"%s: error %d workqueue NULL\n",
						__func__, err);
					return -EINVAL;
				}


				dev_dbg(qcom_host->hba->dev,
				dev_dbg(qcom_host->hba->dev,
					"%s: scheduling task for ice setup\n",
					"%s: scheduling task for ice setup\n",
+28 −16
Original line number Original line Diff line number Diff line
/*
/*
 * Copyright (c) 2013-2018, Linux Foundation. All rights reserved.
 * Copyright (c) 2013-2019, Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * it under the terms of the GNU General Public License version 2 and
@@ -764,6 +764,11 @@ static int ufs_qcom_config_vreg(struct device *dev,


	reg = vreg->reg;
	reg = vreg->reg;
	if (regulator_count_voltages(reg) > 0) {
	if (regulator_count_voltages(reg) > 0) {
		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(vreg->reg, uA_load);
		if (ret)
			goto out;

		min_uV = on ? vreg->min_uV : 0;
		min_uV = on ? vreg->min_uV : 0;
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
		if (ret) {
		if (ret) {
@@ -771,11 +776,6 @@ static int ufs_qcom_config_vreg(struct device *dev,
					__func__, vreg->name, ret);
					__func__, vreg->name, ret);
			goto out;
			goto out;
		}
		}

		uA_load = on ? vreg->max_uA : 0;
		ret = regulator_set_load(vreg->reg, uA_load);
		if (ret)
			goto out;
	}
	}
out:
out:
	return ret;
	return ret;
@@ -939,9 +939,15 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
		return 0;
		return 0;


	/* Use request LBA as the DUN value */
	/* Use request LBA as the DUN value */
	if (req->bio)
	if (req->bio) {
		*dun = (req->bio->bi_iter.bi_sector) >>
		if (bio_dun(req->bio)) {
				UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
			/* dun @bio can be split, so we have to adjust offset */
			*dun = bio_dun(req->bio);
		} else {
			*dun = req->bio->bi_iter.bi_sector;
			*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
		}
	}


	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);


@@ -1337,11 +1343,11 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
		/*
		/*
		 * If we are here to disable this clock it might be immediately
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * sure that device ref_clk is active for a given time after
		 * hibern8 enter.
		 * enter hibern8
		 */
		 */
		if (!enable)
		if (!enable)
			udelay(1);
			udelay(host->hba->dev_ref_clk_gating_wait);


		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);


@@ -1350,11 +1356,16 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)


		/*
		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * device ref_clk is stable for a given time before the hibern8
		 * exit command.
		 * exit command.
		 */
		 */
		if (enable)
		if (enable) {
			if (host->hba->dev_info.quirks &
			    UFS_DEVICE_QUIRK_WAIT_AFTER_REF_CLK_UNGATE)
				usleep_range(50, 60);
			else
				udelay(1);
				udelay(1);
		}


		host->is_dev_ref_clk_enabled = enable;
		host->is_dev_ref_clk_enabled = enable;
	}
	}
@@ -1614,7 +1625,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
		 * If auto hibern8 is supported then the link will already
		 * If auto hibern8 is supported then the link will already
		 * be in hibern8 state and the ref clock can be gated.
		 * be in hibern8 state and the ref clock can be gated.
		 */
		 */
		if (ufshcd_is_auto_hibern8_supported(hba) ||
		if ((ufshcd_is_auto_hibern8_supported(hba) &&
		     hba->hibern8_on_idle.is_enabled) ||
		    !ufs_qcom_is_link_active(hba)) {
		    !ufs_qcom_is_link_active(hba)) {
			/* disable device ref_clk */
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);
			ufs_qcom_dev_ref_clk_ctrl(host, false);
+3 −1
Original line number Original line Diff line number Diff line
@@ -517,7 +517,7 @@ struct ufs_query_res {
#define UFS_VREG_VCC_1P8_MAX_UV    1950000 /* uV */
#define UFS_VREG_VCC_1P8_MAX_UV    1950000 /* uV */
#define UFS_VREG_VCCQ_MIN_UV	   1100000 /* uV */
#define UFS_VREG_VCCQ_MIN_UV	   1100000 /* uV */
#define UFS_VREG_VCCQ_MAX_UV	   1300000 /* uV */
#define UFS_VREG_VCCQ_MAX_UV	   1300000 /* uV */
#define UFS_VREG_VCCQ2_MIN_UV	   1650000 /* uV */
#define UFS_VREG_VCCQ2_MIN_UV	   1750000 /* uV */
#define UFS_VREG_VCCQ2_MAX_UV	   1950000 /* uV */
#define UFS_VREG_VCCQ2_MAX_UV	   1950000 /* uV */


/*
/*
@@ -557,6 +557,7 @@ struct ufs_dev_info {
	u8	b_device_sub_class;
	u8	b_device_sub_class;
	u16	w_manufacturer_id;
	u16	w_manufacturer_id;
	u8	i_product_name;
	u8	i_product_name;
	u16	w_spec_version;


	/* query flags */
	/* query flags */
	bool f_power_on_wp_en;
	bool f_power_on_wp_en;
@@ -580,6 +581,7 @@ struct ufs_dev_info {
struct ufs_dev_desc {
struct ufs_dev_desc {
	u16 wmanufacturerid;
	u16 wmanufacturerid;
	char model[MAX_MODEL_LEN + 1];
	char model[MAX_MODEL_LEN + 1];
	u16 wspecversion;
};
};


/**
/**
+11 −2
Original line number Original line Diff line number Diff line
@@ -138,10 +138,19 @@ struct ufs_dev_fix {
 * HS-G1 to HS-G2 followed by HS-G2 to HS-G3. Enabling this quirk for such
 * HS-G1 to HS-G2 followed by HS-G2 to HS-G3. Enabling this quirk for such
 * device would apply this 2 steps gear switch workaround.
 * device would apply this 2 steps gear switch workaround.
 */
 */
#define UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH (1 << 8)
#define UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH	(1 << 9)

 /* Some UFS devices require VS_DebugSaveConfigTime is 0x10,
 /* Some UFS devices require VS_DebugSaveConfigTime is 0x10,
 * enabling this quirk ensure this.
 * enabling this quirk ensure this.
 */
 */
#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME	(1 << 9)
#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME	(1 << 10)

/*
 * Some UFS devices need more delay after device reference clk is turned on
 * but before initiation of the state transition to STALL from a LS-MODE or
 * from the HIBERN8 state. Enable this quirk to give UFS devices 50us delay
 * instead of the default delay.
 */
#define UFS_DEVICE_QUIRK_WAIT_AFTER_REF_CLK_UNGATE	(1 << 11)


#endif /* UFS_QUIRKS_H_ */
#endif /* UFS_QUIRKS_H_ */
Loading