
Commit d53d3338 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "scsi: ufs: change clock gating timeout based on load"

parents 55a3dd71 e43c44f7
drivers/scsi/ufs/ufs-qcom.c  +2 −2
@@ -1225,9 +1225,9 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

-	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
+	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
	hba->caps |= UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;

	if (host->hw_ver.major >= 0x2) {
		hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
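Before reading the large ufshcd.c diff, it helps to see the core policy in isolation. The sketch below is illustrative only: the helper name is invented here, and in the actual patch this logic sits inline in ufshcd_devfreq_scale() after a successful scale operation (see the hunk at @@ -8346,6 +8487,17 further down).

/* Illustrative sketch only -- not a function the patch adds. */
static void ufshcd_pick_clkgate_delay(struct ufs_hba *hba, bool scale_up)
{
	hba->clk_scaling.is_scaled_up = scale_up;
	/*
	 * Under load (clocks scaled up), keep the clocks on longer before
	 * gating to avoid gate/ungate churn; when idle (scaled down), gate
	 * quickly to save power.
	 */
	hba->clk_gating.delay_ms = scale_up ?
		hba->clk_gating.delay_ms_perf :		/* 50 ms default */
		hba->clk_gating.delay_ms_pwr_save;	/* 10 ms default */
}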
drivers/scsi/ufs/ufshcd.c  +278 −57
@@ -217,6 +217,9 @@ void ufshcd_update_query_stats(struct ufs_hba *hba,
/* default value of auto suspend is 3 seconds */
#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */

#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE	10
#define UFSHCD_CLK_GATING_DELAY_MS_PERF		50

/* IOCTL opcode for command - ufs set device read only */
#define UFS_IOCTL_BLKROSET      BLKROSET

@@ -372,6 +375,7 @@ static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void ufshcd_release_all(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -1185,8 +1189,6 @@ static void ufshcd_ungate_work(struct work_struct *work)
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
-	if (hba->clk_scaling.is_allowed)
-		ufshcd_resume_clkscaling(hba);
	ufshcd_scsi_unblock_requests(hba);
}

@@ -1300,8 +1302,6 @@ static void ufshcd_gate_work(struct work_struct *work)
		ufshcd_set_link_hibern8(hba);
	}

-	ufshcd_suspend_clkscaling(hba);
-
	if (!ufshcd_is_link_active(hba) && !hba->no_ref_clk_gating)
		ufshcd_disable_clocks(hba, true);
	else
@@ -1349,6 +1349,7 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);

	schedule_delayed_work(&hba->clk_gating.gate_work,
			      msecs_to_jiffies(hba->clk_gating.delay_ms));
}
@@ -1385,6 +1386,63 @@ static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
	return count;
}

static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			hba->clk_gating.delay_ms_pwr_save);
}

static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);

	hba->clk_gating.delay_ms_pwr_save = value;
	if (ufshcd_is_clkscaling_supported(hba) &&
	    !hba->clk_scaling.is_scaled_up)
		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
}

static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);

	hba->clk_gating.delay_ms_perf = value;
	if (ufshcd_is_clkscaling_supported(hba) &&
	    hba->clk_scaling.is_scaled_up)
		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
@@ -1422,15 +1480,58 @@ out:

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	struct ufs_clk_gating *gating = &hba->clk_gating;

	if (!ufshcd_is_clkgating_allowed(hba))
		return;

-	hba->clk_gating.delay_ms = 50;
-	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
-	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+	INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
+	INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);

-	hba->clk_gating.is_enabled = true;
+	gating->is_enabled = true;

	/*
	 * Scheduling the delayed work after 1 jiffy makes it run any time
	 * between 0 ms and 1000/HZ ms, which is not desirable for the
	 * hibern8 enter work as it may hurt performance if it gets
	 * scheduled almost immediately. Hence make sure the hibern8 enter
	 * work gets scheduled at least 2 jiffies out (any time between
	 * 1000/HZ ms and 2000/HZ ms).
	 */
	gating->delay_ms_pwr_save = jiffies_to_msecs(
		max_t(unsigned long,
		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
		      2));
	gating->delay_ms_perf = jiffies_to_msecs(
		max_t(unsigned long,
		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
		      2));

	/* start with performance mode */
	gating->delay_ms = gating->delay_ms_perf;

	if (!ufshcd_is_clkscaling_supported(hba))
		goto scaling_not_supported;

	gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
	gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
	sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
	gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
	gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");

	gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
	gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
	sysfs_attr_init(&gating->delay_perf_attr.attr);
	gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
	gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &gating->delay_perf_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");

	goto add_clkgate_enable;

scaling_not_supported:
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
@@ -1439,12 +1540,13 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

-	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
-	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
-	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
-	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
-	hba->clk_gating.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
-	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+add_clkgate_enable:
+	gating->enable_attr.show = ufshcd_clkgate_enable_show;
+	gating->enable_attr.store = ufshcd_clkgate_enable_store;
+	sysfs_attr_init(&gating->enable_attr.attr);
+	gating->enable_attr.attr.name = "clkgate_enable";
+	gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &gating->enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}

@@ -1452,7 +1554,13 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	if (ufshcd_is_clkscaling_supported(hba)) {
		device_remove_file(hba->dev,
				   &hba->clk_gating.delay_pwr_save_attr);
		device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
	} else {
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	}
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
@@ -1790,9 +1898,27 @@ static void ufshcd_release_all(struct ufs_hba *hba)
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
		return;

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = jiffies;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
@@ -4804,6 +4930,8 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
				complete(hba->dev_cmd.complete);
			}
		}
		if (ufshcd_is_clkscaling_supported(hba))
			hba->clk_scaling.active_reqs--;
	}
}

@@ -4848,6 +4976,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
				complete(hba->dev_cmd.complete);
			}
		}
		if (ufshcd_is_clkscaling_supported(hba))
			hba->clk_scaling.active_reqs--;
	}

	/* clear corresponding bits of completed commands */
@@ -6530,6 +6660,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
		memcpy(&hba->clk_scaling.saved_pwr_info.info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));
		hba->clk_scaling.saved_pwr_info.is_valid = true;
		hba->clk_scaling.is_scaled_up = true;
		ufshcd_resume_clkscaling(hba);
		hba->clk_scaling.is_allowed = true;
	}
@@ -7315,7 +7446,10 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		if (ufshcd_is_clkscaling_supported(hba)) {
			ufshcd_suspend_clkscaling(hba);
			destroy_workqueue(hba->clk_scaling.workq);
		}
		ufshcd_disable_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
@@ -7599,7 +7733,11 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
	hba->clk_gating.is_suspended = true;
	hba->hibern8_on_idle.is_suspended = true;

	if (hba->clk_scaling.is_allowed) {
		cancel_work_sync(&hba->clk_scaling.suspend_work);
		cancel_work_sync(&hba->clk_scaling.resume_work);
		ufshcd_suspend_clkscaling(hba);
	}

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
			req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -7681,6 +7819,7 @@ disable_clks:
	goto out;

set_link_active:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
@@ -7693,6 +7832,7 @@ set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	if (hba->clk_scaling.is_allowed)
		ufshcd_resume_clkscaling(hba);
	hba->hibern8_on_idle.is_suspended = false;
	hba->clk_gating.is_suspended = false;
@@ -7801,6 +7941,7 @@ disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	if (hba->clk_scaling.is_allowed)
		ufshcd_suspend_clkscaling(hba);
	ufshcd_disable_clocks(hba, false);
out:
@@ -8326,7 +8467,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
		return ret;

	/* let's not get into low power until clock scaling is completed */
-	ufshcd_hibern8_hold(hba, false);
+	ufshcd_hold_all(hba);
	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
@@ -8346,6 +8487,17 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
			goto out;
		}
	}

	if (!ret) {
		hba->clk_scaling.is_scaled_up = scale_up;
		if (scale_up)
			hba->clk_gating.delay_ms =
				hba->clk_gating.delay_ms_perf;
		else
			hba->clk_gating.delay_ms =
				hba->clk_gating.delay_ms_pwr_save;
	}

	goto out;

scale_up_gear:
@@ -8353,21 +8505,55 @@ scale_up_gear:
		ufshcd_scale_gear(hba, true);
out:
	ufshcd_clock_scaling_unprepare(hba);
-	ufshcd_hibern8_release(hba, false);
+	ufshcd_release_all(hba);
	return ret;
}

static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

-	devfreq_suspend_device(hba->devfreq);
-	hba->clk_scaling.window_start_t = 0;
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}

@@ -8396,6 +8582,11 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
@@ -8405,7 +8596,6 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}
-	hba->clk_scaling.is_allowed = value;

	ufshcd_release(hba, false);
	pm_runtime_put_sync(hba->dev);
@@ -8413,6 +8603,40 @@ out:
	return count;
}

static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}

static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
@@ -8420,7 +8644,7 @@ static int ufshcd_devfreq_target(struct device *dev,
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long irq_flags;
	ktime_t start;
-	bool scale_up, release_clk_hold = false;
+	bool scale_up, sched_clk_scaling_suspend_work = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;
@@ -8430,50 +8654,34 @@ static int ufshcd_devfreq_target(struct device *dev,
		return -EINVAL;
	}

-	scale_up = (*freq == UINT_MAX) ? true : false;
-	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up))
-		return 0; /* no state change required */
-
	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

-	if (ufshcd_is_clkgating_allowed(hba)) {
-		if (cancel_delayed_work(&hba->clk_gating.gate_work) ||
-		    (hba->clk_gating.state == CLKS_ON)) {
-			/* hold the vote until the scaling work is completed */
-			hba->clk_gating.active_reqs++;
-			release_clk_hold = true;
-			if (hba->clk_gating.state != CLKS_ON) {
-				hba->clk_gating.state = CLKS_ON;
-				trace_ufshcd_clk_gating(dev_name(hba->dev),
-					hba->clk_gating.state);
-			}
-		} else {
-			/*
-			 * Clock gating work seems to be running in parallel
-			 * hence skip scaling work to avoid deadlock between
-			 * current scaling work and gating work.
-			 */
-			spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-			return 0;
-		}
-	}
+	if (!hba->clk_scaling.active_reqs)
+		sched_clk_scaling_suspend_work = true;
+
+	scale_up = (*freq == UINT_MAX) ? true : false;
+	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		ret = 0;
+		goto out; /* no state change required */
+	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();

	ret = ufshcd_devfreq_scale(hba, scale_up);

-	if (release_clk_hold)
-		ufshcd_release(hba, false);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

+out:
+	if (sched_clk_scaling_suspend_work)
+		queue_work(hba->clk_scaling.workq,
+			   &hba->clk_scaling.suspend_work);

	return ret;
}

@@ -8701,6 +8909,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
	}

	if (ufshcd_is_clkscaling_supported(hba)) {
		char wq_name[sizeof("ufs_clkscaling_00")];

		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						   "simple_ondemand", gov_data);
		if (IS_ERR(hba->devfreq)) {
@@ -8708,6 +8918,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
					PTR_ERR(hba->devfreq));
			goto out_remove_scsi_host;
		}
		hba->clk_scaling.is_suspended = false;

		INIT_WORK(&hba->clk_scaling.suspend_work,
			  ufshcd_clk_scaling_suspend_work);
		INIT_WORK(&hba->clk_scaling.resume_work,
			  ufshcd_clk_scaling_resume_work);

		snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
			 host->host_no);
		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);

		/* Suspend devfreq until the UFS device is detected */
		ufshcd_suspend_clkscaling(hba);
		ufshcd_clkscaling_init_sysfs(hba);
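With the patch applied, the two delays can be tuned from userspace through the sysfs attributes registered in ufshcd_init_clk_gating() above (note they are only created when clock scaling is supported). A minimal sketch, assuming a hypothetical device path — the attribute names come from the patch, but the path varies per SoC/board, so look it up under /sys/devices on the target:

#include <stdio.h>

/* Hypothetical UFS host controller sysfs path -- platform specific. */
#define UFS_DEV "/sys/devices/platform/soc/1d84000.ufshc"

static int write_attr(const char *name, unsigned long ms)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), UFS_DEV "/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%lu\n", ms);	/* parsed by kstrtoul() in the store handlers */
	return fclose(f);
}

int main(void)
{
	/* Gate quickly when scaled down, lazily when scaled up. */
	write_attr("clkgate_delay_ms_pwr_save", 10);
	write_attr("clkgate_delay_ms_perf", 50);
	return 0;
}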
drivers/scsi/ufs/ufshcd.h  +39 −6
@@ -390,10 +390,14 @@ enum clk_gating_state {
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
- * @delay_ms: gating delay in ms
+ * @delay_ms: current gating delay in ms
+ * @delay_ms_pwr_save: gating delay (in ms) in power save mode
+ * @delay_ms_perf: gating delay (in ms) in performance mode
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
- * @delay_attr: sysfs attribute to control delay_attr
+ * @delay_attr: sysfs attribute to control delay_ms if clock scaling is disabled
+ * @delay_pwr_save_attr: sysfs attribute to control delay_ms_pwr_save
+ * @delay_perf_attr: sysfs attribute to control delay_ms_perf
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @active_reqs: number of requests that are pending and should be waited for
@@ -404,8 +408,12 @@ struct ufs_clk_gating {
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	unsigned long delay_ms_pwr_save;
	unsigned long delay_ms_perf;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute delay_pwr_save_attr;
	struct device_attribute delay_perf_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
@@ -451,14 +459,39 @@ struct ufs_saved_pwr_info {
	bool is_valid;
};

/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time (in jiffies) of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @is_allowed: tracks if scaling is currently allowed or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 * @is_scaled_up: tracks if we are currently scaled up or scaled down
 */
struct ufs_clk_scaling {
-	ktime_t  busy_start_t;
-	bool is_busy_started;
	int active_reqs;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
+	ktime_t busy_start_t;
	struct device_attribute enable_attr;
-	bool is_allowed;
	struct ufs_saved_pwr_info saved_pwr_info;
+	struct workqueue_struct *workq;
+	struct work_struct suspend_work;
+	struct work_struct resume_work;
+	bool is_allowed;
+	bool is_busy_started;
+	bool is_suspended;
+	bool is_scaled_up;
};

/**