Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c5dc9a74 authored by Asutosh Das
Browse files

mmc: sdhci: delay the QoS vote removal



Delaying the QoS vote removal yields some performance improvement
in single-threaded use cases.

Change-Id: I80545486057c55c697b72b56d57e2ea47cff86b9
Signed-off-by: Asutosh Das <asutoshd@codeaurora.org>
parent ac6766f8
Loading
Loading
Loading
Loading
+16 −10
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@
#include "sdhci-msm-ice.h"
#include "cmdq_hci.h"

#define QOS_REMOVE_DELAY_MS	10
#define CORE_POWER		0x0
#define CORE_SW_RST		(1 << 7)

@@ -3192,7 +3193,8 @@ void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
{
	struct sdhci_msm_pm_qos_irq *pm_qos_irq =
		container_of(work, struct sdhci_msm_pm_qos_irq, unvote_work);
		container_of(work, struct sdhci_msm_pm_qos_irq,
			     unvote_work.work);

	if (atomic_read(&pm_qos_irq->counter))
		return;
@@ -3218,7 +3220,7 @@ void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
		&& counter > 1)
		return;

	cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
	cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
				msm_host->pm_qos_irq.latency);
@@ -3244,7 +3246,8 @@ void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
		return;

	if (async) {
		schedule_work(&msm_host->pm_qos_irq.unvote_work);
		schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
		return;
	}

@@ -3299,7 +3302,7 @@ sdhci_msm_pm_qos_irq_enable_store(struct device *dev,

	msm_host->pm_qos_irq.enabled = enable;
	if (!enable) {
		cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
		cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
		atomic_set(&msm_host->pm_qos_irq.counter, 0);
		msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
		pm_qos_update_request(&msm_host->pm_qos_irq.req,
@@ -3345,7 +3348,7 @@ void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_WORK(&msm_host->pm_qos_irq.unvote_work,
	INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
@@ -3439,7 +3442,8 @@ static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
	msm_host->pm_qos_group_enable = enable;
	if (!enable) {
		for (i = 0; i < nr_groups; i++) {
			cancel_work_sync(&msm_host->pm_qos[i].unvote_work);
			cancel_delayed_work_sync(
				&msm_host->pm_qos[i].unvote_work);
			atomic_set(&msm_host->pm_qos[i].counter, 0);
			msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
			pm_qos_update_request(&msm_host->pm_qos[i].req,
@@ -3488,7 +3492,7 @@ void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		&& counter > 1)
		return;

	cancel_work_sync(&pm_qos_group->unvote_work);
	cancel_delayed_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
@@ -3497,7 +3501,8 @@ void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
{
	struct sdhci_msm_pm_qos_group *group =
		container_of(work, struct sdhci_msm_pm_qos_group, unvote_work);
		container_of(work, struct sdhci_msm_pm_qos_group,
			     unvote_work.work);

	if (atomic_read(&group->counter))
		return;
@@ -3517,7 +3522,8 @@ bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
		return false;

	if (async) {
		schedule_work(&msm_host->pm_qos[group].unvote_work);
		schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
				      msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
		return true;
	}

@@ -3547,7 +3553,7 @@ void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,

	for (i = 0; i < nr_groups; i++) {
		group = &msm_host->pm_qos[i];
		INIT_WORK(&group->unvote_work,
		INIT_DELAYED_WORK(&group->unvote_work,
			sdhci_msm_pm_qos_cpu_unvote_work);
		atomic_set(&group->counter, 0);
		group->req.type = PM_QOS_REQ_AFFINE_CORES;
+2 −2
Original line number Diff line number Diff line
@@ -111,7 +111,7 @@ struct sdhci_msm_pm_qos_data {
 */
struct sdhci_msm_pm_qos_group {
	struct pm_qos_request req;
	struct work_struct unvote_work;
	struct delayed_work unvote_work;
	atomic_t counter;
	s32 latency;
};
@@ -119,7 +119,7 @@ struct sdhci_msm_pm_qos_group {
/* PM QoS HW IRQ voting */
struct sdhci_msm_pm_qos_irq {
	struct pm_qos_request req;
	struct work_struct unvote_work;
	struct delayed_work unvote_work;
	struct device_attribute enable_attr;
	struct device_attribute status_attr;
	atomic_t counter;