Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2136ff5b authored by Linux Build Service Account, committed by Gerrit — the friendly Code Review server
Browse files

Merge "ARM: dts: msm: enable SDHCI PM QoS for msm8996 for SD card"

parents c57b8e5f 360ddc72
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -935,6 +935,12 @@
		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
						100000000 200000000 4294967295>;

		qcom,pm-qos-cpu-groups = <0x03 0x0c>;
		qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
		qcom,pm-qos-irq-type = "affine_cores";
		qcom,pm-qos-irq-cpu = <0>;
		qcom,pm-qos-irq-latency = <70 70>;

		status = "disabled";
	};

+7 −0
Original line number Diff line number Diff line
@@ -342,6 +342,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
			blk_cleanup_queue(mq->queue);
		} else {
			sema_init(&mq->thread_sem, 1);
			/* hook for pm qos cmdq init */
			if (card->host->cmdq_ops->init)
				card->host->cmdq_ops->init(card->host);
			mq->queue->queuedata = mq;
			card->host->cmdq_ctx.q = mq->queue;
			mq->thread = kthread_run(mmc_cmdq_thread, mq,
@@ -470,6 +473,10 @@ cur_sg_alloc_failed:
success:
	sema_init(&mq->thread_sem, 1);

	/* hook for pm qos legacy init */
	if (card->host->ops->init)
		card->host->ops->init(card->host);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

+47 −0
Original line number Diff line number Diff line
@@ -24,8 +24,12 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/sdhci.h>
#include <linux/workqueue.h>

#include "cmdq_hci.h"
#include "sdhci.h"
#include "sdhci-msm.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32
@@ -575,6 +579,21 @@ static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,

}

/*
 * Vote for the CPU-group PM QoS latency matching the CPU that issued
 * this request, using the CMDQ latency table from platform data.
 */
static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
{
	struct sdhci_pltfm_host *pltfm = sdhci_priv(host);
	struct sdhci_msm_host *msm = pltfm->priv;
	struct sdhci_msm_pm_qos_latency *cmdq_latency =
		msm->pdata->pm_qos_data.cmdq_latency;

	sdhci_msm_pm_qos_cpu_vote(host, cmdq_latency, mrq->req->cpu);
}

static void cmdq_pm_qos_unvote(struct sdhci_host *host, struct mmc_request *mrq)
{
	/* use async as we're inside an atomic context (soft-irq) */
	sdhci_msm_pm_qos_cpu_unvote(host, mrq->req->cpu, true);
}

static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
@@ -582,6 +601,7 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
	u64 *task_desc = NULL;
	u32 tag = mrq->cmdq_req->tag;
	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
	struct sdhci_host *host = mmc_priv(mmc);

	if (!cq_host->enabled) {
		pr_err("%s: CMDQ host not enabled yet !!!\n",
@@ -628,6 +648,9 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
	if (cq_host->ops->set_tranfer_params)
		cq_host->ops->set_tranfer_params(mmc);

	/* PM QoS */
	sdhci_msm_pm_qos_irq_vote(host);
	cmdq_pm_qos_vote(host, mrq);
ring_doorbell:
	/* Ensure the task descriptor list is flushed before ringing doorbell */
	wmb();
@@ -781,6 +804,7 @@ static void cmdq_post_req(struct mmc_host *host, struct mmc_request *mrq,
			  int err)
{
	struct mmc_data *data = mrq->data;
	struct sdhci_host *sdhci_host = mmc_priv(host);

	if (data) {
		data->error = err;
@@ -791,6 +815,10 @@ static void cmdq_post_req(struct mmc_host *host, struct mmc_request *mrq,
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = blk_rq_bytes(mrq->req);

		/* we're in atomic context (soft-irq) so unvote async. */
		sdhci_msm_pm_qos_irq_unvote(sdhci_host, true);
		cmdq_pm_qos_unvote(sdhci_host, mrq);
	}
}

@@ -802,7 +830,26 @@ static void cmdq_dumpstate(struct mmc_host *mmc)
	cmdq_runtime_pm_put(cq_host);
}

/*
 * Deferred CMDQ host initialization: bring up the MSM PM QoS
 * infrastructure (IRQ voting plus, when configured, per-CPU-group
 * voting) once the mmc host exists.
 *
 * TODO: this is MSM-specific and should eventually move to something
 * like "sdhci-cmdq-msm" rather than living in the generic cmdq layer.
 */
static int cmdq_late_init(struct mmc_host *mmc)
{
	struct sdhci_host *sdhci = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm = sdhci_priv(sdhci);
	struct sdhci_msm_host *msm = pltfm->priv;

	sdhci_msm_pm_qos_irq_init(sdhci);

	if (msm->pdata->pm_qos_data.cmdq_valid)
		sdhci_msm_pm_qos_cpu_init(sdhci,
				msm->pdata->pm_qos_data.cmdq_latency);

	return 0;
}

static const struct mmc_cmdq_host_ops cmdq_host_ops = {
	.init = cmdq_late_init,
	.enable = cmdq_enable,
	.disable = cmdq_disable,
	.request = cmdq_request,
+268 −0
Original line number Diff line number Diff line
@@ -3141,6 +3141,271 @@ void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
	}
}

/*
 * Deferred IRQ PM QoS unvote, scheduled from atomic context.
 * A new vote may have arrived since this work was queued; in that case
 * the counter is non-zero and the request must be left alone.
 */
static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
{
	struct sdhci_msm_pm_qos_irq *irq_qos =
		container_of(work, struct sdhci_msm_pm_qos_irq, unvote_work);

	if (!atomic_read(&irq_qos->counter)) {
		irq_qos->latency = PM_QOS_DEFAULT_VALUE;
		pm_qos_update_request(&irq_qos->req, irq_qos->latency);
	}
}

/*
 * Take a reference-counted PM QoS vote on behalf of the host's IRQ.
 * Only the first voter - or a vote whose target latency differs because
 * the host power policy changed - touches the pm_qos request itself.
 */
void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *latency =
		&msm_host->pdata->pm_qos_data.irq_latency;
	int counter;

	/* No-op until sdhci_msm_pm_qos_irq_init() has registered the request */
	if (!msm_host->pm_qos_irq.enabled)
		return;

	counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
	/* Make sure to update the voting in case power policy has changed */
	if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* A deferred unvote may still be queued; flush it before re-voting */
	cancel_work_sync(&msm_host->pm_qos_irq.unvote_work);
	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
				msm_host->pm_qos_irq.latency);
}

/*
 * Drop one reference on the IRQ PM QoS vote; the request is only relaxed
 * back to PM_QOS_DEFAULT_VALUE when the last voter leaves.
 *
 * @async: when true (caller is in atomic context) the pm_qos update is
 *	   deferred to unvote_work instead of being applied inline.
 */
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int counter;

	if (!msm_host->pm_qos_irq.enabled)
		return;

	counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
	/* A negative count means unbalanced vote/unvote calls - a driver bug */
	if (counter < 0) {
		pr_err("%s: counter=%d\n", __func__, counter);
		BUG();
	}
	/* Other voters still outstanding; keep the request as-is */
	if (counter)
		return;

	if (async) {
		schedule_work(&msm_host->pm_qos_irq.unvote_work);
		return;
	}

	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos_irq.req,
			msm_host->pm_qos_irq.latency);
}

/*
 * One-time registration of the IRQ PM QoS request for this host, driven
 * by the irq_* fields of the platform data. Safe to call repeatedly:
 * later calls return early once pm_qos_irq.enabled is set.
 */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	struct sdhci_msm_pm_qos_latency *irq_latency;

	/* Nothing to do when DT provided no IRQ PM QoS configuration */
	if (!msm_host->pdata->pm_qos_data.irq_valid)
		return;

	/* Initialize only once as this gets called per partition */
	if (msm_host->pm_qos_irq.enabled)
		return;

	atomic_set(&msm_host->pm_qos_irq.counter, 0);
	/* Affine the request to either the host IRQ or a fixed CPU */
	msm_host->pm_qos_irq.req.type =
			msm_host->pdata->pm_qos_data.irq_req_type;
	if (msm_host->pm_qos_irq.req.type == PM_QOS_REQ_AFFINE_IRQ)
		msm_host->pm_qos_irq.req.irq = host->irq;
	else
		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));

	INIT_WORK(&msm_host->pm_qos_irq.unvote_work,
		sdhci_msm_pm_qos_irq_unvote_work);
	/* For initialization phase, set the performance latency */
	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
	msm_host->pm_qos_irq.latency =
		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
			msm_host->pm_qos_irq.latency);
	/* Set last, so vote/unvote only run against a registered request */
	msm_host->pm_qos_irq.enabled = true;
}

/*
 * Map a CPU number to the index of the configured PM QoS CPU group that
 * contains it. Returns -EINVAL when @cpu is negative or belongs to no
 * configured group.
 */
static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
{
	struct sdhci_msm_cpu_group_map *map =
			&msm_host->pdata->pm_qos_data.cpu_group_map;
	int grp;

	if (cpu >= 0) {
		for (grp = 0; grp < map->nr_groups; grp++) {
			if (cpumask_test_cpu(cpu, &map->mask[grp]))
				return grp;
		}
	}

	return -EINVAL;
}

/*
 * Take a reference-counted PM QoS vote on the CPU group containing @cpu.
 * The pm_qos request is only updated by the first voter, or when the host
 * power policy changed the target latency.
 *
 * NOTE(review): @latency is indexed only by power policy here, while
 * sdhci_msm_pm_qos_cpu_init() uses per-group entries (latency[i]) -
 * confirm whether latency[group] was intended.
 */
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
	struct sdhci_msm_pm_qos_group *pm_qos_group;
	int counter;

	/* Group voting disabled, or @cpu maps to no configured group */
	if (!msm_host->pm_qos_group_enable || group < 0)
		return;

	pm_qos_group = &msm_host->pm_qos[group];
	counter = atomic_inc_return(&pm_qos_group->counter);

	/* Make sure to update the voting in case power policy has changed */
	if (pm_qos_group->latency == latency->latency[host->power_policy]
		&& counter > 1)
		return;

	/* A deferred unvote may still be pending; flush it before re-voting */
	cancel_work_sync(&pm_qos_group->unvote_work);

	pm_qos_group->latency = latency->latency[host->power_policy];
	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
}

/*
 * Deferred CPU-group PM QoS unvote. If a new vote raced in after this
 * work was scheduled, the counter is non-zero and nothing is changed.
 */
static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
{
	struct sdhci_msm_pm_qos_group *grp =
		container_of(work, struct sdhci_msm_pm_qos_group, unvote_work);

	if (atomic_read(&grp->counter) != 0)
		return;

	grp->latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&grp->req, grp->latency);
}

/*
 * Drop one vote on the PM QoS group containing @cpu.
 * Returns true when this call released the last vote (the request was
 * relaxed, or unvote_work was scheduled to relax it), false otherwise.
 *
 * @async: defer the pm_qos update to unvote_work; used from atomic context.
 *
 * NOTE(review): unlike sdhci_msm_pm_qos_irq_unvote(), an unbalanced call
 * here silently drives the counter negative instead of BUG()-ing -
 * confirm callers always pair vote/unvote.
 */
bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int group = sdhci_msm_get_cpu_group(msm_host, cpu);

	/* Disabled, unknown group, or other voters still outstanding */
	if (!msm_host->pm_qos_group_enable || group < 0 ||
		atomic_dec_return(&msm_host->pm_qos[group].counter))
		return false;

	if (async) {
		schedule_work(&msm_host->pm_qos[group].unvote_work);
		return true;
	}

	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
	pm_qos_update_request(&msm_host->pm_qos[group].req,
				msm_host->pm_qos[group].latency);
	return true;
}

/*
 * One-time setup of the per-CPU-group PM QoS requests described by the
 * platform data's cpu_group_map. @latency points at an array with one
 * entry per group. On allocation failure, group voting simply stays
 * disabled (pm_qos_group_enable remains false).
 */
void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
	struct sdhci_msm_pm_qos_group *group;
	int i;

	/* Already initialized (this may be called once per partition) */
	if (msm_host->pm_qos_group_enable)
		return;

	msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
			GFP_KERNEL);
	if (!msm_host->pm_qos)
		return;

	for (i = 0; i < nr_groups; i++) {
		group = &msm_host->pm_qos[i];
		INIT_WORK(&group->unvote_work,
			sdhci_msm_pm_qos_cpu_unvote_work);
		atomic_set(&group->counter, 0);
		group->req.type = PM_QOS_REQ_AFFINE_CORES;
		cpumask_copy(&group->req.cpus_affine,
			&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
		/* For initialization phase, set the performance mode latency */
		group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
			group->latency);
		pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
			__func__, i,
			group->req.cpus_affine.bits[0],
			group->latency,
			&latency[i].latency[SDHCI_PERFORMANCE_MODE]);
	}
	/* No CPU voted for yet; enable voting last */
	msm_host->pm_qos_prev_cpu = -1;
	msm_host->pm_qos_group_enable = true;
}

/*
 * Request-issue hook for the legacy (non-CMDQ) path: vote for IRQ PM QoS
 * and for the PM QoS group of the CPU issuing this request.
 *
 * The CPU voted for last time is cached in pm_qos_prev_cpu; the group
 * vote is only moved when the issuing CPU now belongs to a different
 * group than before.
 */
static void sdhci_msm_pre_req(struct sdhci_host *host,
		struct mmc_request *mmc_req)
{
	int cpu;
	int group;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;
	int prev_group = sdhci_msm_get_cpu_group(msm_host,
			msm_host->pm_qos_prev_cpu);

	sdhci_msm_pm_qos_irq_vote(host);

	/* Snapshot the issuing CPU (we may be migrated right after put_cpu) */
	cpu = get_cpu();
	put_cpu();
	group = sdhci_msm_get_cpu_group(msm_host, cpu);
	if (group < 0)
		return;

	/* Moved to a different group: release the vote held for the old one */
	if (group != prev_group && prev_group >= 0) {
		sdhci_msm_pm_qos_cpu_unvote(host,
				msm_host->pm_qos_prev_cpu, false);
		prev_group = -1; /* make sure to vote for new group */
	}

	if (prev_group < 0) {
		sdhci_msm_pm_qos_cpu_vote(host,
				msm_host->pdata->pm_qos_data.latency, cpu);
		msm_host->pm_qos_prev_cpu = cpu;
	}
}

/*
 * Request-completion hook for the legacy (non-CMDQ) path: drop the PM QoS
 * votes taken by sdhci_msm_pre_req(). Both unvotes are synchronous
 * (async == false) on this path.
 */
static void sdhci_msm_post_req(struct sdhci_host *host,
				struct mmc_request *mmc_req)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	sdhci_msm_pm_qos_irq_unvote(host, false);

	/* Forget the cached CPU only once its group vote is actually dropped */
	if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
		msm_host->pm_qos_prev_cpu = -1;
}

static void sdhci_msm_init(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = pltfm_host->priv;

	sdhci_msm_pm_qos_irq_init(host);

	if (msm_host->pdata->pm_qos_data.legacy_valid)
		sdhci_msm_pm_qos_cpu_init(host,
				msm_host->pdata->pm_qos_data.latency);
}

static struct sdhci_ops sdhci_msm_ops = {
	.crypto_engine_cfg = sdhci_msm_ice_cfg,
	.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
@@ -3164,6 +3429,9 @@ static struct sdhci_ops sdhci_msm_ops = {
	.detect = sdhci_msm_detect,
	.notify_load = sdhci_msm_notify_load,
	.reset_workaround = sdhci_msm_reset_workaround,
	.init = sdhci_msm_init,
	.pre_req = sdhci_msm_pre_req,
	.post_req = sdhci_msm_post_req,
};

static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
+35 −0
Original line number Diff line number Diff line
@@ -105,6 +105,26 @@ struct sdhci_msm_pm_qos_data {
	bool legacy_valid;
};

/*
 * PM QoS for group voting management - each cpu group defined is associated
 * with 1 instance of this structure.
 */
struct sdhci_msm_pm_qos_group {
	struct pm_qos_request req;	/* registered CPU DMA latency request */
	struct work_struct unvote_work;	/* deferred unvote from atomic context */
	atomic_t counter;		/* outstanding votes on this group */
	s32 latency;			/* latency currently requested */
};

/* PM QoS HW IRQ voting */
struct sdhci_msm_pm_qos_irq {
	struct pm_qos_request req;	/* IRQ/CPU-affine latency request */
	struct work_struct unvote_work;	/* deferred unvote from atomic context */
	atomic_t counter;		/* outstanding votes */
	s32 latency;			/* latency currently requested */
	bool enabled;			/* set once the request is registered */
};

struct sdhci_msm_pltfm_data {
	/* Supported UHS-I Modes */
	u32 caps;
@@ -182,8 +202,23 @@ struct sdhci_msm_host {
	u32 caps_0;
	struct sdhci_msm_ice_data ice;
	u32 ice_clk_rate;
	struct sdhci_msm_pm_qos_group *pm_qos;
	int pm_qos_prev_cpu;
	bool pm_qos_group_enable;
	struct sdhci_msm_pm_qos_irq pm_qos_irq;
};

extern char *saved_command_line;

/* PM QoS voting on behalf of the host IRQ (or its affine CPU) */
void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host);
void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host);
void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async);

/* PM QoS voting per configured CPU group */
void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency);
void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
		struct sdhci_msm_pm_qos_latency *latency, int cpu);
/* Returns true when the last vote for the group was released */
bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async);


#endif /* __SDHCI_MSM_H__ */
Loading