Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 349f41a0 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "mmc: sdhci-msm: Update DDR_CONFIG reg with HSR value if supplied"

parents 8b06ac70 20f3b4c8
Loading
Loading
Loading
Loading
+612 −78
Original line number Original line Diff line number Diff line
@@ -14,6 +14,11 @@
#include <linux/interconnect.h>
#include <linux/interconnect.h>
#include <linux/iopoll.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/of.h>


#include "sdhci-pltfm.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"
#include "cqhci.h"
@@ -126,6 +131,8 @@
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)


#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50
#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50
#define MSM_PMQOS_UNVOTING_DELAY_MS	10 /* msec */
#define MSM_CLK_GATING_DELAY_MS		200 /* msec */


/* Timeout value to avoid infinite waiting for pwr_irq */
/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000
#define MSM_PWR_IRQ_TIMEOUT_MS 5000
@@ -359,6 +366,30 @@ struct sdhci_msm_vreg_data {
	struct sdhci_msm_reg_data *vdd_io_data;
	struct sdhci_msm_reg_data *vdd_io_data;
};
};


/* Per cpu cluster qos group */
struct qos_cpu_group {
	cpumask_t mask;	/* CPU mask of cluster */
	unsigned int *votes;	/* Different votes for cluster */
	struct dev_pm_qos_request *qos_req;	/* Pointer to host qos request*/
	bool voted;	/* True while a votes[] constraint is applied */
	struct sdhci_msm_host *host;	/* Back-pointer to owning host */
	bool initialized;	/* qos_req allocated and requests registered */
	/*
	 * Last latency value written via dev_pm_qos_update_request().
	 * Widened from 'bool': storing the vote in a bool truncated it,
	 * so the dedup comparison in sdhci_msm_update_qos_constraints()
	 * almost never matched and every call re-issued the request.
	 */
	unsigned int curr_vote;
};

/* Per host qos request structure */
struct sdhci_msm_qos_req {
	struct qos_cpu_group *qcg;	/* CPU group per host */
	unsigned int num_groups;	/* Number of groups */
	unsigned int active_mask;	/* Active affine irq mask */
};

/*
 * Index into qos_cpu_group->votes[]; QOS_MAX is not an index — it
 * means "drop the constraint" (vote S32_MAX) in
 * sdhci_msm_update_qos_constraints().
 */
enum constraint {
	QOS_PERF,
	QOS_POWER,
	QOS_MAX,
};

struct sdhci_msm_host {
struct sdhci_msm_host {
	struct platform_device *pdev;
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	void __iomem *core_mem;	/* MSM SDCC mapped address */
@@ -389,15 +420,39 @@ struct sdhci_msm_host {
	bool skip_bus_bw_voting;
	bool skip_bus_bw_voting;
	struct sdhci_msm_bus_vote_data *bus_vote_data;
	struct sdhci_msm_bus_vote_data *bus_vote_data;
	struct delayed_work bus_vote_work;
	struct delayed_work bus_vote_work;
	struct delayed_work pmqos_unvote_work;
	struct delayed_work clk_gating_work;
	bool pltfm_init_done;
	bool pltfm_init_done;
	bool core_3_0v_support;
	bool core_3_0v_support;
	bool use_7nm_dll;
	bool use_7nm_dll;
	struct sdhci_msm_dll_hsr *dll_hsr;
	struct sdhci_msm_dll_hsr *dll_hsr;
	struct sdhci_msm_regs_restore regs_restore;
	struct sdhci_msm_regs_restore regs_restore;
	struct workqueue_struct *workq;	/* QoS work queue */
	struct sdhci_msm_qos_req *sdhci_qos;
	struct irq_affinity_notify affinity_notify;
	struct device_attribute clk_gating;
	struct device_attribute pm_qos;
	u32 clk_gating_delay;
	u32 pm_qos_delay;
};
};


#define	ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
/*
 * Capture the "androidboot.bootdevice=" kernel command line value so
 * probe can compare it against this device's name (used to skip hosts
 * that are not the boot device).  Built-in only; modules never see
 * __setup() parameters.
 */
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

static struct sdhci_msm_host *sdhci_slot[2];
static struct sdhci_msm_host *sdhci_slot[2];


static int sdhci_msm_update_qos_constraints(struct qos_cpu_group *qcg,
					enum constraint type);

static void sdhci_msm_bus_voting(struct sdhci_host *host, bool enable);
static void sdhci_msm_bus_voting(struct sdhci_host *host, bool enable);


static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
@@ -1143,7 +1198,13 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
		ddr_cfg_offset = msm_offset->core_ddr_config;
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr + ddr_cfg_offset);

	if (msm_host->dll_hsr->ddr_config)
		config = msm_host->dll_hsr->ddr_config;
	else
		config = DDR_CONFIG_POR_VAL;

	writel_relaxed(config, host->ioaddr + ddr_cfg_offset);


	if (mmc->ios.enhanced_strobe) {
	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
		config = readl_relaxed(host->ioaddr +
@@ -1169,9 +1230,21 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
		goto out;
		goto out;
	}
	}


	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3);
	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp and later tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		config |= CORE_PWRSAVE_DLL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3);
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}


	/*
	/*
	 * Drain writebuffer to ensure above DLL calibration
	 * Drain writebuffer to ensure above DLL calibration
@@ -2228,8 +2301,14 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)


	msm_set_clock_rate_for_bus_mode(host, clock);
	msm_set_clock_rate_for_bus_mode(host, clock);
out:
out:
	if (!msm_host->skip_bus_bw_voting)
	/* Vote on bus only with clock frequency or when changing clock
		sdhci_msm_bus_voting(host, !!clock);
	 * frequency. No need to vote when setting clock frequency as 0
	 * because after setting clock at 0, we release host, which will
	 * eventually call host runtime suspend and unvoting would be
	 * taken care in runtime suspend call.
	 */
	if (!msm_host->skip_bus_bw_voting && clock)
		sdhci_msm_bus_voting(host, true);
	__sdhci_msm_set_clock(host, clock);
	__sdhci_msm_set_clock(host, clock);
}
}


@@ -2598,33 +2677,11 @@ static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
	return cpu_rc;
	return cpu_rc;
}
}


/*
 * Internal work. Work to set 0 bandwidth for msm bus.
 */
static void sdhci_msm_bus_work(struct work_struct *work)
{
	struct sdhci_msm_host *msm_host;
	struct sdhci_host *host;

	msm_host = container_of(work, struct sdhci_msm_host,
				bus_vote_work.work);
	host =  platform_get_drvdata(msm_host->pdev);

	if (!msm_host->bus_vote_data->sdhc_ddr ||
			!msm_host->bus_vote_data->cpu_sdhc)
		return;
	/* don't vote for 0 bandwidth if any request is in progress */
	if (!host->mmc->ongoing_mrq)
		sdhci_msm_bus_set_vote(msm_host, 0);
	else
		pr_debug("Transfer in progress. Skipping bus voting to 0\n");
}

/*
/*
 * This function cancels any scheduled delayed work and sets the bus
 * This function cancels any scheduled delayed work and sets the bus
 * vote based on bw (bandwidth) argument.
 * vote based on bw (bandwidth) argument.
 */
 */
static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
static void sdhci_msm_bus_get_and_set_vote(struct sdhci_host *host,
						unsigned int bw)
						unsigned int bw)
{
{
	int vote;
	int vote;
@@ -2635,30 +2692,10 @@ static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
		!msm_host->bus_vote_data->sdhc_ddr ||
		!msm_host->bus_vote_data->sdhc_ddr ||
		!msm_host->bus_vote_data->cpu_sdhc)
		!msm_host->bus_vote_data->cpu_sdhc)
		return;
		return;
	cancel_delayed_work_sync(&msm_host->bus_vote_work);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
	sdhci_msm_bus_set_vote(msm_host, vote);
	sdhci_msm_bus_set_vote(msm_host, vote);
}
}


#define MSM_MMC_BUS_VOTING_DELAY	200 /* msecs */
#define VOTE_ZERO  0

/*
 * This function queues a work which will set the bandwidth
 * requirement to 0.
 */
static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (msm_host->bus_vote_data &&
		msm_host->bus_vote_data->curr_vote != VOTE_ZERO)
		queue_delayed_work(system_wq,
				   &msm_host->bus_vote_work,
				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
}

static struct sdhci_msm_bus_vote_data *sdhci_msm_get_bus_vote_data(struct device
static struct sdhci_msm_bus_vote_data *sdhci_msm_get_bus_vote_data(struct device
				       *dev, struct sdhci_msm_host *host)
				       *dev, struct sdhci_msm_host *host)


@@ -2778,8 +2815,6 @@ static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
		return ret;
		return ret;
	}
	}


	INIT_DELAYED_WORK(&host->bus_vote_work, sdhci_msm_bus_work);

	return ret;
	return ret;
}
}


@@ -2799,9 +2834,9 @@ static void sdhci_msm_bus_voting(struct sdhci_host *host, bool enable)


	if (enable) {
	if (enable) {
		bw = sdhci_get_bw_required(host, ios);
		bw = sdhci_get_bw_required(host, ios);
		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
		sdhci_msm_bus_get_and_set_vote(host, bw);
	} else
	} else
		sdhci_msm_bus_queue_work(host);
		sdhci_msm_bus_get_and_set_vote(host, 0);
}
}


/*****************************************************************************\
/*****************************************************************************\
@@ -3067,6 +3102,450 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
		msm_host->use_7nm_dll = true;
		msm_host->use_7nm_dll = true;
}
}


/*
 * Delayed work to gate the SDHC clocks after a period of inactivity:
 * save the controller registers (they are lost once clocks stop),
 * disable the bulk clocks, then drop the bus bandwidth vote.
 * Scheduled from runtime suspend with clk_gating_delay msec delay and
 * cancelled in runtime resume.
 */
static void sdhci_msm_clkgate_bus_delayed_work(struct work_struct *work)
{
	struct sdhci_msm_host *msm_host = container_of(work,
			struct sdhci_msm_host, clk_gating_work.work);
	struct sdhci_host *host = mmc_priv(msm_host->mmc);

	sdhci_msm_registers_save(host);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
					msm_host->bulk_clks);
	sdhci_msm_bus_voting(host, false);
}

/*
 * Find the qos cpu group whose mask contains @cpu, or NULL if @cpu is
 * out of range or belongs to no configured group.
 */
static struct qos_cpu_group *cpu_to_group(struct sdhci_msm_qos_req *r, int cpu)
{
	int i;
	struct qos_cpu_group *g = r->qcg;

	/*
	 * Valid CPU ids are 0..num_possible_cpus()-1, so the upper bound
	 * must be rejected with >= (the previous '>' admitted the
	 * one-past-the-end id).  Assumes a dense possible mask — TODO
	 * confirm nr_cpu_ids is not sparse on target platforms.
	 */
	if (cpu < 0 || cpu >= num_possible_cpus())
		return NULL;

	for (i = 0; i < r->num_groups; i++, g++) {
		if (cpumask_test_cpu(cpu, &g->mask))
			return g;
	}

	return NULL;
}

/*
 * Apply or remove a dev_pm_qos resume-latency vote for every CPU in
 * @qcg.  @type indexes qcg->votes[]; QOS_MAX instead votes S32_MAX,
 * i.e. "no constraint".  Returns 0 on success or the first negative
 * error from dev_pm_qos_update_request() (CPUs earlier in the mask
 * keep the new value in that case).
 */
static int sdhci_msm_update_qos_constraints(struct qos_cpu_group *qcg,
							enum constraint type)
{
	unsigned int vote;
	int cpu, err;
	struct dev_pm_qos_request *qos_req = qcg->qos_req;

	if (type == QOS_MAX)
		vote = S32_MAX;
	else
		vote = qcg->votes[type];

	/*
	 * NOTE(review): curr_vote is declared bool in struct
	 * qos_cpu_group, so the assignment below truncates the vote and
	 * this dedup check only matches for votes of 0/1 — confirm the
	 * field was meant to hold the full latency value.
	 */
	if (qcg->curr_vote == vote)
		return 0;

	/* One request per CPU, laid out in mask iteration order */
	for_each_cpu(cpu, &qcg->mask) {
		err = dev_pm_qos_update_request(qos_req, vote);
		if (err < 0)
			return err;
		++qos_req;
	}

	if (type == QOS_MAX)
		qcg->voted = false;
	else
		qcg->voted = true;

	qcg->curr_vote = vote;

	return 0;
}

/*
 * Unregister the group's dev_pm_qos requests, one per CPU in its mask.
 * Requests that were never added (or were already removed) are skipped
 * via dev_pm_qos_request_active(), so this is safe on a partially
 * initialized group.  Returns 0 or the first negative error from
 * dev_pm_qos_remove_request().
 */
static int remove_group_qos(struct qos_cpu_group *qcg)
{
	int err, cpu;
	struct dev_pm_qos_request *qos_req = qcg->qos_req;

	for_each_cpu(cpu, &qcg->mask) {
		if (!dev_pm_qos_request_active(qos_req)) {
			++qos_req;
			continue;
		}
		err = dev_pm_qos_remove_request(qos_req);
		if (err < 0)
			return err;
		qos_req++;
	}

	return 0;
}

/* Register pm qos request */
static int add_group_qos(struct qos_cpu_group *qcg, enum constraint type)
{
	int cpu, err;
	struct dev_pm_qos_request *qos_req = qcg->qos_req;

	for_each_cpu(cpu, &qcg->mask) {
		memset(qos_req, 0,
				sizeof(struct dev_pm_qos_request));
		err = dev_pm_qos_add_request(get_cpu_device(cpu),
				qos_req,
				DEV_PM_QOS_RESUME_LATENCY,
				type);
		if (err < 0)
			return err;
		qos_req++;
	}
	return 0;
}

/*
 * Delayed work to drop all pm qos votes (vote S32_MAX on every group).
 * Scheduled from runtime suspend after pm_qos_delay msec; cancelled in
 * runtime resume.  Failures are logged but not propagated — the work
 * item has no caller to report to.
 */
static void sdhci_msm_unvote_qos_all(struct work_struct *work)
{
	struct sdhci_msm_host *msm_host = container_of(work,
			struct sdhci_msm_host, pmqos_unvote_work.work);
	struct sdhci_msm_qos_req *qos_req = msm_host->sdhci_qos;
	struct qos_cpu_group *qcg;
	int i, err;

	if (!qos_req)
		return;
	qcg = qos_req->qcg;
	/*
	 * NOTE(review): the loop stops at the first group with
	 * initialized == false, so groups after it are never unvoted —
	 * intended only if initialization always completes in order.
	 */
	for (i = 0; ((i < qos_req->num_groups) && qcg->initialized); i++,
								qcg++) {
		err = sdhci_msm_update_qos_constraints(qcg, QOS_MAX);
		if (err)
			dev_err(&msm_host->pdev->dev,
				"Failed (%d) removing qos vote(%d)\n", err, i);
	}
}

/*
 * Place a QOS_PERF vote for the group containing @cpu (normally the
 * CPU the SDHC irq is affined to).  A no-op if @cpu maps to no
 * configured group or the group is already voted.
 */
static void sdhci_msm_vote_pmqos(struct mmc_host *mmc, int cpu)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct qos_cpu_group *qcg;

	qcg = cpu_to_group(msm_host->sdhci_qos, cpu);
	if (!qcg) {
		dev_dbg(&msm_host->pdev->dev, "QoS group is undefined\n");
		return;
	}

	/* Skip the update when this group's constraint is already applied */
	if (qcg->voted)
		return;

	if (sdhci_msm_update_qos_constraints(qcg, QOS_PERF))
		dev_err(&qcg->host->pdev->dev, "%s: update qos - failed\n",
				__func__);
	dev_dbg(&msm_host->pdev->dev, "Voted pmqos - cpu: %d\n", cpu);
}

/**
 * sdhci_msm_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 * On a change: record the new first CPU as active_mask, drop every
 * group's vote, cancel the pending delayed unvote, and re-vote for the
 * new CPU's group.
 */
static void
sdhci_msm_irq_affinity_notify(struct irq_affinity_notify *notify,
		const cpumask_t *mask)
{
	struct sdhci_msm_host *msm_host =
		container_of(notify, struct sdhci_msm_host, affinity_notify);
	struct platform_device *pdev = msm_host->pdev;
	struct sdhci_msm_qos_req *qos_req = msm_host->sdhci_qos;
	struct qos_cpu_group *qcg;
	int i, err;

	if (!qos_req)
		return;
	/*
	 * If device is in suspend mode, just update the active mask,
	 * vote would be taken care when device resumes.
	 */
	msm_host->sdhci_qos->active_mask = cpumask_first(mask);
	if (pm_runtime_status_suspended(&pdev->dev))
		return;

	/* Drop every group's vote before voting for the new CPU's group */
	qcg = qos_req->qcg;
	for (i = 0; i < qos_req->num_groups; i++, qcg++) {
		err = sdhci_msm_update_qos_constraints(qcg, QOS_MAX);
		if (err)
			pr_err("%s: Failed (%d) removing qos vote of grp(%d)\n",
					mmc_hostname(msm_host->mmc), err, i);
	}

	/* A pending delayed unvote would race with the fresh vote below */
	cancel_delayed_work_sync(&msm_host->pmqos_unvote_work);
	sdhci_msm_vote_pmqos(msm_host->mmc,
			msm_host->sdhci_qos->active_mask);
}

/**
 * sdhci_msm_irq_affinity_release - affinity notifier release callback
 * @ref: kref supplied by the irq core; unused here
 *
 * Invoked by the irq core when this subscriber is unregistered from
 * affinity notifications.  There is nothing to clean up, so the body
 * is intentionally empty.  ('inline' dropped: the function is only
 * ever reached through a function pointer.)
 */
static void sdhci_msm_irq_affinity_release(struct kref __always_unused *ref)
{
}

/* Function for settig up qos based on parsed dt entries */
static int sdhci_msm_setup_qos(struct sdhci_msm_host *msm_host)
{
	struct platform_device *pdev = msm_host->pdev;
	struct sdhci_msm_qos_req *qr = msm_host->sdhci_qos;
	struct qos_cpu_group *qcg = qr->qcg;
	struct mmc_host *mmc = msm_host->mmc;
	struct sdhci_host *host = mmc_priv(mmc);
	int i, err;

	if (!msm_host->sdhci_qos)
		return 0;

	/* Affine irq to first set of mask */
	WARN_ON(irq_set_affinity_hint(host->irq, &qcg->mask));

	/* Setup notifier for case of affinity change/migration */
	msm_host->affinity_notify.notify = sdhci_msm_irq_affinity_notify;
	msm_host->affinity_notify.release = sdhci_msm_irq_affinity_release;
	irq_set_affinity_notifier(host->irq, &msm_host->affinity_notify);

	for (i = 0; i < qr->num_groups; i++, qcg++) {
		qcg->qos_req = kcalloc(cpumask_weight(&qcg->mask),
				sizeof(struct dev_pm_qos_request),
				GFP_KERNEL);
		if (!qcg->qos_req) {
			dev_err(&pdev->dev, "Memory allocation failed\n");
			if (!i)
				return -ENOMEM;
			goto free_mem;
		}
		err = add_group_qos(qcg, S32_MAX);
		if (err < 0) {
			dev_err(&pdev->dev, "Fail (%d) add qos-req: grp-%d\n",
					err, i);
			if (!i) {
				kfree(qcg->qos_req);
				return err;
			}
			goto free_mem;
		}
		qcg->initialized = true;
		dev_dbg(&pdev->dev, "%s: qcg: 0x%08x | mask: 0x%08x\n",
				 __func__, qcg, qcg->mask);
	}

	INIT_DELAYED_WORK(&msm_host->pmqos_unvote_work,
			sdhci_msm_unvote_qos_all);

	/* Vote pmqos during setup for first set of mask*/
	sdhci_msm_update_qos_constraints(qr->qcg, QOS_PERF);
	qr->active_mask = cpumask_first(&qr->qcg->mask);
	return 0;

free_mem:
	while (i--) {
		kfree(qcg->qos_req);
		qcg--;
	}

	return err;
}

/*
 * QoS init function.  Parses the host's DT child nodes — one per CPU
 * cluster, each with a "mask" (CPU bitmap) and "vote" (latency values
 * indexed by enum constraint) property — then hands the populated
 * groups to sdhci_msm_setup_qos().  QoS is best-effort: every failure
 * path frees everything and leaves msm_host->sdhci_qos NULL.
 */
static void sdhci_msm_qos_init(struct sdhci_msm_host *msm_host)
{
	struct platform_device *pdev = msm_host->pdev;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *group_node;
	struct sdhci_msm_qos_req *qr;
	struct qos_cpu_group *qcg;
	int i, err, mask = 0;

	qr = kzalloc(sizeof(*qr), GFP_KERNEL);
	if (!qr)
		return;

	msm_host->sdhci_qos = qr;

	/* find numbers of qos child node present */
	qr->num_groups = of_get_available_child_count(np);
	dev_dbg(&pdev->dev, "num-groups: %d\n", qr->num_groups);
	if (!qr->num_groups) {
		dev_err(&pdev->dev, "QoS groups undefined\n");
		kfree(qr);
		msm_host->sdhci_qos = NULL;
		return;
	}
	qcg = kzalloc(sizeof(*qcg) * qr->num_groups, GFP_KERNEL);
	if (!qcg) {
		msm_host->sdhci_qos = NULL;
		kfree(qr);
		return;
	}

	/*
	 * Assign qos cpu group/cluster to host qos request and
	 * read child entries of qos node
	 */
	qr->qcg = qcg;
	for_each_available_child_of_node(np, group_node) {
		err = of_property_read_u32(group_node, "mask", &mask);
		if (err) {
			/*
			 * NOTE(review): 'continue' leaves qcg in place,
			 * so a bad child leaves a zeroed trailing group
			 * while num_groups still counts it — confirm
			 * setup_qos tolerates an empty mask.
			 */
			dev_dbg(&pdev->dev, "Error reading group mask: %d\n",
					err);
			continue;
		}
		/* Only covers CPUs 0..BITS_PER_LONG-1 — TODO confirm OK */
		qcg->mask.bits[0] = mask;
		if (!cpumask_subset(&qcg->mask, cpu_possible_mask)) {
			dev_err(&pdev->dev, "Invalid group mask\n");
			goto out_vote_err;
		}

		err = of_property_count_u32_elems(group_node, "vote");
		if (err <= 0) {
			dev_err(&pdev->dev, "1 vote is needed, bailing out\n");
			goto out_vote_err;
		}
		qcg->votes = kmalloc(sizeof(*qcg->votes) * err, GFP_KERNEL);
		if (!qcg->votes)
			goto out_vote_err;
		for (i = 0; i < err; i++) {
			if (of_property_read_u32_index(group_node, "vote", i,
						&qcg->votes[i]))
				goto out_vote_err;
		}
		qcg->host = msm_host;
		++qcg;
	}
	err = sdhci_msm_setup_qos(msm_host);
	if (!err)
		return;
	dev_err(&pdev->dev, "Failed to setup PM QoS.\n");

out_vote_err:
	/* kfree(NULL) is a no-op, so unparsed groups are safe to free */
	for (i = 0, qcg = qr->qcg; i < qr->num_groups; i++, qcg++)
		kfree(qcg->votes);
	kfree(qr->qcg);
	kfree(qr);
	msm_host->sdhci_qos = NULL;
}

/* sysfs 'clk_gating' read: report the gating delay in msec */
static ssize_t show_sdhci_msm_clk_gating(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct sdhci_host *sdhost = dev_get_drvdata(dev);
	struct sdhci_msm_host *msm = sdhci_pltfm_priv(sdhci_priv(sdhost));

	return scnprintf(buf, PAGE_SIZE, "%u\n", msm->clk_gating_delay);
}

/* sysfs 'clk_gating' write: update the gating delay; bad input ignored */
static ssize_t store_sdhci_msm_clk_gating(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sdhci_host *sdhost = dev_get_drvdata(dev);
	struct sdhci_msm_host *msm = sdhci_pltfm_priv(sdhci_priv(sdhost));
	uint32_t delay;

	/* Silently keep the old value on parse failure, as before */
	if (kstrtou32(buf, 0, &delay))
		return count;

	msm->clk_gating_delay = delay;
	dev_info(dev, "set clk scaling work delay (%u)\n", delay);
	return count;
}

/* sysfs 'pm_qos' read: report the qos unvote delay in msec */
static ssize_t show_sdhci_msm_pm_qos(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct sdhci_host *sdhost = dev_get_drvdata(dev);
	struct sdhci_msm_host *msm = sdhci_pltfm_priv(sdhci_priv(sdhost));

	return scnprintf(buf, PAGE_SIZE, "%u\n", msm->pm_qos_delay);
}

/* sysfs 'pm_qos' write: update the unvote delay; bad input ignored */
static ssize_t store_sdhci_msm_pm_qos(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sdhci_host *sdhost = dev_get_drvdata(dev);
	struct sdhci_msm_host *msm = sdhci_pltfm_priv(sdhci_priv(sdhost));
	uint32_t delay;

	/* Silently keep the old value on parse failure, as before */
	if (kstrtou32(buf, 0, &delay))
		return count;

	msm->pm_qos_delay = delay;
	dev_info(dev, "set pm qos work delay (%u)\n", delay);
	return count;
}

/*
 * Create the 'clk_gating' and 'pm_qos' sysfs attributes (mode 0644)
 * that expose the two delay tunables.  Creation failures are logged
 * but non-fatal — the driver works without the tunables.
 */
static void sdhci_msm_init_sysfs_gating_qos(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	msm_host->clk_gating.show = show_sdhci_msm_clk_gating;
	msm_host->clk_gating.store = store_sdhci_msm_clk_gating;
	sysfs_attr_init(&msm_host->clk_gating.attr);
	msm_host->clk_gating.attr.name = "clk_gating";
	msm_host->clk_gating.attr.mode = 0644;
	ret = device_create_file(dev, &msm_host->clk_gating);
	if (ret) {
		pr_err("%s: %s: failed creating clk gating attr: %d\n",
				mmc_hostname(host->mmc), __func__, ret);
	}

	msm_host->pm_qos.show = show_sdhci_msm_pm_qos;
	msm_host->pm_qos.store = store_sdhci_msm_pm_qos;
	sysfs_attr_init(&msm_host->pm_qos.attr);
	msm_host->pm_qos.attr.name = "pm_qos";
	msm_host->pm_qos.attr.mode = 0644;
	ret = device_create_file(dev, &msm_host->pm_qos);
	if (ret) {
		pr_err("%s: %s: failed creating pm qos attr: %d\n",
				mmc_hostname(host->mmc), __func__, ret);
	}
}

/*
 * Enable runtime PM for the platform device.  The device is marked
 * active with usage count held (caller balances the get_noresume).
 * Autosuspend is configured only when the host does not use
 * MMC_CAP_SYNC_RUNTIME_PM, since that cap implies synchronous
 * suspend/resume without a delay.
 */
static void sdhci_msm_setup_pm(struct platform_device *pdev,
			struct sdhci_msm_host *msm_host)
{
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	if (!(msm_host->mmc->caps & MMC_CAP_SYNC_RUNTIME_PM)) {
		pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
		pm_runtime_use_autosuspend(&pdev->dev);
	}
}

static int sdhci_msm_probe(struct platform_device *pdev)
static int sdhci_msm_probe(struct platform_device *pdev)
{
{
	struct sdhci_host *host;
	struct sdhci_host *host;
@@ -3103,6 +3582,13 @@ static int sdhci_msm_probe(struct platform_device *pdev)
			dev_err(&pdev->dev, "get slot index failed %d\n", ret);
			dev_err(&pdev->dev, "get slot index failed %d\n", ret);
		else if (ret <= 2)
		else if (ret <= 2)
			sdhci_slot[ret-1] = msm_host;
			sdhci_slot[ret-1] = msm_host;

		if (of_property_read_bool(dev->of_node, "non-removable") &&
				strlen(android_boot_dev) &&
				strcmp(android_boot_dev, dev_name(dev))) {
			ret = -ENODEV;
			goto pltfm_free;
		}
	}
	}


	/*
	/*
@@ -3192,6 +3678,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}
	}


	INIT_DELAYED_WORK(&msm_host->clk_gating_work,
			sdhci_msm_clkgate_bus_delayed_work);

	ret = sdhci_msm_bus_register(msm_host, pdev);
	ret = sdhci_msm_bus_register(msm_host, pdev);
	if (ret && !msm_host->skip_bus_bw_voting) {
	if (ret && !msm_host->skip_bus_bw_voting) {
		dev_err(&pdev->dev, "Bus registration failed (%d)\n", ret);
		dev_err(&pdev->dev, "Bus registration failed (%d)\n", ret);
@@ -3298,20 +3787,27 @@ static int sdhci_msm_probe(struct platform_device *pdev)
		goto vreg_deinit;
		goto vreg_deinit;
	}
	}


	msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
	msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM | MMC_CAP_SYNC_RUNTIME_PM;
	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;


#if defined(CONFIG_SDC_QTI)
#if defined(CONFIG_SDC_QTI)
	msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
	msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
#endif
#endif
	pm_runtime_get_noresume(&pdev->dev);
	sdhci_msm_setup_pm(pdev, msm_host);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);


	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;

	msm_host->workq = create_workqueue("sdhci_msm_generic_swq");
	if (!msm_host->workq)
		dev_err(&pdev->dev, "Generic swq creation failed\n");

	msm_host->clk_gating_delay = MSM_CLK_GATING_DELAY_MS;
	msm_host->pm_qos_delay = MSM_PMQOS_UNVOTING_DELAY_MS;
	/* Initialize pmqos */
	sdhci_msm_qos_init(msm_host);
	/* Initialize sysfs entries */
	sdhci_msm_init_sysfs_gating_qos(dev);

	if (of_property_read_bool(node, "supports-cqe"))
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
	else
@@ -3347,7 +3843,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
	sdhci_msm_vreg_init(&pdev->dev, msm_host, false);
	sdhci_msm_vreg_init(&pdev->dev, msm_host, false);
bus_unregister:
bus_unregister:
	if (!msm_host->skip_bus_bw_voting) {
	if (!msm_host->skip_bus_bw_voting) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		sdhci_msm_bus_get_and_set_vote(host, 0);
		sdhci_msm_bus_unregister(&pdev->dev, msm_host);
		sdhci_msm_bus_unregister(&pdev->dev, msm_host);
	}
	}
clk_disable:
clk_disable:
@@ -3366,6 +3862,9 @@ static int sdhci_msm_remove(struct platform_device *pdev)
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_qos_req *r = msm_host->sdhci_qos;
	struct qos_cpu_group *qcg;
	int i;
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);
		    0xffffffff);


@@ -3374,6 +3873,19 @@ static int sdhci_msm_remove(struct platform_device *pdev)
	sdhci_msm_vreg_init(&pdev->dev, msm_host, false);
	sdhci_msm_vreg_init(&pdev->dev, msm_host, false);


	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* Add delay to complete resume where qos vote is scheduled */
	if (!r)
		goto skip_removing_qos;
	qcg = r->qcg;
	msleep(50);
	for (i = 0; i < r->num_groups; i++, qcg++) {
		sdhci_msm_update_qos_constraints(qcg, QOS_MAX);
		remove_group_qos(qcg);
	}
	destroy_workqueue(msm_host->workq);

skip_removing_qos:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);


@@ -3382,7 +3894,7 @@ static int sdhci_msm_remove(struct platform_device *pdev)
	if (!IS_ERR(msm_host->bus_clk))
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
		clk_disable_unprepare(msm_host->bus_clk);
	if (!msm_host->skip_bus_bw_voting) {
	if (!msm_host->skip_bus_bw_voting) {
		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
		sdhci_msm_bus_get_and_set_vote(host, 0);
		sdhci_msm_bus_unregister(&pdev->dev, msm_host);
		sdhci_msm_bus_unregister(&pdev->dev, msm_host);
	}
	}
	sdhci_pltfm_free(pdev);
	sdhci_pltfm_free(pdev);
@@ -3394,11 +3906,19 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_qos_req *qos_req = msm_host->sdhci_qos;

	if (!qos_req)
		goto skip_qos;
	queue_delayed_work(msm_host->workq,
			&msm_host->pmqos_unvote_work,
			msecs_to_jiffies(msm_host->pm_qos_delay));

skip_qos:
	queue_delayed_work(msm_host->workq,
			&msm_host->clk_gating_work,
			msecs_to_jiffies(msm_host->clk_gating_delay));


	sdhci_msm_registers_save(host);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	sdhci_msm_bus_voting(host, false);
	return 0;
	return 0;
}
}


@@ -3407,12 +3927,19 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_msm_qos_req *qos_req = msm_host->sdhci_qos;
	int ret;
	int ret;


	ret = cancel_delayed_work_sync(&msm_host->clk_gating_work);
	if (!ret) {
		sdhci_msm_bus_voting(host, true);
		ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
		ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
					       msm_host->bulk_clks);
					       msm_host->bulk_clks);
	if (ret)
		if (ret) {
			dev_err(dev, "Failed to enable clocks %d\n", ret);
			sdhci_msm_bus_voting(host, false);
			return ret;
			return ret;
		}


		sdhci_msm_registers_restore(host);
		sdhci_msm_registers_restore(host);
		/*
		/*
@@ -3420,9 +3947,16 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
		 * restore the SDR DLL settings when the clock is ungated.
		 * restore the SDR DLL settings when the clock is ungated.
		 */
		 */
		if (msm_host->restore_dll_config && msm_host->clk_rate)
		if (msm_host->restore_dll_config && msm_host->clk_rate)
		return sdhci_msm_restore_sdr_dll_config(host);
			sdhci_msm_restore_sdr_dll_config(host);
	}

	if (!qos_req)
		return 0;
	ret = cancel_delayed_work_sync(&msm_host->pmqos_unvote_work);
	if (!ret)
		sdhci_msm_vote_pmqos(msm_host->mmc,
					msm_host->sdhci_qos->active_mask);


	sdhci_msm_bus_voting(host, true);
	return 0;
	return 0;
}
}