Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6a305f3b authored by Taniya Das's avatar Taniya Das Committed by Chetan C R
Browse files

clk: qcom: cpu: Add support to L2 PC latency for SDM429/39



This adds support for L2 power collapse (PC) during LPM mode and also
adds support to check whether the requested frequency can be derived
from GPLL0 for SDM429, SDM439 and QM215.

Change-Id: Ic1e45de1fde42b75cd13b8bdf2953331c9381a77
Signed-off-by: default avatarTaniya Das <tdas@codeaurora.org>
parent 2da454b4
Loading
Loading
Loading
Loading
+118 −60
Original line number Diff line number Diff line
@@ -38,19 +38,6 @@ enum apcs_mux_clk_parent {
	P_APCS_CPU_PLL,
};

/*
 * PLL SPM (Subsystem Power Manager) control-register layout used by
 * spm_event() to force an SPM event around a CPU PLL rate change.
 */
struct pll_spm_ctrl {
	u32 offset;			/* L2_SPM_FORCE_EVENT_EN register offset */
	u32 force_event_offset;		/* further offset to L2_SPM_FORCE_EVENT */
	u32 event_bit;			/* bit toggled in both registers */
	void __iomem *spm_base;		/* mapped SPM base; NOTE(review): set elsewhere, not visible here — confirm */
};

/* APCS PLL SPM descriptor; spm_base is presumably filled in at probe time. */
static struct pll_spm_ctrl apcs_pll_spm = {
	.offset = 0x50,
	.force_event_offset = 0x4,
	.event_bit = 0x4,
};

static const struct parent_map apcs_mux_clk_parent_map0[] = {
	{ P_BI_TCXO_AO, 0 },
	{ P_GPLL0_AO_OUT_MAIN, 4 },
@@ -119,6 +106,20 @@ static int cpucc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
	return mux_div_set_src_div(cpuclk, cpuclk->src, cpuclk->div);
}

/*
 * freq_from_gpll0() - check whether @req_rate can be derived exactly
 * from @gpll0_rate through one of the supported post-dividers.
 *
 * The supported fractional dividers are 1.0 to 4.0 in steps of 0.5,
 * represented here in tenths (10, 15, ..., 40); each candidate rate is
 * gpll0_rate * 10 / div_tenths, computed overflow-safely via mult_frac().
 *
 * Return: true if @req_rate matches one of the derivable rates.
 */
static bool freq_from_gpll0(unsigned long req_rate, unsigned long gpll0_rate)
{
	int div_tenths = 10;

	while (div_tenths <= 40) {
		if (req_rate == mult_frac(gpll0_rate, 10, div_tenths))
			return true;
		div_tenths += 5;
	}

	return false;
}

static int cpucc_clk_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
@@ -145,7 +146,7 @@ static int cpucc_clk_determine_rate(struct clk_hw *hw,
	apcs_gpll0_rate = clk_hw_get_rate(apcs_gpll0_hw);
	apcs_gpll0_rrate = DIV_ROUND_UP(apcs_gpll0_rate, 1000000) * 1000000;

	if (rate <= apcs_gpll0_rrate) {
	if (freq_from_gpll0(rate, apcs_gpll0_rrate)) {
		req->best_parent_hw = apcs_gpll0_hw;
		req->best_parent_rate = apcs_gpll0_rrate;
		div = DIV_ROUND_CLOSEST(2 * apcs_gpll0_rrate, rate) - 1;
@@ -235,48 +236,7 @@ static u8 cpucc_clk_get_parent(struct clk_hw *hw)
	return clk_regmap_mux_div_ops.get_parent(hw);
}

/*
 * spm_event() - assert or de-assert the L2 SPM force-event.
 *
 * When @enable is true, L2_SPM_FORCE_EVENT_EN is set first and then
 * L2_SPM_FORCE_EVENT; disabling reverses the order.  Every write is
 * followed by mb() so it reaches the SPM before the next step.
 * Silently does nothing if the descriptor or its base is not set up.
 */
static void spm_event(struct pll_spm_ctrl *apcs_pll_spm, bool enable)
{
	void __iomem *base;
	u32 offset, force_event_offset, bit, val;

	/*
	 * Validate the descriptor before touching it: the previous code
	 * read ->spm_base before the !apcs_pll_spm check, which is
	 * undefined behaviour on a NULL pointer and lets the compiler
	 * optimize the check away entirely.
	 */
	if (!apcs_pll_spm)
		return;

	base = apcs_pll_spm->spm_base;
	if (!base)
		return;

	offset = apcs_pll_spm->offset;
	force_event_offset = apcs_pll_spm->force_event_offset;
	bit = apcs_pll_spm->event_bit;

	if (enable) {
		/* L2_SPM_FORCE_EVENT_EN */
		val = readl_relaxed(base + offset);
		val |= BIT(bit);
		writel_relaxed(val, (base + offset));
		/* Ensure that the write above goes through. */
		mb();

		/* L2_SPM_FORCE_EVENT */
		val = readl_relaxed(base + offset + force_event_offset);
		val |= BIT(bit);
		writel_relaxed(val, (base + offset + force_event_offset));
		/* Ensure that the write above goes through. */
		mb();
	} else {
		/* L2_SPM_FORCE_EVENT */
		val = readl_relaxed(base + offset + force_event_offset);
		val &= ~BIT(bit);
		writel_relaxed(val, (base + offset + force_event_offset));
		/* Ensure that the write above goes through. */
		mb();

		/* L2_SPM_FORCE_EVENT_EN */
		val = readl_relaxed(base + offset);
		val &= ~BIT(bit);
		writel_relaxed(val, (base + offset));
		/* Ensure that the write above goes through. */
		mb();
	}
}
/*
 * No-op IPI callback handed to smp_call_function_any() in
 * cpucc_notifier_cb(); presumably the cross-call itself wakes a CPU in
 * the target cluster so the new pm_qos latency vote takes effect —
 * NOTE(review): confirm against the LPM driver.
 */
static void do_nothing(void *unused) { }

/*
 * We use the notifier function for switching to a temporary safe configuration
@@ -287,17 +247,31 @@ static int cpucc_notifier_cb(struct notifier_block *nb, unsigned long event,
{
	struct clk_regmap_mux_div *cpuclk = container_of(nb,
					struct clk_regmap_mux_div, clk_nb);
	bool hw_low_power_ctrl = cpuclk->clk_lpm.hw_low_power_ctrl;
	int ret = 0, safe_src = cpuclk->safe_src;

	switch (event) {
	case PRE_RATE_CHANGE:
		if (hw_low_power_ctrl) {
			memset(&cpuclk->clk_lpm.req, 0,
					sizeof(cpuclk->clk_lpm.req));
			cpumask_copy(&cpuclk->clk_lpm.req.cpus_affine,
			(const struct cpumask *)&cpuclk->clk_lpm.cpu_reg_mask);
			cpuclk->clk_lpm.req.type = PM_QOS_REQ_AFFINE_CORES;
			pm_qos_add_request(&cpuclk->clk_lpm.req,
				PM_QOS_CPU_DMA_LATENCY,
				cpuclk->clk_lpm.cpu_latency_no_l2_pc_us - 1);
			smp_call_function_any(&cpuclk->clk_lpm.cpu_reg_mask,
				do_nothing, NULL, 1);
		}

		/* set the mux to safe source gpll0_ao_out & div */
		ret = mux_div_set_src_div(cpuclk, safe_src, 1);
		spm_event(&apcs_pll_spm, true);
		mux_div_set_src_div(cpuclk, safe_src, 1);
		break;
	case POST_RATE_CHANGE:
		if (cpuclk->src != safe_src)
			spm_event(&apcs_pll_spm, false);
		if (hw_low_power_ctrl)
			pm_qos_remove_request(&cpuclk->clk_lpm.req);

		break;
	case ABORT_RATE_CHANGE:
		pr_err("Error in configuring PLL - stay at safe src only\n");
@@ -340,6 +314,10 @@ static struct clk_pll apcs_cpu_pll0 = {
	.config_reg = 0x10,
	.status_reg = 0x1c,
	.status_bit = 16,
	.spm_ctrl = {
		.offset = 0x50,
		.event_bit = 0x4,
	},
	.clkr.hw.init = &(struct clk_init_data){
		.name = "apcs_cpu_pll0",
		.parent_names = (const char *[]){ "bi_tcxo_ao" },
@@ -364,6 +342,16 @@ static struct clk_regmap_mux_div apcs_mux_c0_clk = {
	.safe_div = 1,
	.parent_map = apcs_mux_clk_parent_map0,
	.clk_nb.notifier_call = cpucc_notifier_cb,
	.clk_lpm = {
		/* CPU 4 - 7 */
		.cpu_reg_mask = { 0xf0 },
		.latency_lvl = {
			.affinity_level = LPM_AFF_LVL_L2,
			.reset_level = LPM_RESET_LVL_GDHS,
			.level_name = "pwr",
		},
		.cpu_latency_no_l2_pc_us = 300,
	},
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "apcs_mux_c0_clk",
		.parent_names = apcs_mux_clk_c0_parent_name0,
@@ -382,6 +370,10 @@ static struct clk_pll apcs_cpu_pll1 = {
	.config_reg = 0x10,
	.status_reg = 0x1c,
	.status_bit = 16,
	.spm_ctrl = {
		.offset = 0x50,
		.event_bit = 0x4,
	},
	.clkr.hw.init = &(struct clk_init_data){
		.name = "apcs_cpu_pll1",
		.parent_names = (const char *[]){ "bi_tcxo_ao" },
@@ -406,6 +398,16 @@ static struct clk_regmap_mux_div apcs_mux_c1_clk = {
	.safe_div = 1,
	.parent_map = apcs_mux_clk_parent_map0,
	.clk_nb.notifier_call = cpucc_notifier_cb,
	.clk_lpm  = {
		/* CPU 0 - 3*/
		.cpu_reg_mask = { 0xf },
		.latency_lvl = {
			.affinity_level = LPM_AFF_LVL_L2,
			.reset_level = LPM_RESET_LVL_GDHS,
			.level_name = "perf",
		},
		.cpu_latency_no_l2_pc_us = 300,
	},
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "apcs_mux_c1_clk",
		.parent_names = apcs_mux_clk_c1_parent_name0,
@@ -1144,6 +1146,7 @@ static int cpucc_driver_probe(struct platform_device *pdev)

	/* For safe freq switching during rate change */
	if (is_sdm439) {
		apcs_mux_c0_clk.clk_lpm.hw_low_power_ctrl = true;
		ret = clk_notifier_register(apcs_mux_c0_clk.clkr.hw.clk,
						&apcs_mux_c0_clk.clk_nb);
		if (ret) {
@@ -1151,8 +1154,14 @@ static int cpucc_driver_probe(struct platform_device *pdev)
					ret);
			return ret;
		}
		ret = clk_prepare_enable(apcs_cpu_pll0.clkr.hw.clk);
		if (ret) {
			dev_err(dev, "failed to Enable PLL0 clock: %d\n", ret);
			return ret;
		}
	}

	apcs_mux_c1_clk.clk_lpm.hw_low_power_ctrl = true;
	ret = clk_notifier_register(apcs_mux_c1_clk.clkr.hw.clk,
						&apcs_mux_c1_clk.clk_nb);
	if (ret) {
@@ -1160,6 +1169,12 @@ static int cpucc_driver_probe(struct platform_device *pdev)
		return ret;
	}

	ret = clk_prepare_enable(apcs_cpu_pll1.clkr.hw.clk);
	if (ret) {
		dev_err(dev, "failed to Enable PLL1 clock: %d\n", ret);
		return ret;
	}

	/*
	 * To increase the enable count for the clocks so
	 * that they dont get disabled during late init.
@@ -1336,6 +1351,49 @@ static int __init cpu_clock_init(void)
}
early_initcall(cpu_clock_init);

/*
 * clock_cpu_lpm_get_latency() - cache the L2 power-collapse latency of
 * each CPU cluster in its mux clock's clk_lpm data, for use by the
 * rate-change notifier.
 *
 * Runs as a late initcall so the LPM driver has already probed.  Only
 * SDM439 has the second ("pwr") cluster, so its latency is queried only
 * when the sdm439 compatible is present.
 *
 * Return: result of the last lpm_get_latency() call, or -ENODEV when no
 * supported compatible node exists.
 */
static int __init clock_cpu_lpm_get_latency(void)
{
	int rc;
	bool is_sdm439 = false;
	struct device_node *ofnode = of_find_compatible_node(NULL, NULL,
					"qcom,cpu-clock-sdm439");
	if (ofnode)
		is_sdm439 = true;

	if (!ofnode)
		ofnode = of_find_compatible_node(NULL, NULL,
				"qcom,cpu-clock-sdm429");

	if (!ofnode)
		ofnode = of_find_compatible_node(NULL, NULL,
				"qcom,cpu-clock-qm215");

	if (!ofnode) {
		pr_err("device node not initialized\n");
		/* The node is absent, not out of memory: -ENODEV, not -ENOMEM. */
		return -ENODEV;
	}

	/* Drop the reference taken by of_find_compatible_node(). */
	of_node_put(ofnode);

	rc = lpm_get_latency(&apcs_mux_c1_clk.clk_lpm.latency_lvl,
			&apcs_mux_c1_clk.clk_lpm.cpu_latency_no_l2_pc_us);
	if (rc < 0)
		pr_err("Failed to get the L2 PC value for perf\n");

	if (is_sdm439) {
		rc = lpm_get_latency(&apcs_mux_c0_clk.clk_lpm.latency_lvl,
			&apcs_mux_c0_clk.clk_lpm.cpu_latency_no_l2_pc_us);
		if (rc < 0)
			pr_err("Failed to get the L2 PC value for pwr\n");
		pr_debug("Latency for pwr cluster : %d\n",
			apcs_mux_c0_clk.clk_lpm.cpu_latency_no_l2_pc_us);
	}

	pr_debug("Latency for perf cluster : %d\n",
		apcs_mux_c1_clk.clk_lpm.cpu_latency_no_l2_pc_us);

	return rc;
}
late_initcall_sync(clock_cpu_lpm_get_latency);

MODULE_ALIAS("platform:cpu");
MODULE_DESCRIPTION("SDM CPU clock Driver");
MODULE_LICENSE("GPL v2");