Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5202070c authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Use bulk clock functions for GMU"

parents 16808973 ada604f0
Loading
Loading
Loading
Loading
+69 −35
Original line number Original line Diff line number Diff line
@@ -885,6 +885,40 @@ static void adreno_of_get_ca_aware_properties(struct adreno_device *adreno_dev,
	}
	}
}
}


/*
 * adreno_build_opp_table() - Dynamically build the OPP table for the GPU
 * @dev: Pointer to the GPU platform device
 * @pwr: Pointer to the KGSL power control struct holding the pwrlevel table
 *
 * Remove any OPP entries pre-populated by the clock driver that do not
 * match a supported GPU power level, then register every supported
 * frequency from the pwrlevel table.
 */
static void adreno_build_opp_table(struct device *dev, struct kgsl_pwrctrl *pwr)
{
	unsigned long freq = 0;
	int i;

	/*
	 * First an annoying step: Some targets have clock drivers that
	 * "helpfully" build an OPP table for us, but it is usually wrong.
	 * Go through and filter out unsupported frequencies.
	 */

	for (;;) {
		struct dev_pm_opp *opp = dev_pm_opp_find_freq_ceil(dev, &freq);

		if (IS_ERR(opp))
			break;

		/*
		 * dev_pm_opp_find_freq_ceil() takes a reference on the
		 * returned OPP; drop it here or the entry leaks.
		 */
		dev_pm_opp_put(opp);

		for (i = 0; i < pwr->num_pwrlevels; i++) {
			if (freq == pwr->pwrlevels[i].gpu_freq)
				break;
		}

		/* Frequency isn't in our pwrlevel table, so drop it */
		if (i == pwr->num_pwrlevels)
			dev_pm_opp_remove(dev, freq);

		freq++;
	}

	/* Now add all of our supported frequencies into the tree */
	for (i = 0; i < pwr->num_pwrlevels; i++)
		dev_pm_opp_add(dev, pwr->pwrlevels[i].gpu_freq, 0);
}

static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
		struct device_node *node)
		struct device_node *node)
{
{
@@ -893,44 +927,48 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
	struct device_node *child;
	struct device_node *child;
	int ret;
	int ret;


	/* ADD the GPU OPP table if we define it */
	if (of_find_property(device->pdev->dev.of_node,
			"operating-points-v2", NULL)) {
		ret = dev_pm_opp_of_add_table(&device->pdev->dev);
		if (ret) {
			dev_err(device->dev,
				"Unable to set the GPU OPP table: %d\n", ret);
			return ret;
		}
	}

	pwr->num_pwrlevels = 0;
	pwr->num_pwrlevels = 0;


	for_each_child_of_node(node, child) {
	for_each_child_of_node(node, child) {
		unsigned int index, freq = 0;
		u32 index, freq = 0, voltage, bus;
		struct kgsl_pwrlevel *level;
		struct kgsl_pwrlevel *level;


		if (of_property_read_u32(child, "reg", &index)) {
		ret = of_property_read_u32(child, "reg", &index);
			dev_err(device->dev,
		if (ret) {
				"%pOF: powerlevel index not found\n", child);
			dev_err(device->dev, "%pOF: powerlevel index not found\n",
			of_node_put(child);
				child);
			return -EINVAL;
			goto out;
		}
		}


		if (of_property_read_u32(child, "qcom,gpu-freq", &freq)) {
		ret = of_property_read_u32(child, "qcom,gpu-freq", &freq);
			dev_err(device->dev,
		if (ret) {
				"%pOF: Unable to read qcom,gpu-freq\n", child);
			dev_err(device->dev, "%pOF: Unable to read qcom,gpu-freq\n",
			of_node_put(child);
				child);
			return -EINVAL;
			goto out;
		}
		}


		/* Ignore "zero" powerlevels */
		/* Ignore "zero" powerlevels */
		if (!freq)
		if (!freq)
			continue;
			continue;


		ret = of_property_read_u32(child, "qcom,level", &voltage);
		if (ret) {
			dev_err(device->dev, "%pOF: Unable to read qcom,level\n",
				child);
			goto out;
		}

		ret = kgsl_of_property_read_ddrtype(child, "qcom,bus-freq",
			&bus);
		if (ret) {
			dev_err(device->dev, "%pOF:Unable to read qcom,bus-freq\n",
				child);
			goto out;
		}


		if (index >= KGSL_MAX_PWRLEVELS) {
		if (index >= KGSL_MAX_PWRLEVELS) {
			dev_err(device->dev,
			dev_err(device->dev, "%pOF: Pwrlevel index %d is out of range\n",
				"%pOF: Pwrlevel index %d is out of range\n",
				child, index);
				child, index);
			continue;
			continue;
		}
		}
@@ -941,20 +979,12 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
		level = &pwr->pwrlevels[index];
		level = &pwr->pwrlevels[index];


		level->gpu_freq = freq;
		level->gpu_freq = freq;
		level->bus_freq = bus;
		level->voltage_level = voltage;


		of_property_read_u32(child, "qcom,acd-level",
		of_property_read_u32(child, "qcom,acd-level",
			&level->acd_level);
			&level->acd_level);


		ret = kgsl_of_property_read_ddrtype(child,
			"qcom,bus-freq", &level->bus_freq);
		if (ret) {
			dev_err(device->dev,
				"%pOF: Couldn't read the bus frequency for power level %d\n",
				child, index);
			of_node_put(child);
			return ret;
		}

		level->bus_min = level->bus_freq;
		level->bus_min = level->bus_freq;
		kgsl_of_property_read_ddrtype(child,
		kgsl_of_property_read_ddrtype(child,
			"qcom,bus-min", &level->bus_min);
			"qcom,bus-min", &level->bus_min);
@@ -964,7 +994,11 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
			"qcom,bus-max", &level->bus_max);
			"qcom,bus-max", &level->bus_max);
	}
	}


	adreno_build_opp_table(&device->pdev->dev, pwr);
	return 0;
	return 0;
out:
	of_node_put(child);
	return ret;
}
}


static void adreno_of_get_initial_pwrlevel(struct adreno_device *adreno_dev,
static void adreno_of_get_initial_pwrlevel(struct adreno_device *adreno_dev,
+2 −2
Original line number Original line Diff line number Diff line
@@ -2858,8 +2858,8 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev)
	 * Ensure that this function is not called when dispatcher
	 * Ensure that this function is not called when dispatcher
	 * mutex is held and device is started
	 * mutex is held and device is started
	 */
	 */
	if (mutex_is_locked(&dispatcher->mutex) &&

		(__mutex_owner(&dispatcher->mutex) == current))
	if (WARN_ON(mutex_is_locked(&dispatcher->mutex)))
		return -EDEADLK;
		return -EDEADLK;


	adreno_get_gpu_halt(adreno_dev);
	adreno_get_gpu_halt(adreno_dev);
+3 −3
Original line number Original line Diff line number Diff line
@@ -4346,7 +4346,7 @@ static vm_fault_t
kgsl_gpumem_vm_fault(struct vm_fault *vmf)
kgsl_gpumem_vm_fault(struct vm_fault *vmf)
{
{
	struct kgsl_mem_entry *entry = vmf->vma->vm_private_data;
	struct kgsl_mem_entry *entry = vmf->vma->vm_private_data;
	int ret;
	vm_fault_t ret;


	if (!entry)
	if (!entry)
		return VM_FAULT_SIGBUS;
		return VM_FAULT_SIGBUS;
@@ -4354,7 +4354,7 @@ kgsl_gpumem_vm_fault(struct vm_fault *vmf)
		return VM_FAULT_SIGBUS;
		return VM_FAULT_SIGBUS;


	ret = entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
	ret = entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
	if ((ret == 0) || (ret == VM_FAULT_NOPAGE))
	if (!ret || ret == VM_FAULT_NOPAGE)
		entry->priv->gpumem_mapped += PAGE_SIZE;
		entry->priv->gpumem_mapped += PAGE_SIZE;


	return ret;
	return ret;
@@ -4776,7 +4776,7 @@ static int _register_device(struct kgsl_device *device)
	}
	}


	device->dev->dma_mask = &dma_mask;
	device->dev->dma_mask = &dma_mask;
	arch_setup_dma_ops(device->dev, 0, 0, NULL, false);
	set_dma_ops(device->dev, NULL);


	dev_set_drvdata(&device->pdev->dev, device);
	dev_set_drvdata(&device->pdev->dev, device);
	return 0;
	return 0;
+2 −2
Original line number Original line Diff line number Diff line
@@ -163,8 +163,8 @@ struct kgsl_memdesc;


struct kgsl_memdesc_ops {
struct kgsl_memdesc_ops {
	unsigned int vmflags;
	unsigned int vmflags;
	int (*vmfault)(struct kgsl_memdesc *memdesc, struct vm_area_struct *vma,
	vm_fault_t (*vmfault)(struct kgsl_memdesc *memdesc,
		       struct vm_fault *vmf);
		struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*free)(struct kgsl_memdesc *memdesc);
	void (*free)(struct kgsl_memdesc *memdesc);
	int (*map_kernel)(struct kgsl_memdesc *memdesc);
	int (*map_kernel)(struct kgsl_memdesc *memdesc);
	void (*unmap_kernel)(struct kgsl_memdesc *memdesc);
	void (*unmap_kernel)(struct kgsl_memdesc *memdesc);
+42 −118
Original line number Original line Diff line number Diff line
@@ -4,6 +4,7 @@
 */
 */


#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/io.h>
@@ -559,7 +560,7 @@ static int gmu_dcvs_set(struct kgsl_device *device,


struct rpmh_arc_vals {
struct rpmh_arc_vals {
	unsigned int num;
	unsigned int num;
	uint16_t val[MAX_GX_LEVELS];
	const u16 *val;
};
};


static const char gfx_res_id[] = "gfx.lvl";
static const char gfx_res_id[] = "gfx.lvl";
@@ -584,22 +585,9 @@ enum rpmh_vote_type {
static int rpmh_arc_cmds(struct gmu_device *gmu,
static int rpmh_arc_cmds(struct gmu_device *gmu,
		struct rpmh_arc_vals *arc, const char *res_id)
		struct rpmh_arc_vals *arc, const char *res_id)
{
{
	unsigned int len;
	size_t len = 0;


	memset(arc, 0, sizeof(*arc));
	arc->val = cmd_db_read_aux_data(res_id, &len);

	len = cmd_db_read_aux_data_len(res_id);
	if (len == 0)
		return -EINVAL;

	if (len > (MAX_GX_LEVELS << 1)) {
		dev_err(&gmu->pdev->dev,
			"gfx cmddb size %d larger than alloc buf %d of %s\n",
			len, (MAX_GX_LEVELS << 1), res_id);
		return -EINVAL;
	}

	cmd_db_read_aux_data(res_id, (uint8_t *)arc->val, len);


	/*
	/*
	 * cmd_db_read_aux_data() gives us a zero-padded table of
	 * cmd_db_read_aux_data() gives us a zero-padded table of
@@ -703,45 +691,22 @@ static int rpmh_arc_votes_init(struct kgsl_device *device,
{
{
	unsigned int num_freqs;
	unsigned int num_freqs;
	u16 vlvl_tbl[MAX_GX_LEVELS];
	u16 vlvl_tbl[MAX_GX_LEVELS];
	unsigned int *freq_tbl;
	int i;
	int i;
	struct dev_pm_opp *opp;


	if (type == GMU_ARC_VOTE)
	if (type == GMU_ARC_VOTE)
		return rpmh_gmu_arc_votes_init(gmu, pri_rail, sec_rail);
		return rpmh_gmu_arc_votes_init(gmu, pri_rail, sec_rail);


	num_freqs = gmu->num_gpupwrlevels;
	num_freqs = gmu->num_gpupwrlevels;
	freq_tbl = gmu->gpu_freqs;


	if (num_freqs > pri_rail->num || num_freqs > MAX_GX_LEVELS) {
	if (num_freqs > pri_rail->num) {
		dev_err(&gmu->pdev->dev,
		dev_err(&gmu->pdev->dev,
			"Defined more GPU DCVS levels than RPMh can support\n");
			"Defined more GPU DCVS levels than RPMh can support\n");
		return -EINVAL;
		return -EINVAL;
	}
	}


	memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
	memset(vlvl_tbl, 0, sizeof(vlvl_tbl));

	for (i = 0; i < num_freqs; i++)
	/* Get the values from OPP API */
		vlvl_tbl[i] = gmu->pwrlevels[i].level;
	for (i = 0; i < num_freqs; i++) {
		/* Hardcode VLVL 0 because it is not present in OPP */
		if (freq_tbl[i] == 0) {
			vlvl_tbl[i] = 0;
			continue;
		}

		opp = dev_pm_opp_find_freq_exact(&device->pdev->dev,
			freq_tbl[i], true);

		if (IS_ERR(opp)) {
			dev_err(&gmu->pdev->dev,
				"Failed to find opp freq %d for GPU\n",
				freq_tbl[i]);
			return PTR_ERR(opp);
		}

		vlvl_tbl[i] = dev_pm_opp_get_voltage(opp);
		dev_pm_opp_put(opp);
	}


	return setup_volt_dependency_tbl(gmu->rpmh_votes.gx_votes, pri_rail,
	return setup_volt_dependency_tbl(gmu->rpmh_votes.gx_votes, pri_rail,
						sec_rail, vlvl_tbl, num_freqs);
						sec_rail, vlvl_tbl, num_freqs);
@@ -978,34 +943,6 @@ static int gmu_reg_probe(struct kgsl_device *device)
	return 0;
	return 0;
}
}


/*
 * gmu_clocks_probe() - Fetch every GMU clock named in the device tree
 * @gmu: Pointer to the GMU device
 * @node: Device tree node carrying the "clock-names" string list
 *
 * Looks up each named clock via the managed clock API and stores it in
 * gmu->clks. Returns 0 on success, the clk_get error if a clock is
 * missing, or -EINVAL if the DT names more clocks than MAX_GMU_CLKS.
 */
static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
{
	struct property *prop;
	const char *name;
	int count = 0;

	of_property_for_each_string(node, "clock-names", prop, name) {
		struct clk *clk = devm_clk_get(&gmu->pdev->dev, name);

		if (IS_ERR(clk)) {
			dev_err(&gmu->pdev->dev,
				"dt: Couldn't get GMU clock: %s\n", name);
			return PTR_ERR(clk);
		}

		/* Don't run off the end of the fixed-size clock array */
		if (count >= MAX_GMU_CLKS) {
			dev_err(&gmu->pdev->dev,
				"dt: too many GMU clocks defined\n");
			return -EINVAL;
		}

		gmu->clks[count++] = clk;
	}

	return 0;
}

static int gmu_regulators_probe(struct gmu_device *gmu,
static int gmu_regulators_probe(struct gmu_device *gmu,
		struct device_node *node)
		struct device_node *node)
{
{
@@ -1218,7 +1155,7 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
	struct kgsl_hfi *hfi;
	struct kgsl_hfi *hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i = 0, ret = -ENXIO, index = 0;
	int i = 0, ret = -ENXIO, index;


	gmu = kzalloc(sizeof(struct gmu_device), GFP_KERNEL);
	gmu = kzalloc(sizeof(struct gmu_device), GFP_KERNEL);


@@ -1242,10 +1179,19 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
	if (ret)
	if (ret)
		goto error;
		goto error;


	/* Set up GMU clocks */
	ret = devm_clk_bulk_get_all(&gmu->pdev->dev, &gmu->clks);
	ret = gmu_clocks_probe(gmu, node);
	if (ret < 0)
	if (ret)
		goto error;

	gmu->num_clks = ret;

	/* Get a pointer to the GMU clock */
	gmu->gmu_clk = kgsl_of_clk_by_name(gmu->clks, gmu->num_clks, "gmu_clk");
	if (!gmu->gmu_clk) {
		dev_err(&gmu->pdev->dev, "Couldn't get gmu_clk\n");
		ret = -ENODEV;
		goto error;
		goto error;
	}


	/* Set up GMU IOMMU and shared memory with GMU */
	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_iommu_init(gmu, node);
	ret = gmu_iommu_init(gmu, node);
@@ -1288,12 +1234,21 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long) gmu);
	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long) gmu);
	hfi->kgsldev = device;
	hfi->kgsldev = device;


	/* Add a dummy level for "off" that the GMU expects */
	if (WARN(pwr->num_pwrlevels + 1 > ARRAY_SIZE(gmu->pwrlevels),
	gmu->gpu_freqs[index++] = 0;
		"Too many GPU powerlevels for the GMU HFI\n")) {
		ret = -EINVAL;
		goto error;
	}

	/* Add a dummy level for "off" because the GMU expects it */
	gmu->pwrlevels[0].freq = 0;
	gmu->pwrlevels[0].level = 0;


	/* GMU power levels are in ascending order */
	/* GMU power levels are in ascending order */
	for (i = pwr->num_pwrlevels - 1; i >= 0; i--)
	for (index = 1, i = pwr->num_pwrlevels - 1; i >= 0; i--, index++) {
		gmu->gpu_freqs[index++] = pwr->pwrlevels[i].gpu_freq;
		gmu->pwrlevels[index].freq = pwr->pwrlevels[i].gpu_freq;
		gmu->pwrlevels[index].level = pwr->pwrlevels[i].voltage_level;
	}


	gmu->num_gpupwrlevels = pwr->num_pwrlevels + 1;
	gmu->num_gpupwrlevels = pwr->num_pwrlevels + 1;


@@ -1331,48 +1286,30 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
static int gmu_enable_clks(struct kgsl_device *device)
static int gmu_enable_clks(struct kgsl_device *device)
{
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	int ret, j = 0;
	int ret;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return -EINVAL;


	ret = clk_set_rate(gmu->clks[0], GMU_FREQUENCY);
	ret = clk_set_rate(gmu->gmu_clk, GMU_FREQUENCY);
	if (ret) {
	if (ret) {
		dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
		dev_err(&gmu->pdev->dev, "Unable to set GMU clock\n");
				GMU_FREQUENCY);
		return ret;
		return ret;
	}
	}


	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
	ret = clk_bulk_prepare_enable(gmu->num_clks, gmu->clks);
		ret = clk_prepare_enable(gmu->clks[j]);
	if (ret) {
	if (ret) {
			dev_err(&gmu->pdev->dev,
		dev_err(&gmu->pdev->dev, "Cannot enable GMU clocks\n");
					"fail to enable gpucc clk idx %d\n",
					j);
		return ret;
		return ret;
	}
	}
		j++;
	}


	set_bit(GMU_CLK_ON, &device->gmu_core.flags);
	set_bit(GMU_CLK_ON, &device->gmu_core.flags);
	return 0;
	return 0;
}
}


static int gmu_disable_clks(struct kgsl_device *device)
static void gmu_disable_clks(struct kgsl_device *device)
{
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	int j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return 0;

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		clk_disable_unprepare(gmu->clks[j]);
		j++;
	}


	clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
	clear_bit(GMU_CLK_ON, &device->gmu_core.flags);
	clear_bit(GMU_CLK_ON, &device->gmu_core.flags);
	return 0;


}
}


@@ -1619,7 +1556,6 @@ static void gmu_remove(struct kgsl_device *device)
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct kgsl_hfi *hfi;
	struct kgsl_hfi *hfi;
	int i = 0;


	if (gmu == NULL || gmu->pdev == NULL)
	if (gmu == NULL || gmu->pdev == NULL)
		return;
		return;
@@ -1635,11 +1571,6 @@ static void gmu_remove(struct kgsl_device *device)


	clear_bit(ADRENO_ACD_CTRL, &adreno_dev->pwrctrl_flag);
	clear_bit(ADRENO_ACD_CTRL, &adreno_dev->pwrctrl_flag);


	while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
		gmu->clks[i] = NULL;
		i++;
	}

	icc_put(gmu->icc_path);
	icc_put(gmu->icc_path);


	if (gmu->fw_image) {
	if (gmu->fw_image) {
@@ -1649,13 +1580,6 @@ static void gmu_remove(struct kgsl_device *device)


	gmu_memory_close(gmu);
	gmu_memory_close(gmu);


	for (i = 0; i < MAX_GMU_CLKS; i++) {
		if (gmu->clks[i]) {
			devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
			gmu->clks[i] = NULL;
		}
	}

	if (gmu->gx_gdsc) {
	if (gmu->gx_gdsc) {
		devm_regulator_put(gmu->gx_gdsc);
		devm_regulator_put(gmu->gx_gdsc);
		gmu->gx_gdsc = NULL;
		gmu->gx_gdsc = NULL;
Loading