Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9a7f4ba8 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Do not access GMU_HFI_ON flag in power control"

parents 3af6f5b3 4f193e8b
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -2135,7 +2135,7 @@ static int adreno_stop(struct kgsl_device *device)
			gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);
			if (gmu_core_regulator_isenabled(device)) {
				/* GPU is on. Try recovery */
				gmu_core_setbit(device, GMU_FAULT);
				set_bit(GMU_FAULT, &device->gmu_core.flags);
				gmu_core_snapshot(device);
				error = -EINVAL;
			} else {
@@ -2177,7 +2177,7 @@ static int adreno_stop(struct kgsl_device *device)
	if (!error && GMU_DEV_OP_VALID(gmu_dev_ops, wait_for_lowest_idle) &&
			gmu_dev_ops->wait_for_lowest_idle(adreno_dev)) {

		gmu_core_setbit(device, GMU_FAULT);
		set_bit(GMU_FAULT, &device->gmu_core.flags);
		gmu_core_snapshot(device);
		/*
		 * Assume GMU hang after 10ms without responding.
+6 −5
Original line number Diff line number Diff line
@@ -373,7 +373,7 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
	int val;

	/* Only trigger wakeup sequence if sleep sequence was done earlier */
	if (!test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
	if (!test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags))
		return 0;

	gmu_core_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
@@ -406,7 +406,7 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
	gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Clear sleep sequence flag as wakeup sequence is successful */
	clear_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
	clear_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags);

	/* Enable the power counter because it was disabled before slumber */
	gmu_core_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
@@ -423,7 +423,7 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;

	if (test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
	if (test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags))
		return 0;

	/* RSC sleep sequence is different on v1 */
@@ -468,7 +468,7 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
			test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
		gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);

	set_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
	set_bit(GMU_RSCC_SLEEP_SEQ_DONE, &device->gmu_core.flags);
	return 0;
}

@@ -955,7 +955,8 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
		/* Turn on TCM retention */
		gmu_core_regwrite(device, A6XX_GMU_GENERAL_7, 1);

		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
		if (!test_and_set_bit(GMU_BOOT_INIT_DONE,
			&device->gmu_core.flags))
			ret = _load_gmu_rpmh_ucode(device);
		else
			ret = a6xx_rpmh_power_on_gpu(device);
+1 −1
Original line number Diff line number Diff line
@@ -656,7 +656,7 @@ static inline int kgsl_state_is_awake(struct kgsl_device *device)
		device->state == KGSL_STATE_AWARE)
		return true;
	else if (gmu_core_isenabled(device) &&
			gmu_core_testbit(device, GMU_CLK_ON))
			test_bit(GMU_CLK_ON, &device->gmu_core.flags))
		return true;
	else
		return false;
+46 −120
Original line number Diff line number Diff line
@@ -24,9 +24,7 @@
#include "kgsl_device.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "a6xx_reg.h"
#include "adreno.h"
#include "kgsl_trace.h"

#define GMU_CONTEXT_USER		0
#define GMU_CONTEXT_KERNEL		1
@@ -611,6 +609,18 @@ static int gmu_dcvs_set(struct kgsl_device *device,
		.bw = INVALID_DCVS_IDX,
	};

	/* Do not set to XO and lower GPU clock vote from GMU */
	if ((gpu_pwrlevel != INVALID_DCVS_IDX) &&
			(gpu_pwrlevel >= gmu->num_gpupwrlevels - 1))
		return -EINVAL;

	/* If GMU has not been started, save it */
	if (!test_bit(GMU_HFI_ON, &device->gmu_core.flags)) {
		/* store clock change request */
		set_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags);
		return 0;
	}

	if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
		req.freq = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;

@@ -619,13 +629,15 @@ static int gmu_dcvs_set(struct kgsl_device *device,

	/* GMU will vote for slumber levels through the sleep sequence */
	if ((req.freq == INVALID_DCVS_IDX) &&
		(req.bw == INVALID_DCVS_IDX))
		(req.bw == INVALID_DCVS_IDX)) {
		clear_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags);
		return 0;
	}

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		ret = gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev,
			GMU_DCVS_NOHFI, req.freq, req.bw);
	else if (test_bit(GMU_HFI_ON, &gmu->flags))
	else if (test_bit(GMU_HFI_ON, &device->gmu_core.flags))
		ret = hfi_send_req(gmu, H2F_MSG_GX_BW_PERF_VOTE, &req);

	if (ret) {
@@ -637,6 +649,8 @@ static int gmu_dcvs_set(struct kgsl_device *device,
		adreno_dispatcher_schedule(device);
	}

	/* indicate actual clock change */
	clear_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags);
	return ret;
}

@@ -1076,8 +1090,9 @@ static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
	return 0;
}

static int gmu_reg_probe(struct gmu_device *gmu)
static int gmu_reg_probe(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	struct resource *res;

	res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM,
@@ -1097,10 +1112,9 @@ static int gmu_reg_probe(struct gmu_device *gmu)

	gmu->reg_phys = res->start;
	gmu->reg_len = resource_size(res);

	gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
			resource_size(res));
	if (gmu->reg_virt == NULL) {
	device->gmu_core.reg_virt = devm_ioremap(&gmu->pdev->dev,
			res->start, resource_size(res));
	if (device->gmu_core.reg_virt == NULL) {
		dev_err(&gmu->pdev->dev, "kgsl_gmu_reg ioremap failed\n");
		return -ENODEV;
	}
@@ -1264,8 +1278,7 @@ static int gmu_irq_probe(struct kgsl_device *device, struct gmu_device *gmu)
}

/* Do not access any GMU registers in GMU probe function */
static int gmu_probe(struct kgsl_device *device,
		struct device_node *node, unsigned long flags)
static int gmu_probe(struct kgsl_device *device, struct device_node *node)
{
	struct gmu_device *gmu;
	struct gmu_memdesc *mem_addr = NULL;
@@ -1279,11 +1292,10 @@ static int gmu_probe(struct kgsl_device *device,
	if (gmu == NULL)
		return -ENOMEM;

	device->gmu_core.ptr = (void *)gmu;
	hfi = &gmu->hfi;
	gmu->load_mode = TCM_BOOT;

	gmu->ver = ~0U;
	gmu->flags = flags;

	gmu->pdev = of_find_device_by_node(node);
	of_dma_configure(&gmu->pdev->dev, node);
@@ -1305,7 +1317,7 @@ static int gmu_probe(struct kgsl_device *device,
	mem_addr = gmu->hfi_mem;

	/* Map and reserve GMU CSRs registers */
	ret = gmu_reg_probe(gmu);
	ret = gmu_reg_probe(device);
	if (ret)
		goto error;

@@ -1372,9 +1384,8 @@ static int gmu_probe(struct kgsl_device *device,

	/* disable LM during boot time */
	clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
	set_bit(GMU_ENABLED, &gmu->flags);
	set_bit(GMU_ENABLED, &device->gmu_core.flags);

	device->gmu_core.ptr = (void *)gmu;
	device->gmu_core.dev_ops = &adreno_a6xx_gmudev;

	return 0;
@@ -1384,8 +1395,9 @@ static int gmu_probe(struct kgsl_device *device,
	return ret;
}

static int gmu_enable_clks(struct gmu_device *gmu)
static int gmu_enable_clks(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	int ret, j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
@@ -1409,12 +1421,13 @@ static int gmu_enable_clks(struct gmu_device *gmu)
		j++;
	}

	set_bit(GMU_CLK_ON, &gmu->flags);
	set_bit(GMU_CLK_ON, &device->gmu_core.flags);
	return 0;
}

static int gmu_disable_clks(struct gmu_device *gmu)
static int gmu_disable_clks(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	int j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
@@ -1425,7 +1438,7 @@ static int gmu_disable_clks(struct gmu_device *gmu)
		j++;
	}

	clear_bit(GMU_CLK_ON, &gmu->flags);
	clear_bit(GMU_CLK_ON, &device->gmu_core.flags);
	return 0;

}
@@ -1488,7 +1501,7 @@ static int gmu_suspend(struct kgsl_device *device)
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
	if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags))
		return 0;

	/* Pending messages in all queues are abandoned */
@@ -1498,7 +1511,7 @@ static int gmu_suspend(struct kgsl_device *device)
	if (gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
		return -EINVAL;

	gmu_disable_clks(gmu);
	gmu_disable_clks(device);
	gmu_disable_gdsc(gmu);
	dev_err(&gmu->pdev->dev, "Suspended GMU\n");
	return 0;
@@ -1547,9 +1560,9 @@ static int gmu_start(struct kgsl_device *device)
	switch (device->state) {
	case KGSL_STATE_INIT:
	case KGSL_STATE_SUSPEND:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_enable_clks(device);
		gmu_dev_ops->irq_enable(device);

		/* Vote for 300MHz DDR for GMU to init */
@@ -1574,9 +1587,9 @@ static int gmu_start(struct kgsl_device *device)
		break;

	case KGSL_STATE_SLUMBER:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_enable_clks(device);
		gmu_dev_ops->irq_enable(device);

		ret = gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
@@ -1593,10 +1606,10 @@ static int gmu_start(struct kgsl_device *device)

	case KGSL_STATE_RESET:
		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv) ||
			test_bit(GMU_FAULT, &gmu->flags)) {
			test_bit(GMU_FAULT, &device->gmu_core.flags)) {
			gmu_suspend(device);
			gmu_enable_gdsc(gmu);
			gmu_enable_clks(gmu);
			gmu_enable_clks(device);
			gmu_dev_ops->irq_enable(device);

			ret = gmu_dev_ops->rpmh_gpu_pwrctrl(
@@ -1646,7 +1659,7 @@ static void gmu_stop(struct kgsl_device *device)
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
	int ret = 0;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
	if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags))
		return;

	/* Wait for the lowest idle level we requested */
@@ -1668,7 +1681,7 @@ static void gmu_stop(struct kgsl_device *device)
	hfi_stop(gmu);

	gmu_dev_ops->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
	gmu_disable_clks(gmu);
	gmu_disable_clks(device);
	gmu_disable_gdsc(gmu);

	msm_bus_scale_client_update_request(gmu->pcl, 0);
@@ -1680,7 +1693,7 @@ static void gmu_stop(struct kgsl_device *device)
	 * Set GMU_FAULT flag to indicate to power controller
	 * that hang recovery is needed to power on GPU
	 */
	set_bit(GMU_FAULT, &gmu->flags);
	set_bit(GMU_FAULT, &device->gmu_core.flags);
	dev_err(&gmu->pdev->dev, "Failed to stop GMU\n");
	gmu_core_snapshot(device);
}
@@ -1727,11 +1740,6 @@ static void gmu_remove(struct kgsl_device *device)
		gmu->pcl = 0;
	}

	if (gmu->reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
		gmu->reg_virt = NULL;
	}

	gmu_memory_close(gmu);

	for (i = 0; i < MAX_GMU_CLKS; i++) {
@@ -1751,86 +1759,12 @@ static void gmu_remove(struct kgsl_device *device)
		gmu->cx_gdsc = NULL;
	}

	gmu->flags = 0;
	device->gmu_core.flags = 0;
	device->gmu_core.ptr = NULL;
	gmu->pdev = NULL;
	kfree(gmu);
}

/*
 * gmu_regwrite() - write one dword to a GMU CSR.
 * @device: KGSL device whose GMU register space is being written
 * @offsetwords: register offset in dwords, expressed in the unified
 *		 GPU register map (translated below via gmu2gpu_offset)
 * @value: value to write
 *
 * Emits a kgsl trace event for the access, then performs a raw MMIO
 * write with an explicit barrier so ordering matches a normal writel().
 */
static void gmu_regwrite(struct kgsl_device *device,
				unsigned int offsetwords, unsigned int value)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	void __iomem *reg;

	trace_kgsl_regwrite(device, offsetwords, value);

	/* Translate from the GPU register map into the GMU-local map */
	offsetwords -= device->gmu_core.gmu2gpu_offset;
	reg = gmu->reg_virt + (offsetwords << 2);	/* dword index -> byte offset */

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}

/*
 * gmu_regread() - read one dword from a GMU CSR.
 * @device: KGSL device whose GMU register space is being read
 * @offsetwords: register offset in dwords, expressed in the unified
 *		 GPU register map (translated below via gmu2gpu_offset)
 * @value: out parameter receiving the register contents
 *
 * Performs a raw MMIO read followed by an explicit barrier so ordering
 * matches a normal readl().
 */
static void gmu_regread(struct kgsl_device *device,
		unsigned int offsetwords, unsigned int *value)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
	void __iomem *reg;

	/* Translate from the GPU register map into the GMU-local map */
	offsetwords -= device->gmu_core.gmu2gpu_offset;

	reg = gmu->reg_virt + (offsetwords << 2);	/* dword index -> byte offset */

	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}

/* Report whether the GPMU has been put in charge of power features */
static bool gmu_gpmu_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu_dev = KGSL_GMU_DEVICE(device);

	return test_bit(GMU_GPMU, &gmu_dev->flags) != 0;
}

/*
 * Report whether the GMU is usable; GMU_ENABLED is set only after
 * gmu_probe() completes successfully.
 */
static bool gmu_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu_dev = KGSL_GMU_DEVICE(device);

	return test_bit(GMU_ENABLED, &gmu_dev->flags) != 0;
}

/* Atomically set one GMU core flag on this device's GMU */
static void gmu_set_bit(struct kgsl_device *device, enum gmu_core_flags flag)
{
	struct gmu_device *gmu_dev = KGSL_GMU_DEVICE(device);

	set_bit(flag, &gmu_dev->flags);
}

/* Atomically clear one GMU core flag on this device's GMU */
static void gmu_clear_bit(struct kgsl_device *device, enum gmu_core_flags flag)
{
	struct gmu_device *gmu_dev = KGSL_GMU_DEVICE(device);

	clear_bit(flag, &gmu_dev->flags);
}

/* Query one GMU core flag; returns nonzero when the flag is set */
static int gmu_test_bit(struct kgsl_device *device, enum gmu_core_flags flag)
{
	struct gmu_device *gmu_dev = KGSL_GMU_DEVICE(device);

	return test_bit(flag, &gmu_dev->flags);
}

static bool gmu_regulator_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
@@ -1838,19 +1772,11 @@ static bool gmu_regulator_isenabled(struct kgsl_device *device)
	return (gmu->gx_gdsc &&	regulator_is_enabled(gmu->gx_gdsc));
}


struct gmu_core_ops gmu_ops = {
	.probe = gmu_probe,
	.remove = gmu_remove,
	.regread = gmu_regread,
	.regwrite = gmu_regwrite,
	.isenabled = gmu_isenabled,
	.gpmu_isenabled = gmu_gpmu_isenabled,
	.start = gmu_start,
	.stop = gmu_stop,
	.set_bit = gmu_set_bit,
	.clear_bit = gmu_clear_bit,
	.test_bit = gmu_test_bit,
	.dcvs_set = gmu_dcvs_set,
	.snapshot = gmu_snapshot,
	.regulator_isenabled = gmu_regulator_isenabled,
+0 −4
Original line number Diff line number Diff line
@@ -103,7 +103,6 @@ enum gmu_load_mode {
 * struct gmu_device - GMU device structure
 * @ver: GMU FW version, read from GMU
 * @reg_phys: GMU CSR physical address
 * @reg_virt: GMU CSR virtual address
 * @reg_len: GMU CSR range
 * @gmu_interrupt_num: GMU interrupt number
 * @fw_image: descriptor of GMU memory that has GMU image in it
@@ -127,7 +126,6 @@ enum gmu_load_mode {
 * @gx_gdsc: GX headswitch that controls power of GPU subsystem
 * @clks: GPU subsystem clocks required for GMU functionality
 * @load_mode: GMU FW load/boot mode
 * @flags: GMU flags
 * @wakeup_pwrlevel: GPU wake up power/DCVS level in case different
 *		than default power level
 * @pcl: GPU BW scaling client
@@ -139,7 +137,6 @@ struct gmu_device {
	unsigned int ver;
	struct platform_device *pdev;
	unsigned long reg_phys;
	void __iomem *reg_virt;
	unsigned int reg_len;
	unsigned int gmu_interrupt_num;
	struct gmu_memdesc cached_fw_image;
@@ -165,7 +162,6 @@ struct gmu_device {
	struct regulator *gx_gdsc;
	struct clk *clks[MAX_GMU_CLKS];
	enum gmu_load_mode load_mode;
	unsigned long flags;
	unsigned int wakeup_pwrlevel;
	unsigned int pcl;
	unsigned int ccl;
Loading