Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a3eaa1c6 authored by Tarun Karra's avatar Tarun Karra
Browse files

msm: kgsl: Avoid glitch in GPU during power collapse



During GPU power collapse there is a possibility of a glitch.
Avoid this by mutually excluding SMMU operations and the
regulator disable done during GPU power collapse. In addition,
poll for the regulators to be off, as recommended by the
hardware team, to avoid the glitch.

Change-Id: I887c9d2ab84ef308b9523ea316a2617ea2a2c8ce
Signed-off-by: default avatarTarun Karra <tkarra@codeaurora.org>
parent 37bc52a5
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -127,6 +127,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
		.major = 3,
		.minor = 0,
		.patchid = 0,
		.features = ADRENO_SYNC_SMMU_PC,
		.pm4fw_name = "a530v1_pm4.fw",
		.pfpfw_name = "a530v1_pfp.fw",
		.gpudev = &adreno_a5xx_gpudev,
@@ -141,7 +142,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
		.minor = 0,
		.patchid = ANY_ID,
		.features = ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM |
							ADRENO_PREEMPTION,
				ADRENO_PREEMPTION | ADRENO_SYNC_SMMU_PC,
		.pm4fw_name = "a530_pm4.fw",
		.pfpfw_name = "a530_pfp.fw",
		.zap_name = "a530_zap",
+71 −6
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/of_coresight.h>
#include <linux/input.h>
#include <soc/qcom/scm.h>

#include <linux/msm-bus-board.h>
#include <linux/msm-bus.h>
@@ -1352,8 +1353,9 @@ static int _adreno_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int status = -EINVAL;
	int i, status = -EINVAL;
	unsigned int state = device->state;
	unsigned int regulator_left_on = 0;
	unsigned int pmqos_wakeup_vote = device->pwrctrl.pm_qos_wakeup_latency;
	unsigned int pmqos_active_vote = device->pwrctrl.pm_qos_active_latency;

@@ -1365,20 +1367,27 @@ static int _adreno_start(struct adreno_device *adreno_dev)

	kgsl_cffdump_open(device);

	for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
		if (device->pwrctrl.gpu_reg[i] &&
			regulator_is_enabled(device->pwrctrl.gpu_reg[i])) {
			regulator_left_on = 1;
			break;
		}
	}

	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	/* Put the GPU in a responsive state */
	device->regulator_left_on = false;
	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
	if (status)
		goto error_pwr_off;

	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	/* Set the bit to indicate that we've just powered on */
	set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);

	/* Soft reset the GPU if a regulator is stuck on*/
	if (device->regulator_left_on)
	if (regulator_left_on)
		_soft_reset(adreno_dev);

	status = kgsl_mmu_start(device);
@@ -2667,6 +2676,61 @@ static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
					postlevel, post);
}

/*
 * adreno_iommu_sync() - Tell the hypervisor to serialize SMMU operations
 * against GPU power collapse, holding the MMU sync mutex in between.
 * @device: Pointer to the KGSL device
 * @sync: true to enter the synchronized section (lock + SCM on),
 *	false to leave it (SCM off + unlock)
 *
 * No-op on targets without the ADRENO_SYNC_SMMU_PC feature. The
 * mutex_mmu_sync lock is held from the sync=true call until the matching
 * sync=false call, so SMMU map/unmap cannot run while the GPU rails are
 * being turned off.
 */
static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct scm_desc desc = {0};
	int ret;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_SYNC_SMMU_PC))
		return;

	desc.args[0] = sync;
	desc.arginfo = SCM_ARGS(1);

	if (sync) {
		mutex_lock(&device->mutex_mmu_sync);
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR, 0x8), &desc);
		if (ret)
			KGSL_DRV_ERR(device,
				"MMU sync with Hypervisor off %x\n", ret);
	} else {
		/*
		 * Report a failure here too; a failed "off" call leaves the
		 * hypervisor-side sync state stuck and should be visible.
		 */
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR, 0x8), &desc);
		if (ret)
			KGSL_DRV_ERR(device,
				"MMU sync with Hypervisor off %x\n", ret);
		mutex_unlock(&device->mutex_mmu_sync);
	}
}

/*
 * adreno_regulator_disable_poll() - Disable the GPU regulators, polling for
 * them to actually turn off on targets that need SMMU/power-collapse sync.
 * @device: Pointer to the KGSL device
 *
 * On ADRENO_SYNC_SMMU_PC targets the rails are disabled CX first then GX,
 * under the MMU sync exclusion, and each rail is polled for up to 200ms to
 * confirm it is really off (HW team recommendation). Other targets simply
 * disable the regulators in reverse order (GX first, then CX).
 */
static void adreno_regulator_disable_poll(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned long wait_time;
	int i;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_SYNC_SMMU_PC)) {
		for (i = KGSL_MAX_REGULATORS - 1; i >= 0; i--)
			if (pwr->gpu_reg[i])
				regulator_disable(pwr->gpu_reg[i]);
		return;
	}

	adreno_iommu_sync(device, true);

	/* Turn off CX and then GX as recommended by HW team */
	for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
		int rail_on = 1;

		if (!pwr->gpu_reg[i])
			continue;

		regulator_disable(pwr->gpu_reg[i]);

		/* Give each rail its own 200ms window, and stop polling
		 * as soon as the rail reads disabled. */
		wait_time = jiffies + msecs_to_jiffies(200);
		while (!time_after(jiffies, wait_time)) {
			if (!regulator_is_enabled(pwr->gpu_reg[i])) {
				rail_on = 0;
				break;
			}
			cpu_relax();
		}

		if (rail_on)
			KGSL_CORE_ERR("%s regulator on after 200ms\n",
				pwr->gpu_reg_name[i]);
	}

	adreno_iommu_sync(device, false);
}

static const struct kgsl_functable adreno_functable = {
	/* Mandatory functions */
	.regread = adreno_regread,
@@ -2702,6 +2766,7 @@ static const struct kgsl_functable adreno_functable = {
	.is_hw_collapsible = adreno_is_hw_collapsible,
	.regulator_disable = adreno_regulator_disable,
	.pwrlevel_change_settings = adreno_pwrlevel_change_settings,
	.regulator_disable_poll = adreno_regulator_disable_poll,
};

static struct platform_driver adreno_platform_driver = {
+2 −0
Original line number Diff line number Diff line
@@ -104,6 +104,8 @@
#define ADRENO_LM BIT(9)
/* The core uses 64 bit GPU addresses */
#define ADRENO_64BIT BIT(10)
/* Sync between SMMU operations and power collapse */
#define ADRENO_SYNC_SMMU_PC BIT(11)

/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE             0
+3 −3
Original line number Diff line number Diff line
@@ -177,6 +177,7 @@ struct kgsl_functable {
	void (*regulator_disable)(struct kgsl_device *);
	void (*pwrlevel_change_settings)(struct kgsl_device *device,
		unsigned int prelevel, unsigned int postlevel, bool post);
	void (*regulator_disable_poll)(struct kgsl_device *device);
};

struct kgsl_ioctl {
@@ -250,7 +251,7 @@ struct kgsl_device {
	int open_count;

	struct mutex mutex;
	struct mutex mutex_pc_smmu;
	struct mutex mutex_mmu_sync;
	uint32_t state;
	uint32_t requested_state;

@@ -290,7 +291,6 @@ struct kgsl_device {
	struct workqueue_struct *events_wq;

	struct device *busmondev; /* pseudo dev for GPU BW voting governor */
	bool regulator_left_on;
};


@@ -305,7 +305,7 @@ struct kgsl_device {
	.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
	.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
	.mutex = __MUTEX_INITIALIZER((_dev).mutex),\
	.mutex_pc_smmu = __MUTEX_INITIALIZER((_dev).mutex_pc_smmu),\
	.mutex_mmu_sync = __MUTEX_INITIALIZER((_dev).mutex_mmu_sync),\
	.state = KGSL_STATE_NONE,\
	.ver_major = DRIVER_VERSION_MAJOR,\
	.ver_minor = DRIVER_VERSION_MINOR
+24 −13
Original line number Diff line number Diff line
@@ -85,6 +85,18 @@ static int kgsl_iommu_flush_pt(struct kgsl_mmu *mmu);
static phys_addr_t
kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu);

/*
 * _iommu_sync_mmu_pc() - Serialize SMMU map/unmap against GPU power collapse
 * @device: Pointer to the KGSL device
 * @lock: true to take the MMU sync mutex, false to release it
 *
 * No-op on targets without the ADRENO_SYNC_SMMU_PC feature.
 */
static inline void _iommu_sync_mmu_pc(struct kgsl_device *device, bool lock)
{
	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_SYNC_SMMU_PC))
		return;

	if (lock)
		mutex_lock(&device->mutex_mmu_sync);
	else
		mutex_unlock(&device->mutex_mmu_sync);
}

/*
 * kgsl_iommu_get_pt_base_addr - Get the physical address of the pagetable
 * @mmu - Pointer to mmu
@@ -1176,17 +1188,17 @@ kgsl_iommu_unmap(struct kgsl_pagetable *pt,
		mutex_lock(&device->mutex);
		ret = kgsl_active_count_get(device);
		if (!ret) {
			mutex_lock(&device->mutex_pc_smmu);
			_iommu_sync_mmu_pc(device, true);
			unmapped = iommu_unmap(iommu_pt->domain, gpuaddr,
					range);
			mutex_unlock(&device->mutex_pc_smmu);
			_iommu_sync_mmu_pc(device, false);
			kgsl_active_count_put(device);
		}
		mutex_unlock(&device->mutex);
	} else {
		mutex_lock(&device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(device, true);
		unmapped = iommu_unmap(iommu_pt->domain, gpuaddr, range);
		mutex_unlock(&device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(device, false);
	}
	if (unmapped != range) {
		KGSL_CORE_ERR(
@@ -1248,11 +1260,11 @@ int _iommu_add_guard_page(struct kgsl_pagetable *pt,
			physaddr = kgsl_secure_guard_page_memdesc.physaddr;
		}

		mutex_lock(&pt->mmu->device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(pt->mmu->device, true);
		ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr,
				kgsl_memdesc_guard_page_size(memdesc),
				protflags & ~IOMMU_WRITE);
		mutex_unlock(&pt->mmu->device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(pt->mmu->device, false);
		if (ret) {
			KGSL_CORE_ERR(
			"iommu_map(%p, addr %016llX, flags %x) err: %d\n",
@@ -1296,22 +1308,21 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
		mutex_lock(&device->mutex);
		ret = kgsl_active_count_get(device);
		if (!ret) {
			mutex_lock(&device->mutex_pc_smmu);
			_iommu_sync_mmu_pc(device, true);
			mapped = iommu_map_sg(iommu_pt->domain, addr,
					memdesc->sgt->sgl, memdesc->sgt->nents,
					flags);
			mutex_unlock(&device->mutex_pc_smmu);
			_iommu_sync_mmu_pc(device, false);
			kgsl_active_count_put(device);
		}
		mutex_unlock(&device->mutex);
	} else {
		mutex_lock(&device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(device, true);
		mapped = iommu_map_sg(iommu_pt->domain, addr,
				memdesc->sgt->sgl, memdesc->sgt->nents, flags);
		mutex_unlock(&device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(device, false);
	}


	if (mapped != size) {
		KGSL_CORE_ERR("iommu_map_sg(%p, %016llX, %lld, %x) err: %zd\n",
				iommu_pt->domain, addr, size,
@@ -1322,9 +1333,9 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
	ret = _iommu_add_guard_page(pt, memdesc, addr + size, flags);
	if (ret) {
		/* cleanup the partial mapping */
		mutex_lock(&device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(device, true);
		iommu_unmap(iommu_pt->domain, addr, size);
		mutex_unlock(&device->mutex_pc_smmu);
		_iommu_sync_mmu_pc(device, false);
	}

	/*
Loading