Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7d2e595c authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: hyp assign secure pages to HLOS during hibernation"

parents 6fb0094f 38145572
Loading
Loading
Loading
Loading
+83 −17
Original line number Diff line number Diff line
@@ -1851,7 +1851,7 @@ int adreno_set_unsecured_mode(struct adreno_device *adreno_dev,
	}

	/* GPU comes up in secured mode, make it unsecured by default */
	if (adreno_dev->zap_loaded)
	if (adreno_dev->zap_handle_ptr)
		ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
	else
		adreno_writereg(adreno_dev,
@@ -1876,6 +1876,26 @@ static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
	}
}

/*
 * adreno_program_smmu_aperture() - Program the CP SMMU aperture via SCM
 * @device: KGSL device used for error logging
 *
 * Issue the secure monitor call that programs the SMMU aperture for the
 * CP. Returns 0 immediately if the SCM service is not available. Logs an
 * error if the call fails, or a warning if a successful call took longer
 * than 2 seconds.
 *
 * Return: 0 on success (or when the call is unavailable), otherwise the
 * error code from kgsl_program_smmu_aperture().
 */
static int adreno_program_smmu_aperture(struct kgsl_device *device)
{
	unsigned long start = jiffies;
	unsigned int elapsed;
	int ret;

	if (!scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID))
		return 0;

	ret = kgsl_program_smmu_aperture();

	/* Compute the elapsed time once instead of twice in the log path */
	elapsed = jiffies_to_msecs(jiffies - start);

	if (ret)
		dev_err(device->dev,
		    "SMMU aperture programming call failed error %d\n",
		    ret);
	else if (elapsed > 2000)
		dev_err(device->dev,
		    "scm call took a long time to finish: %u ms\n",
		    elapsed);

	return ret;
}
/**
 * _adreno_start - Power up the GPU and prepare to accept commands
 * @adreno_dev: Pointer to an adreno_device structure
@@ -1926,21 +1946,10 @@ static int _adreno_start(struct adreno_device *adreno_dev)


	if (adreno_is_a640v1(adreno_dev)) {
		unsigned long start = jiffies;

		if (scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID)) {
			ret = kgsl_program_smmu_aperture();
			/* Log it if it takes more than 2 seconds */
			if (((jiffies - start) / HZ) > 2)
				dev_err(device->dev, "scm call took too long to finish on a640v1: %lu seconds\n",
					((jiffies - start) / HZ));
			if (ret) {
				dev_err(device->dev, "SMMU aperture programming call failed with error %d\n",
					ret);
		ret = adreno_program_smmu_aperture(device);
		if (ret)
			goto error_pwr_off;
	}
		}
	}

	adreno_ringbuffer_set_global(adreno_dev, 0);

@@ -4022,6 +4031,63 @@ static void adreno_gpu_model(struct kgsl_device *device, char *str,
			 ADRENO_CHIPID_PATCH(adreno_dev->chipid) + 1);
}

/*
 * adreno_suspend_device() - KGSL suspend callback for the adreno GPU
 * @device: KGSL device being suspended
 * @pm_state: PM transition descriptor; the event field distinguishes a
 *            regular suspend from hibernation transitions
 *
 * Halt the dispatcher when the device is in the SUSPEND state. For any
 * transition other than PM_EVENT_SUSPEND (i.e. hibernation) also unload
 * the zap shader, reset the GMU boot/sleep-sequence flags, and let the
 * GPU core hyp-assign its secure pagetable buffers back to HLOS.
 *
 * Return: 0 on success, or the error from the secure_pt_hibernate hook.
 */
static int adreno_suspend_device(struct kgsl_device *device,
				pm_message_t pm_state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int status = 0;

	if (device->state == KGSL_STATE_SUSPEND)
		adreno_dispatcher_halt(device);

	/* A plain suspend needs nothing beyond halting the dispatcher */
	if (pm_state.event == PM_EVENT_SUSPEND)
		return 0;

	if (gpudev->zap_shader_unload != NULL)
		gpudev->zap_shader_unload(adreno_dev);

	if (gmu_core_isenabled(device)) {
		clear_bit(GMU_BOOT_INIT_DONE, &device->gmu_core.flags);
		clear_bit(GMU_RSCC_SLEEP_SEQ_DONE,
					&device->gmu_core.flags);
	}

	if (gpudev->secure_pt_hibernate != NULL)
		status = gpudev->secure_pt_hibernate(adreno_dev);

	return status;
}

/*
 * adreno_resume_device() - KGSL resume callback for the adreno GPU
 * @device: KGSL device being resumed
 * @pm_state: PM transition descriptor; the event field distinguishes a
 *            regular resume from restore-from-hibernation
 *
 * For any transition other than PM_EVENT_RESUME (i.e. coming back from
 * hibernation) restore the secure pagetable buffers and, on targets with
 * per-process pagetables (excluding a640v1, which programs the aperture
 * in its start path), reprogram the CP SMMU aperture. Finally unhalt the
 * dispatcher if the device is in the SUSPEND state.
 *
 * Return: 0 on success or a negative error code from the restore steps.
 */
static int adreno_resume_device(struct kgsl_device *device,
				pm_message_t pm_state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/* Restore work is only needed when coming back from hibernation */
	if (pm_state.event != PM_EVENT_RESUME) {
		int status;

		if (gpudev->secure_pt_restore != NULL) {
			status = gpudev->secure_pt_restore(adreno_dev);
			if (status)
				return status;
		}

		if (!adreno_is_a640v1(adreno_dev) &&
			kgsl_mmu_is_perprocess(&device->mmu)) {
			status = adreno_program_smmu_aperture(device);
			if (status)
				return status;
		}
	}

	if (device->state == KGSL_STATE_SUSPEND)
		adreno_dispatcher_unhalt(device);

	return 0;
}

static const struct kgsl_functable adreno_functable = {
	/* Mandatory functions */
	.regread = adreno_regread,
@@ -4063,8 +4129,8 @@ static const struct kgsl_functable adreno_functable = {
	.clk_set_options = adreno_clk_set_options,
	.gpu_model = adreno_gpu_model,
	.stop_fault_timer = adreno_dispatcher_stop_fault_timer,
	.dispatcher_halt = adreno_dispatcher_halt,
	.dispatcher_unhalt = adreno_dispatcher_unhalt,
	.suspend_device = adreno_suspend_device,
	.resume_device = adreno_resume_device,
};

static struct platform_driver adreno_platform_driver = {
+4 −1
Original line number Diff line number Diff line
@@ -600,7 +600,7 @@ struct adreno_device {
	bool gpu_llc_slice_enable;
	void *gpuhtw_llc_slice;
	bool gpuhtw_llc_slice_enable;
	unsigned int zap_loaded;
	void *zap_handle_ptr;
	unsigned int soc_hw_rev;
	bool gaming_bin;
};
@@ -1038,6 +1038,9 @@ struct adreno_gpudev {
				bool update_reg);
	size_t (*snapshot_preemption)(struct kgsl_device *, u8 *,
				 size_t, void *);
	void (*zap_shader_unload)(struct adreno_device *);
	int (*secure_pt_hibernate)(struct adreno_device *);
	int (*secure_pt_restore)(struct adreno_device *);
};

/**
+20 −9
Original line number Diff line number Diff line
@@ -2186,11 +2186,11 @@ static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
 */
static int a5xx_microcode_load(struct adreno_device *adreno_dev)
{
	void *ptr;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
	uint64_t gpuaddr;
	int ret = 0;

	gpuaddr = pm4_fw->memdesc.gpuaddr;
	kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_LO,
@@ -2216,9 +2216,8 @@ static int a5xx_microcode_load(struct adreno_device *adreno_dev)
	 * appropriate register,
	 * skip if retention is supported for the CPZ register
	 */
	if (adreno_dev->zap_loaded && !(ADRENO_FEATURE(adreno_dev,
	if (adreno_dev->zap_handle_ptr && !(ADRENO_FEATURE(adreno_dev,
		ADRENO_CPZ_RETENTION))) {
		int ret;
		struct scm_desc desc = {0};

		desc.args[0] = 0;
@@ -2234,16 +2233,27 @@ static int a5xx_microcode_load(struct adreno_device *adreno_dev)
	}

	/* Load the zap shader firmware through PIL if its available */
	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
		ptr = subsystem_get(adreno_dev->gpucore->zap_name);
	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_handle_ptr) {
		adreno_dev->zap_handle_ptr =
				subsystem_get(adreno_dev->gpucore->zap_name);

		/* Return error if the zap shader cannot be loaded */
		if (IS_ERR_OR_NULL(ptr))
			return (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
		adreno_dev->zap_loaded = 1;
		if (IS_ERR_OR_NULL(adreno_dev->zap_handle_ptr)) {
			ret = (adreno_dev->zap_handle_ptr == NULL) ?
				-ENODEV : PTR_ERR(adreno_dev->zap_handle_ptr);
			adreno_dev->zap_handle_ptr = NULL;
		}
	}

	return 0;
	return ret;
}

/* Release the PIL handle for the zap shader firmware, if one is held */
static void a5xx_zap_shader_unload(struct adreno_device *adreno_dev)
{
	void *handle = adreno_dev->zap_handle_ptr;

	if (IS_ERR_OR_NULL(handle))
		return;

	subsystem_put(handle);
	adreno_dev->zap_handle_ptr = NULL;
}

static int _me_init_ucode_workarounds(struct adreno_device *adreno_dev)
@@ -3636,4 +3646,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.enable_64bit = a5xx_enable_64bit,
	.clk_set_options = a5xx_clk_set_options,
	.snapshot_preemption = a5xx_snapshot_preemption,
	.zap_shader_unload = a5xx_zap_shader_unload,
};
+84 −8
Original line number Diff line number Diff line
@@ -970,7 +970,6 @@ static int a6xx_microcode_load(struct adreno_device *adreno_dev)
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	uint64_t gpuaddr;
	void *zap;
	int ret = 0;

	gpuaddr = fw->memdesc.gpuaddr;
@@ -987,20 +986,28 @@ static int a6xx_microcode_load(struct adreno_device *adreno_dev)
		return 0;

	/* Load the zap shader firmware through PIL if its available */
	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
		zap = subsystem_get(adreno_dev->gpucore->zap_name);
	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_handle_ptr) {
		adreno_dev->zap_handle_ptr =
				subsystem_get(adreno_dev->gpucore->zap_name);

		/* Return error if the zap shader cannot be loaded */
		if (IS_ERR_OR_NULL(zap)) {
			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
			zap = NULL;
		} else
			adreno_dev->zap_loaded = 1;
		if (IS_ERR_OR_NULL(adreno_dev->zap_handle_ptr)) {
			ret = (adreno_dev->zap_handle_ptr == NULL) ?
				-ENODEV : PTR_ERR(adreno_dev->zap_handle_ptr);
			adreno_dev->zap_handle_ptr = NULL;
		}
	}

	return ret;
}

/* Release the PIL handle for the zap shader firmware, if one is held */
static void a6xx_zap_shader_unload(struct adreno_device *adreno_dev)
{
	void *handle = adreno_dev->zap_handle_ptr;

	if (IS_ERR_OR_NULL(handle))
		return;

	subsystem_put(handle);
	adreno_dev->zap_handle_ptr = NULL;
}

/*
 * CP_INIT_MAX_CONTEXT bit tells if the multiple hardware contexts can
@@ -3226,6 +3233,72 @@ static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
	}
}

/*
 * Secure buffers cannot be preserved during hibernation.
 * Issue hyp_assign call to assign non-used internal secure
 * buffers to kernel.
 * This function will fail if there is an active secure context
 * since we cannot remove the content from user secure buffer.
 */
static int a6xx_secure_pt_hibernate(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	/* i doubles as the rollback count in the fail path; start at 0 */
	unsigned int i = 0;
	int ret;

	/*
	 * User secure buffers cannot be reassigned while a secure context
	 * is alive, so hibernation must be refused in that case.
	 */
	if (adreno_drawctxt_has_secure(device)) {
		KGSL_DRV_ERR(device,
		    "Secure context is active, cannot hibernate secure PT\n");
		goto fail;
	}

	/* Hand each ringbuffer's secure preemption buffer back to HLOS */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		if (rb->secure_preemption_desc.sgt) {
			ret = kgsl_unlock_sgt(rb->secure_preemption_desc.sgt);
			if (ret) {
				KGSL_DRV_ERR(device,
				    "kgsl_unlock_sgt failed ret %d\n", ret);
				goto fail;
			}
		}
	}

	return 0;

fail:
	/*
	 * Roll back: re-lock the ringbuffers already unlocked above
	 * (indices 0..i-1). When the secure-context check failed, i is
	 * still 0 and this loop is a no-op.
	 */
	while (i > 0) {
		rb = &(adreno_dev->ringbuffers[i - 1]);
		if (rb->secure_preemption_desc.sgt)
			kgsl_lock_sgt(rb->secure_preemption_desc.sgt,
					rb->secure_preemption_desc.size);
		i--;
	}
	return -EBUSY;
}

/*
 * a6xx_secure_pt_restore() - Re-secure preemption buffers after hibernation
 * @adreno_dev: Adreno device whose ringbuffers are restored
 *
 * Re-lock (hyp-assign back to the secure domain) each ringbuffer's secure
 * preemption buffer that was handed to HLOS by a6xx_secure_pt_hibernate().
 *
 * Return: 0 on success, or the first error from kgsl_lock_sgt().
 */
static int a6xx_secure_pt_restore(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	unsigned int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		int status;

		if (!rb->secure_preemption_desc.sgt)
			continue;

		status = kgsl_lock_sgt(rb->secure_preemption_desc.sgt,
				rb->secure_preemption_desc.size);
		if (status) {
			KGSL_DRV_ERR(device,
			    "kgsl_lock_sgt failed ret %d\n", status);
			return status;
		}
	}

	return 0;
}

struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = &a6xx_reg_offsets,
	.start = a6xx_start,
@@ -3266,4 +3339,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
	.clk_set_options = a6xx_clk_set_options,
	.snapshot_preemption = a6xx_snapshot_preemption,
	.zap_shader_unload = a6xx_zap_shader_unload,
	.secure_pt_hibernate = a6xx_secure_pt_hibernate,
	.secure_pt_restore = a6xx_secure_pt_restore,
};
+18 −0
Original line number Diff line number Diff line
@@ -656,3 +656,21 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
	rb->drawctxt_active = drawctxt;
	return 0;
}

/*
 * adreno_drawctxt_has_secure() - Check whether any secure context exists
 * @device: KGSL device whose context list is scanned
 *
 * Walk the context IDR under the read lock and report whether any
 * context carries the KGSL_CONTEXT_SECURE flag.
 *
 * Return: true if at least one secure context exists, false otherwise.
 */
bool adreno_drawctxt_has_secure(struct kgsl_device *device)
{
	struct kgsl_context *ctxt;
	bool found = false;
	int id;

	read_lock(&device->context_lock);
	idr_for_each_entry(&device->context_idr, ctxt, id) {
		if (ctxt->flags & KGSL_CONTEXT_SECURE) {
			found = true;
			break;
		}
	}
	read_unlock(&device->context_lock);

	return found;
}
Loading