Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 38145572 authored by Thomas (Wonyoung) Yun's avatar Thomas (Wonyoung) Yun
Browse files

msm: kgsl: hyp assign secure pages to HLOS during hibernation



Secure memory cannot be preserved during hibernation.
This change first checks for an active secure context and
fails the hibernation if one exists. It then hyp assigns the
kgsl internal secure buffers to HLOS during hibernation.

Change-Id: I8879294ed357ef09201bd430bdbece84de324fa9
Signed-off-by: Thomas (Wonyoung) Yun <wyun@codeaurora.org>
parent 59eb231e
Loading
Loading
Loading
Loading
+21 −7
Original line number Diff line number Diff line
@@ -4031,12 +4031,13 @@ static void adreno_gpu_model(struct kgsl_device *device, char *str,
			 ADRENO_CHIPID_PATCH(adreno_dev->chipid) + 1);
}

static void adreno_suspend_device(struct kgsl_device *device,
static int adreno_suspend_device(struct kgsl_device *device,
				pm_message_t pm_state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int pm_event = pm_state.event;
	int ret = 0;

	if (device->state == KGSL_STATE_SUSPEND)
		adreno_dispatcher_halt(device);
@@ -4050,23 +4051,36 @@ static void adreno_suspend_device(struct kgsl_device *device,
			clear_bit(GMU_RSCC_SLEEP_SEQ_DONE,
						&device->gmu_core.flags);
		}

		if (gpudev->secure_pt_hibernate != NULL)
			ret = gpudev->secure_pt_hibernate(adreno_dev);
	}

	return ret;
}

static int adreno_resume_device(struct kgsl_device *device,
				pm_message_t pm_state)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int pm_event = pm_state.event;
	int ret;

	if ((pm_event != PM_EVENT_RESUME) &&
		!adreno_is_a640v1(adreno_dev) &&
	if (pm_event != PM_EVENT_RESUME) {
		if (gpudev->secure_pt_restore != NULL) {
			ret = gpudev->secure_pt_restore(adreno_dev);
			if (ret)
				return ret;
		}

		if (!adreno_is_a640v1(adreno_dev) &&
			kgsl_mmu_is_perprocess(&device->mmu)) {
			ret = adreno_program_smmu_aperture(device);
			if (ret)
				return ret;
		}
	}

	if (device->state == KGSL_STATE_SUSPEND)
		adreno_dispatcher_unhalt(device);
+2 −0
Original line number Diff line number Diff line
@@ -1039,6 +1039,8 @@ struct adreno_gpudev {
	size_t (*snapshot_preemption)(struct kgsl_device *, u8 *,
				 size_t, void *);
	void (*zap_shader_unload)(struct adreno_device *);
	int (*secure_pt_hibernate)(struct adreno_device *);
	int (*secure_pt_restore)(struct adreno_device *);
};

/**
+68 −0
Original line number Diff line number Diff line
@@ -3233,6 +3233,72 @@ static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
	}
}

/*
 * Secure buffers cannot be preserved during hibernation.
 * Issue hyp_assign call to assign non-used internal secure
 * buffers to kernel.
 * This function will fail if there is an active secure context
 * since we cannot remove the content from user secure buffer.
 *
 * Return: 0 on success, -EBUSY if a secure context is active, or the
 * error code from kgsl_unlock_sgt() if the hyp assign itself fails.
 */
static int a6xx_secure_pt_hibernate(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	unsigned int i = 0;
	/*
	 * Default to -EBUSY so the secure-context bail-out below returns a
	 * meaningful code; overwritten with the real error if an unlock fails.
	 */
	int ret = -EBUSY;

	/* User secure buffer contents cannot be evicted, so an active
	 * secure context makes hibernation impossible.
	 */
	if (adreno_drawctxt_has_secure(device)) {
		KGSL_DRV_ERR(device,
		    "Secure context is active, cannot hibernate secure PT\n");
		goto fail;
	}

	/* Hyp assign each ringbuffer's secure preemption buffer to HLOS */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		if (rb->secure_preemption_desc.sgt) {
			ret = kgsl_unlock_sgt(rb->secure_preemption_desc.sgt);
			if (ret) {
				KGSL_DRV_ERR(device,
				    "kgsl_unlock_sgt failed ret %d\n", ret);
				goto fail;
			}
		}
	}

	return 0;

fail:
	/*
	 * Roll back: re-lock the buffers already unlocked (indices 0..i-1,
	 * excluding the one that failed) so the secure state stays consistent.
	 */
	while (i > 0) {
		rb = &(adreno_dev->ringbuffers[i - 1]);
		if (rb->secure_preemption_desc.sgt)
			kgsl_lock_sgt(rb->secure_preemption_desc.sgt,
					rb->secure_preemption_desc.size);
		i--;
	}
	/* Propagate the real error instead of masking it with -EBUSY */
	return ret;
}

/*
 * Re-lock (hyp assign back to secure) the internal secure preemption
 * buffers of every ringbuffer after resuming from hibernation.
 * Stops at the first failure and returns that error; buffers locked
 * before the failure are left locked.
 */
static int a6xx_secure_pt_restore(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	unsigned int i;
	int status;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		if (!rb->secure_preemption_desc.sgt)
			continue;

		status = kgsl_lock_sgt(rb->secure_preemption_desc.sgt,
				rb->secure_preemption_desc.size);
		if (status) {
			KGSL_DRV_ERR(device,
			    "kgsl_lock_sgt failed ret %d\n", status);
			return status;
		}
	}

	return 0;
}

struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = &a6xx_reg_offsets,
	.start = a6xx_start,
@@ -3274,4 +3340,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.clk_set_options = a6xx_clk_set_options,
	.snapshot_preemption = a6xx_snapshot_preemption,
	.zap_shader_unload = a6xx_zap_shader_unload,
	.secure_pt_hibernate = a6xx_secure_pt_hibernate,
	.secure_pt_restore = a6xx_secure_pt_restore,
};
+18 −0
Original line number Diff line number Diff line
@@ -656,3 +656,21 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
	rb->drawctxt_active = drawctxt;
	return 0;
}

/*
 * adreno_drawctxt_has_secure() - Check whether any context on the device
 * was created with the KGSL_CONTEXT_SECURE flag.
 * @device: Pointer to the KGSL device
 *
 * Walks the context IDR under the read side of context_lock.
 *
 * Return: true if at least one secure context exists, false otherwise.
 */
bool adreno_drawctxt_has_secure(struct kgsl_device *device)
{
	struct kgsl_context *ctxt;
	bool secure = false;
	int id;

	read_lock(&device->context_lock);
	idr_for_each_entry(&device->context_idr, ctxt, id) {
		if (ctxt->flags & KGSL_CONTEXT_SECURE) {
			secure = true;
			break;
		}
	}
	read_unlock(&device->context_lock);

	return secure;
}
+3 −1
Original line number Diff line number Diff line
/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -151,4 +151,6 @@ static inline const char *get_api_type_str(unsigned int type)
	}
	return "UNKNOWN";
}

bool adreno_drawctxt_has_secure(struct kgsl_device *device);
#endif  /* __ADRENO_DRAWCTXT_H */
Loading