Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit db969894 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Support global secure memory objects"

parents 37db9fec 37a1e58f
Loading
Loading
Loading
Loading
+6 −2
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
 */
#ifndef __ADRENO_H
#define __ADRENO_H
@@ -531,6 +531,11 @@ struct adreno_device {
	 * @critpkts: Memory descriptor for 5xx critical packets if applicable
	 */
	struct kgsl_memdesc *critpkts;
	/**
	 * @critpkts_secure: Memory descriptor for 5xx secure critical packets
	 */
	struct kgsl_memdesc *critpkts_secure;

};

/**
@@ -800,7 +805,6 @@ struct adreno_gpudev {
				struct adreno_device *adreno_dev,
				unsigned int *cmds);
	int (*preemption_init)(struct adreno_device *adreno_dev);
	void (*preemption_close)(struct adreno_device *adreno_dev);
	void (*preemption_schedule)(struct adreno_device *adreno_dev);
	int (*preemption_context_init)(struct kgsl_context *context);
	void (*preemption_context_destroy)(struct kgsl_context *context);
+10 −36
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
@@ -18,7 +18,6 @@

static int critical_packet_constructed;
static unsigned int crit_pkts_dwords;
static struct kgsl_memdesc crit_pkts_refbuf0;

static void a5xx_irq_storm_worker(struct work_struct *work);
static int _read_fw2_block_header(struct kgsl_device *device,
@@ -121,14 +120,6 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev)
	a5xx_check_features(adreno_dev);
}

/* Unmap the secure critical-packet reference buffer from the global
 * secure pagetable and release its backing memory.
 */
static void a5xx_critical_packet_destroy(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_iommu_unmap_global_secure_pt_entry(device, &crit_pkts_refbuf0);
	kgsl_sharedmem_free(&crit_pkts_refbuf0);
}

static void _do_fixup(const struct adreno_critical_fixup *fixups, int count,
		uint64_t *gpuaddrs, unsigned int *buffer)
{
@@ -146,29 +137,23 @@ static void _do_fixup(const struct adreno_critical_fixup *fixups, int count,

static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
{

	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int *cmds;
	uint64_t gpuaddrs[4];
	int ret;

	adreno_dev->critpkts = kgsl_allocate_global(KGSL_DEVICE(adreno_dev),
	adreno_dev->critpkts = kgsl_allocate_global(device,
		PAGE_SIZE * 4, 0, 0, "crit_pkts");
	if (IS_ERR(adreno_dev->critpkts))
		return PTR_ERR(adreno_dev->critpkts);

	ret = kgsl_allocate_user(&adreno_dev->dev, &crit_pkts_refbuf0,
		PAGE_SIZE, KGSL_MEMFLAGS_SECURE, 0);
	if (ret)
		return ret;

	ret = kgsl_iommu_map_global_secure_pt_entry(&adreno_dev->dev,
					&crit_pkts_refbuf0);
	if (ret)
		return ret;
	adreno_dev->critpkts_secure = kgsl_allocate_global(device,
		PAGE_SIZE, KGSL_MEMFLAGS_SECURE, 0, "crit_pkts_secure");
	if (IS_ERR(adreno_dev->critpkts_secure))
		return PTR_ERR(adreno_dev->critpkts_secure);

	cmds = adreno_dev->critpkts->hostptr;

	gpuaddrs[0] = crit_pkts_refbuf0.gpuaddr;
	gpuaddrs[0] = adreno_dev->critpkts_secure->gpuaddr;
	gpuaddrs[1] = adreno_dev->critpkts->gpuaddr + PAGE_SIZE;
	gpuaddrs[2] = adreno_dev->critpkts->gpuaddr + (PAGE_SIZE * 2);
	gpuaddrs[3] = adreno_dev->critpkts->gpuaddr + (PAGE_SIZE * 3);
@@ -211,23 +196,13 @@ static void a5xx_init(struct adreno_device *adreno_dev)

	INIT_WORK(&adreno_dev->irq_storm_work, a5xx_irq_storm_worker);

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
		int ret;
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
		a5xx_critical_packet_construct(adreno_dev);

		ret = a5xx_critical_packet_construct(adreno_dev);
		if (ret)
			a5xx_critical_packet_destroy(adreno_dev);
	}

	a5xx_crashdump_init(adreno_dev);
}

/* Device teardown hook: tear down the critical packets only on parts
 * that carry the ADRENO_QUIRK_CRITICAL_PACKETS quirk.
 */
static void a5xx_remove(struct adreno_device *adreno_dev)
{
	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
		return;

	a5xx_critical_packet_destroy(adreno_dev);
}

const static struct {
	u32 reg;
	u32 base;
@@ -3003,7 +2978,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.irq_trace = trace_kgsl_a5xx_irq_status,
	.platform_setup = a5xx_platform_setup,
	.init = a5xx_init,
	.remove = a5xx_remove,
	.rb_start = a5xx_rb_start,
	.microcode_read = a5xx_microcode_read,
	.perfcounters = &a5xx_perfcounters,
+2 −3
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/of.h>
@@ -716,7 +716,7 @@ static int _preemption_init(struct adreno_device *adreno_dev,

	*cmds++ = 2;
	cmds += cp_gpuaddr(adreno_dev, cmds,
			rb->secure_preemption_desc.gpuaddr);
		rb->secure_preemption_desc->gpuaddr);

	/* Turn CP protection back ON */
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
@@ -2541,7 +2541,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit,
	.preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit,
	.preemption_init = a6xx_preemption_init,
	.preemption_close = a6xx_preemption_close,
	.preemption_schedule = a6xx_preemption_schedule,
	.set_marker = a6xx_set_marker,
	.preemption_context_init = a6xx_preemption_context_init,
+1 −2
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#ifndef _ADRENO_A6XX_H_
@@ -251,7 +251,6 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev);
void a6xx_preemption_schedule(struct adreno_device *adreno_dev);
void a6xx_preemption_start(struct adreno_device *adreno_dev);
int a6xx_preemption_init(struct adreno_device *adreno_dev);
void a6xx_preemption_close(struct adreno_device *adreno_dev);

unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
		unsigned int *cmds);
+13 −41
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

#include "adreno.h"
@@ -320,13 +320,13 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev)

	if (adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
		lower_32_bits(next->secure_preemption_desc.gpuaddr),
		lower_32_bits(next->secure_preemption_desc->gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK))
		goto err;

	if (adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
		upper_32_bits(next->secure_preemption_desc.gpuaddr),
		upper_32_bits(next->secure_preemption_desc->gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK))
		goto err;

@@ -483,7 +483,7 @@ unsigned int a6xx_preemption_pre_ibsubmit(

	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR;
	cmds += cp_gpuaddr(adreno_dev, cmds,
			rb->secure_preemption_desc.gpuaddr);
			rb->secure_preemption_desc->gpuaddr);

	if (context) {

@@ -594,7 +594,6 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
	struct adreno_ringbuffer *rb)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	if (IS_ERR_OR_NULL(rb->preemption_desc))
		rb->preemption_desc = kgsl_allocate_global(device,
@@ -603,16 +602,14 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
	if (IS_ERR(rb->preemption_desc))
		return PTR_ERR(rb->preemption_desc);

	ret = kgsl_allocate_user(device, &rb->secure_preemption_desc,
	if (IS_ERR_OR_NULL(rb->secure_preemption_desc))
		rb->secure_preemption_desc = kgsl_allocate_global(device,
			A6XX_CP_CTXRECORD_SIZE_IN_BYTES,
		KGSL_MEMFLAGS_SECURE, KGSL_MEMDESC_PRIVILEGED);
	if (ret)
		return ret;
			KGSL_MEMFLAGS_SECURE, KGSL_MEMDESC_PRIVILEGED,
			"secure_preemption_desc");

	ret = kgsl_iommu_map_global_secure_pt_entry(device,
				&rb->secure_preemption_desc);
	if (ret)
		return ret;
	if (IS_ERR(rb->secure_preemption_desc))
		return PTR_ERR(rb->secure_preemption_desc);

	if (IS_ERR_OR_NULL(rb->perfcounter_save_restore_desc))
		rb->perfcounter_save_restore_desc = kgsl_allocate_global(device,
@@ -646,27 +643,6 @@ static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
	return 0;
}

/*
 * _preemption_close() - Free per-ringbuffer secure preemption records.
 *
 * For every ringbuffer on the device, unmap its secure preemption
 * descriptor from the global secure pagetable and free the backing
 * shared memory allocation.
 */
static void _preemption_close(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	unsigned int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		kgsl_iommu_unmap_global_secure_pt_entry(device,
				&rb->secure_preemption_desc);
		kgsl_sharedmem_free(&rb->secure_preemption_desc);
	}
}

/* Public close hook: release preemption resources, but only if
 * preemption was actually brought up on this device.
 */
void a6xx_preemption_close(struct adreno_device *adreno_dev)
{
	if (test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv))
		_preemption_close(adreno_dev);
}

int a6xx_preemption_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -685,11 +661,9 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
	/* Allocate mem for storing preemption switch record */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb);
		if (ret) {
			_preemption_close(adreno_dev);
		if (ret)
			return ret;
	}
	}

	/* Allocate mem for storing preemption smmu record */
	if (IS_ERR_OR_NULL(iommu->smmu_info))
@@ -698,10 +672,8 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev)
			"smmu_info");

	ret = PTR_ERR_OR_ZERO(iommu->smmu_info);
	if (ret) {
		_preemption_close(adreno_dev);
	if (ret)
		return ret;
	}

	set_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
	return 0;
Loading