Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2cda636 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Move secvid programming to gpudev"

parents e791172b 29511735
Loading
Loading
Loading
Loading
+10 −38
Original line number Diff line number Diff line
@@ -1517,8 +1517,11 @@ static int adreno_probe(struct platform_device *pdev)
		KGSL_MEMSTORE_SIZE, 0, priv, "memstore");

	status = PTR_ERR_OR_ZERO(device->memstore);
	if (status)
		goto out;
	if (status) {
		kgsl_device_platform_remove(device);
		device->pdev = NULL;
		return status;
	}

	/* Initialize the snapshot engine */
	size = adreno_dev->gpucore->snapshot_size;
@@ -1533,10 +1536,6 @@ static int adreno_probe(struct platform_device *pdev)

	kgsl_device_snapshot_probe(device, size);

	status = adreno_dispatcher_init(adreno_dev);
	if (status)
		goto out;

	adreno_debugfs_init(adreno_dev);
	adreno_profile_init(adreno_dev);

@@ -1577,13 +1576,8 @@ static int adreno_probe(struct platform_device *pdev)
		}
	}
#endif
out:
	if (status) {
		kgsl_device_platform_remove(device);
		device->pdev = NULL;
	}

	return status;
	return 0;
}

static void _adreno_free_memories(struct adreno_device *adreno_dev)
@@ -1816,6 +1810,10 @@ static int adreno_init(struct kgsl_device *device)
	if (test_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv))
		return 0;

	ret = adreno_dispatcher_init(adreno_dev);
	if (ret)
		return ret;

	ret = adreno_ringbuffer_init(adreno_dev);
	if (ret)
		return ret;
@@ -1900,28 +1898,6 @@ static bool regulators_left_on(struct kgsl_device *device)
	return false;
}

/*
 * _set_secvid() - Program the SECVID trusted-buffer registers
 * @device: KGSL device handle
 *
 * Write the secure ("trusted") buffer base address and size into the
 * RBBM SECVID registers so content-protected GPU memory is fenced off.
 * Skipped entirely when the MMU has no secure support. On targets with
 * the ADRENO_QUIRK_SECVID_SET_ONCE quirk the static flag is latched so
 * the registers are only programmed on the first call — presumably the
 * hardware only accepts one write per power cycle (TODO: confirm).
 */
static void _set_secvid(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	/* Persists across calls; only ever latched under the quirk below */
	static bool set;

	/* Program GPU content protection init values */
	if (device->mmu.secured && !set) {
		adreno_writereg(adreno_dev,
				ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);

		/* 64-bit trusted base split across the LO/HI register pair */
		adreno_writereg64(adreno_dev,
			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
			KGSL_IOMMU_SECURE_BASE(&device->mmu));
		adreno_writereg(adreno_dev,
			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
			KGSL_IOMMU_SECURE_SIZE);
		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_SECVID_SET_ONCE))
			set = true;
	}
}

int adreno_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
				struct adreno_ringbuffer *rb)
{
@@ -2079,8 +2055,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
	if (status)
		goto error_oob_clear;

	_set_secvid(device);

	if (device->pwrctrl.bus_control) {
		/* VBIF waiting for RAM */
		if (adreno_dev->starved_ram_lo == 0) {
@@ -2923,8 +2897,6 @@ int adreno_soft_reset(struct kgsl_device *device)
	adreno_ringbuffer_set_global(adreno_dev, 0);
	kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);

	_set_secvid(device);

	/* Reinitialize the GPU */
	gpudev->start(adreno_dev);

+0 −4
Original line number Diff line number Diff line
@@ -666,10 +666,6 @@ enum adreno_regs {
	ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
	ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
	ADRENO_REG_RBBM_GPR0_CNTL,
	ADRENO_REG_RBBM_GBIF_HALT,
	ADRENO_REG_RBBM_GBIF_HALT_ACK,
+10 −8
Original line number Diff line number Diff line
@@ -1524,6 +1524,16 @@ static void a5xx_start(struct adreno_device *adreno_dev)
		kgsl_regrmw(device, A5XX_HLSQ_DBG_ECO_CNTL, 0x1 << 18, 0);
	}

	if (device->mmu.secured) {
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_CNTL, 0x0);
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
			lower_32_bits(KGSL_IOMMU_SECURE_BASE(&device->mmu)));
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
			upper_32_bits(KGSL_IOMMU_SECURE_BASE(&device->mmu)));
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE,
			KGSL_IOMMU_SECURE_SIZE);
	}

	a5xx_preemption_start(adreno_dev);
	a5xx_protect_init(adreno_dev);
}
@@ -2400,14 +2410,6 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A5XX_RBBM_PERFCTR_LOAD_VALUE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
				A5XX_RBBM_PERFCTR_LOAD_VALUE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
				A5XX_RBBM_SECVID_TSB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
				A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
				A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
				A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
				A5XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
+21 −8
Original line number Diff line number Diff line
@@ -324,6 +324,25 @@ static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev);
static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev);
static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev);

/*
 * a6xx_set_secvid() - Program the A6XX SECVID trusted-buffer registers
 * @device: KGSL device handle
 *
 * A6XX-specific replacement for the generic secvid programming: write
 * the secure ("trusted") buffer base and size directly via the A6XX
 * register names. Does nothing if the MMU is not secured. On targets
 * with the ADRENO_QUIRK_SECVID_SET_ONCE quirk the static flag is
 * latched so programming happens only once across calls.
 */
static void a6xx_set_secvid(struct kgsl_device *device)
{
	/* Persists across calls; only latched under the SET_ONCE quirk */
	static bool set;

	/* Nothing to do without secure MMU support or once already set */
	if (set || !device->mmu.secured)
		return;

	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_CNTL, 0x0);
	/* 64-bit trusted base split across the LO/HI register pair */
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
		lower_32_bits(KGSL_IOMMU_SECURE_BASE(&device->mmu)));
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
		upper_32_bits(KGSL_IOMMU_SECURE_BASE(&device->mmu)));
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE,
		KGSL_IOMMU_SECURE_SIZE);

	if (ADRENO_QUIRK(ADRENO_DEVICE(device), ADRENO_QUIRK_SECVID_SET_ONCE))
		set = true;
}

/*
 * Some targets support marking certain transactions as always privileged which
 * allows us to mark more memory as privileged without having to explicitly set
@@ -576,6 +595,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)

	if (ADRENO_FEATURE(adreno_dev, ADRENO_APRIV))
		kgsl_regwrite(device, A6XX_CP_APRIV_CNTL, A6XX_APRIV_DEFAULT);

	a6xx_set_secvid(device);
}

/*
@@ -2387,14 +2408,6 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A6XX_GMU_CM3_CFG),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
				A6XX_GMU_RBBM_INT_UNMASKED_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
				A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
				A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
				A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
				A6XX_RBBM_SECVID_TSB_CNTL),
};

static int cpu_gpu_lock(struct cpu_gpu_lock *lock)
+11 −4
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/slab.h>
@@ -2802,8 +2802,16 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	int ret, i;

	if (test_bit(ADRENO_DISPATCHER_INIT, &dispatcher->priv))
		return 0;

	memset(dispatcher, 0, sizeof(*dispatcher));

	ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
		&device->dev->kobj, "dispatch");
	if (ret)
		return ret;

	mutex_init(&dispatcher->mutex);

	timer_setup(&dispatcher->timer, adreno_dispatcher_timer, 0);
@@ -2820,10 +2828,9 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
	for (i = 0; i < ARRAY_SIZE(dispatcher->jobs); i++)
		init_llist_head(&dispatcher->jobs[i]);

	ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
		&device->dev->kobj, "dispatch");
	set_bit(ADRENO_DISPATCHER_INIT, &dispatcher->priv);

	return ret;
	return 0;
}

void adreno_dispatcher_halt(struct kgsl_device *device)
Loading