Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 14584cbe authored by Hareesh Gundu
Browse files

msm: kgsl: Abstract out GMU from the GMU core



A6xx GPUs have different variants of GMU. Abstract
out current GMU implementation under GMU core,
so that it’s easy to add new code for different
GMU variants.

Change-Id: I0dae15bc6905b869c34bbbdebc0b5e9e2f7ff25b
Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
parent d530d303
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@ msm_kgsl_core-y = \
	kgsl_snapshot.o \
	kgsl_events.o \
	kgsl_pool.o \
	kgsl_gmu_core.o \
	kgsl_gmu.o \
	kgsl_hfi.o

+90 −74
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@
#include <linux/msm-bus.h>

#include "kgsl.h"
#include "kgsl_gmu_core.h"
#include "kgsl_pwrscale.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
@@ -79,9 +80,6 @@ static struct adreno_device device_3d0 = {
		.pwrscale = KGSL_PWRSCALE_INIT(&adreno_tz_data),
		.name = DEVICE_3D0_NAME,
		.id = KGSL_DEVICE_3D0,
		.gmu = {
			.load_mode = TCM_BOOT,
		},
		.pwrctrl = {
			.irq_name = "kgsl_3d0_irq",
		},
@@ -634,7 +632,7 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
	 * This is usually harmless because the GMU will abort power collapse
	 * and change the fence back to ALLOW. Poll so that this can happen.
	 */
	if (kgsl_gmu_isenabled(device)) {
	if (gmu_core_isenabled(device)) {
		do {
			adreno_readreg(adreno_dev,
					ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
@@ -1229,7 +1227,6 @@ static int adreno_probe(struct platform_device *pdev)
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	int status;
	unsigned long flags;

	adreno_dev = adreno_get_dev(pdev);

@@ -1265,9 +1262,7 @@ static int adreno_probe(struct platform_device *pdev)
	 * Another part of GPU power probe in platform_probe
	 * needs GMU initialized.
	 */
	flags = ADRENO_FEATURE(adreno_dev, ADRENO_GPMU) ? BIT(GMU_GPMU) : 0;

	status = gmu_probe(device, flags);
	status = gmu_core_probe(device);
	if (status) {
		device->pdev = NULL;
		return status;
@@ -1447,7 +1442,7 @@ static int adreno_remove(struct platform_device *pdev)
	adreno_perfcounter_close(adreno_dev);
	kgsl_device_platform_remove(device);

	gmu_remove(device);
	gmu_core_remove(device);

	if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv)) {
		kgsl_free_global(device, &adreno_dev->pwron_fixup);
@@ -1648,7 +1643,7 @@ static bool regulators_left_on(struct kgsl_device *device)
{
	int i;

	if (kgsl_gmu_gpmu_isenabled(device))
	if (gmu_core_gpmu_isenabled(device))
		return false;

	for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
@@ -1764,6 +1759,7 @@ static int _adreno_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
	int status = -EINVAL, ret;
	unsigned int state = device->state;
	bool regulator_left_on;
@@ -1824,17 +1820,14 @@ static int _adreno_start(struct adreno_device *adreno_dev)
	}

	/* Send OOB request to turn on the GX */
	if (gpudev->oob_set) {
		status = gpudev->oob_set(adreno_dev, oob_gpu);
	if (gmu_dev_ops->oob_set) {
		status = gmu_dev_ops->oob_set(adreno_dev, oob_gpu);
		if (status)
			goto error_mmu_off;
	}

	if (adreno_is_a640(adreno_dev)) {
		struct hfi_start_cmd req;

		/* Send hfi start msg */
		status = hfi_send_req(&device->gmu, H2F_MSG_START, &req);
	if (gmu_dev_ops->hfi_start_msg) {
		status = gmu_dev_ops->hfi_start_msg(adreno_dev);
		if (status)
			goto error_mmu_off;
	}
@@ -2011,19 +2004,19 @@ static int _adreno_start(struct adreno_device *adreno_dev)
				pmqos_active_vote);

	/* Send OOB request to allow IFPC */
	if (gpudev->oob_clear) {
		gpudev->oob_clear(adreno_dev, oob_gpu);
	if (gmu_dev_ops->oob_clear) {
		gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);

		/* If we made it this far, the BOOT OOB was sent to the GMU */
		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
			gpudev->oob_clear(adreno_dev, oob_boot_slumber);
			gmu_dev_ops->oob_clear(adreno_dev, oob_boot_slumber);
	}

	return 0;

error_oob_clear:
	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, oob_gpu);
	if (gmu_dev_ops->oob_clear)
		gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);

error_mmu_off:
	kgsl_mmu_stop(&device->mmu);
@@ -2068,24 +2061,21 @@ int adreno_start(struct kgsl_device *device, int priority)
static int adreno_stop(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
	int error = 0;

	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
		return 0;

	/* Turn the power on one last time before stopping */
	if (gpudev->oob_set) {
		error = gpudev->oob_set(adreno_dev, oob_gpu);
	if (gmu_dev_ops->oob_set) {
		error = gmu_dev_ops->oob_set(adreno_dev, oob_gpu);
		if (error) {
			struct gmu_device *gmu = &device->gmu;

			gpudev->oob_clear(adreno_dev, oob_gpu);
			if (gmu->gx_gdsc &&
				regulator_is_enabled(gmu->gx_gdsc)) {
			gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);
			if (gmu_core_regulator_isenabled(device)) {
				/* GPU is on. Try recovery */
				set_bit(GMU_FAULT, &gmu->flags);
				gmu_snapshot(device);
				gmu_core_setbit(device, GMU_FAULT);
				gmu_core_snapshot(device);
				error = -EINVAL;
			} else {
				return error;
@@ -2114,8 +2104,8 @@ static int adreno_stop(struct kgsl_device *device)
	/* Save physical performance counter values before GPU power down*/
	adreno_perfcounter_save(adreno_dev);

	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, oob_gpu);
	if (gmu_dev_ops->oob_clear)
		gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);

	/*
	 * Saving perfcounters will use an OOB to put the GMU into
@@ -2123,12 +2113,11 @@ static int adreno_stop(struct kgsl_device *device)
	 * GMU to return to the lowest idle level. This is
	 * because some idle level transitions require VBIF and MMU.
	 */
	if (!error && gpudev->wait_for_lowest_idle &&
			gpudev->wait_for_lowest_idle(adreno_dev)) {
		struct gmu_device *gmu = &device->gmu;
	if (!error && gmu_dev_ops->wait_for_lowest_idle &&
			gmu_dev_ops->wait_for_lowest_idle(adreno_dev)) {

		set_bit(GMU_FAULT, &gmu->flags);
		gmu_snapshot(device);
		gmu_core_setbit(device, GMU_FAULT);
		gmu_core_snapshot(device);
		/*
		 * Assume GMU hang after 10ms without responding.
		 * It shall be relative safe to clear vbif and stop
@@ -2773,10 +2762,11 @@ int adreno_soft_reset(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
	int ret;

	if (gpudev->oob_set) {
		ret = gpudev->oob_set(adreno_dev, oob_gpu);
	if (gmu_dev_ops->oob_set) {
		ret = gmu_dev_ops->oob_set(adreno_dev, oob_gpu);
		if (ret)
			return ret;
	}
@@ -2799,8 +2789,8 @@ int adreno_soft_reset(struct kgsl_device *device)
	else
		ret = _soft_reset(adreno_dev);
	if (ret) {
		if (gpudev->oob_clear)
			gpudev->oob_clear(adreno_dev, oob_gpu);
		if (gmu_dev_ops->oob_clear)
			gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);
		return ret;
	}

@@ -2853,8 +2843,8 @@ int adreno_soft_reset(struct kgsl_device *device)
	/* Restore physical performance counter values after soft reset */
	adreno_perfcounter_restore(adreno_dev);

	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, oob_gpu);
	if (gmu_dev_ops->oob_clear)
		gmu_dev_ops->oob_clear(adreno_dev, oob_gpu);

	return ret;
}
@@ -3109,44 +3099,72 @@ static void adreno_regwrite(struct kgsl_device *device,
	__raw_writel(value, reg);
}

static void adreno_gmu_regwrite(struct kgsl_device *device,
				unsigned int offsetwords,
				unsigned int value)
/*
 * adreno_gmu_fenced_write() - Check if there is a GMU and it is enabled
 * @adreno_dev: Pointer to the Adreno device device that owns the GMU
 * @offset: 32bit register enum that is to be written
 * @val: The value to be written to the register
 * @fence_mask: The value to poll the fence status register
 *
 * Check the WRITEDROPPED0/1 bit in the FENCE_STATUS register to check if
 * the write to the fenced register went through. If it didn't then we retry
 * the write until it goes through or we time out.
 */
int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
		enum adreno_regs offset, unsigned int val,
		unsigned int fence_mask)
{
	void __iomem *reg;
	struct gmu_device *gmu = &device->gmu;
	unsigned int status, i;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg_offset = gpudev->reg_offsets->offsets[offset];

	trace_kgsl_regwrite(device, offsetwords, value);
	adreno_writereg(adreno_dev, offset, val);

	if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
		return 0;

	offsetwords -= gmu->gmu2gpu_offset;
	reg = gmu->reg_virt + (offsetwords << 2);
	for (i = 0; i < GMU_CORE_WAKEUP_RETRY_MAX; i++) {
		adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
			&status);

		/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
		 * If !writedropped0/1, then the write to fenced register
		 * was successful
		 */
	wmb();
	__raw_writel(value, reg);
		if (!(status & fence_mask))
			return 0;
		/* Wait a small amount of time before trying again */
		udelay(GMU_CORE_WAKEUP_DELAY_US);

		/* Try to write the fenced register again */
		adreno_writereg(adreno_dev, offset, val);
	}

static void adreno_gmu_regread(struct kgsl_device *device,
				unsigned int offsetwords,
				unsigned int *value)
	dev_err(adreno_dev->dev.dev,
		"GMU fenced register write timed out: reg 0x%x\n", reg_offset);
	return -ETIMEDOUT;
}

unsigned int adreno_gmu_ifpc_show(struct adreno_device *adreno_dev)
{
	void __iomem *reg;
	struct gmu_device *gmu = &device->gmu;
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(
			KGSL_DEVICE(adreno_dev));

	offsetwords -= gmu->gmu2gpu_offset;
	if (gmu_dev_ops->ifpc_show)
		return gmu_dev_ops->ifpc_show(adreno_dev);

	reg = gmu->reg_virt + (offsetwords << 2);
	return 0;
}

	*value = __raw_readl(reg);
int adreno_gmu_ifpc_store(struct adreno_device *adreno_dev, unsigned int val)
{
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(
			KGSL_DEVICE(adreno_dev));

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
	if (gmu_dev_ops->ifpc_store)
		return gmu_dev_ops->ifpc_store(adreno_dev, val);

	return -EINVAL;
}

bool adreno_is_cx_dbgc_register(struct kgsl_device *device,
@@ -3600,8 +3618,6 @@ static const struct kgsl_functable adreno_functable = {
	/* Mandatory functions */
	.regread = adreno_regread,
	.regwrite = adreno_regwrite,
	.gmu_regread = adreno_gmu_regread,
	.gmu_regwrite = adreno_gmu_regwrite,
	.idle = adreno_idle,
	.isidle = adreno_isidle,
	.suspend_context = adreno_suspend_context,
+15 −18
Original line number Diff line number Diff line
@@ -23,7 +23,7 @@
#include "adreno_perfcounter.h"
#include <linux/stat.h>
#include <linux/delay.h>
#include "kgsl_gmu.h"
#include "kgsl_gmu_core.h"

#include "a4xx_reg.h"

@@ -912,7 +912,8 @@ struct adreno_gpudev {
	/* GPU specific function hooks */
	void (*irq_trace)(struct adreno_device *, unsigned int status);
	void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);
	void (*snapshot_gmu)(struct adreno_device *, struct kgsl_snapshot *);
	void (*snapshot_debugbus)(struct adreno_device *adreno_dev,
			struct kgsl_snapshot *snapshot);
	void (*platform_setup)(struct adreno_device *);
	void (*init)(struct adreno_device *);
	void (*remove)(struct adreno_device *);
@@ -953,17 +954,9 @@ struct adreno_gpudev {
	void (*llc_configure_gpuhtw_scid)(struct adreno_device *adreno_dev);
	void (*llc_enable_overrides)(struct adreno_device *adreno_dev);
	void (*pre_reset)(struct adreno_device *);
	int (*oob_set)(struct adreno_device *adreno_dev,
			enum oob_request req);
	void (*oob_clear)(struct adreno_device *adreno_dev,
			enum oob_request req);
	void (*gpu_keepalive)(struct adreno_device *adreno_dev,
			bool state);
	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
				unsigned int arg1, unsigned int arg2);
	bool (*hw_isidle)(struct adreno_device *);
	int (*wait_for_lowest_idle)(struct adreno_device *);
	int (*wait_for_gmu_idle)(struct adreno_device *);
	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
				unsigned int fsynr1);
	int (*reset)(struct kgsl_device *, int fault);
@@ -1369,7 +1362,7 @@ static inline void adreno_read_gmureg(struct adreno_device *adreno_dev,
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
		gmu_core_regread(KGSL_DEVICE(adreno_dev),
				gpudev->reg_offsets->offsets[offset_name], val);
	else
		*val = 0;
@@ -1388,7 +1381,7 @@ static inline void adreno_write_gmureg(struct adreno_device *adreno_dev,
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_gmu_regwrite(KGSL_DEVICE(adreno_dev),
		gmu_core_regwrite(KGSL_DEVICE(adreno_dev),
				gpudev->reg_offsets->offsets[offset_name], val);
}

@@ -1858,15 +1851,16 @@ static inline unsigned int counter_delta(struct kgsl_device *device,
static inline int adreno_perfcntr_active_oob_get(
		struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(
			KGSL_DEVICE(adreno_dev));
	int ret;

	ret = kgsl_active_count_get(KGSL_DEVICE(adreno_dev));
	if (ret)
		return ret;

	if (gpudev->oob_set) {
		ret = gpudev->oob_set(adreno_dev, oob_perfcntr);
	if (gmu_dev_ops->oob_set) {
		ret = gmu_dev_ops->oob_set(adreno_dev, oob_perfcntr);
		if (ret)
			kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
	}
@@ -1877,10 +1871,11 @@ static inline int adreno_perfcntr_active_oob_get(
static inline void adreno_perfcntr_active_oob_put(
		struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(
			KGSL_DEVICE(adreno_dev));

	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, oob_perfcntr);
	if (gmu_dev_ops->oob_clear)
		gmu_dev_ops->oob_clear(adreno_dev, oob_perfcntr);

	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
}
@@ -1941,6 +1936,8 @@ static inline void adreno_deassert_gbif_halt(struct adreno_device *adreno_dev)
int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
	enum adreno_regs offset, unsigned int val,
	unsigned int fence_mask);
unsigned int adreno_gmu_ifpc_show(struct adreno_device *adreno_dev);
int adreno_gmu_ifpc_store(struct adreno_device *adreno_dev, unsigned int val);

int adreno_clear_pending_transactions(struct kgsl_device *device);
#endif /*__ADRENO_H */
+21 −24
Original line number Diff line number Diff line
@@ -27,7 +27,6 @@
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "kgsl_trace.h"

@@ -460,7 +459,7 @@ static void a6xx_init(struct adreno_device *adreno_dev)
	 * If the GMU is not enabled, rewrite the offset for the always on
	 * counters to point to the CP always on instead of GMU always on
	 */
	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
	if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
		_update_always_on_regs(adreno_dev);

	a6xx_pwrup_reglist_init(adreno_dev);
@@ -593,12 +592,12 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
		on = false;

	if (kgsl_gmu_isenabled(device)) {
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
	if (gmu_core_isenabled(device)) {
		gmu_core_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
			on ? __get_gmu_ao_cgc_mode_cntl(adreno_dev) : 0);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
		gmu_core_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
			on ? __get_gmu_ao_cgc_delay_cntl(adreno_dev) : 0);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
		gmu_core_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
			on ? __get_gmu_ao_cgc_hyst_cntl(adreno_dev) : 0);
	}

@@ -621,13 +620,13 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
	regs = a6xx_hwcg_registers[i].regs;

	/* Disable SP clock before programming HWCG registers */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
	gmu_core_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);

	/* Enable SP clock */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
	gmu_core_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	/* enable top level HWCG */
	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
@@ -703,12 +702,13 @@ static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
static void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
	unsigned int bit, mal, mode, glbl_inv;
	unsigned int amsbc = 0;
	static bool patch_reglist;

	/* runtime adjust callbacks based on feature sets */
	if (!kgsl_gmu_isenabled(device))
	if (!gmu_core_isenabled(device))
		/* Legacy idle management if gmu is disabled */
		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
	/* enable hardware clockgating */
@@ -845,9 +845,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
	 * 3. HFI
	 * At this point, we are guaranteed all.
	 */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
			test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
		a6xx_gmu_enable_lm(device);
	if (gmu_dev_ops->enable_lm)
		gmu_dev_ops->enable_lm(device);
}

/*
@@ -1196,11 +1195,13 @@ static inline void a6xx_gpu_keepalive(struct adreno_device *adreno_dev,
			ADRENO_REG_GMU_PWR_COL_KEEPALIVE, state);
}

/* Bitmask for GPU idle status check */
#define GPUBUSYIGNAHB		BIT(23)
static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
{
	unsigned int reg;

	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
	gmu_core_regread(KGSL_DEVICE(adreno_dev),
		A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
	if (reg & GPUBUSYIGNAHB)
		return false;
@@ -1215,6 +1216,7 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
	int ret;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
	struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);

	if (sqe_fw->memdesc.hostptr == NULL) {
@@ -1224,7 +1226,7 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
			return ret;
	}

	return a6xx_gmu_load_firmware(device);
	return gmu_dev_ops->load_firmware(device);
}

static int a6xx_soft_reset(struct adreno_device *adreno_dev)
@@ -1238,7 +1240,7 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
	 * For the soft reset case with GMU enabled this part is done
	 * by the GMU firmware
	 */
	if (kgsl_gmu_isenabled(device) &&
	if (gmu_core_isenabled(device) &&
		!test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv))
		return 0;

@@ -1281,7 +1283,7 @@ static void a6xx_count_throttles(struct adreno_device *adreno_dev,
		!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
		return;

	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
	gmu_core_regread(KGSL_DEVICE(adreno_dev),
		adreno_dev->lm_threshold_count,
		&adreno_dev->lm_threshold_cross);
}
@@ -1302,7 +1304,7 @@ static int a6xx_reset(struct kgsl_device *device, int fault)
	int i = 0;

	/* Use the regular reset sequence for No GMU */
	if (!kgsl_gmu_isenabled(device))
	if (!gmu_core_isenabled(device))
		return adreno_reset(device, fault);

	/* Transition from ACTIVE to RESET state */
@@ -2494,7 +2496,7 @@ static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
	if (counter == 0)
		return -EINVAL;

	if (!kgsl_gmu_isenabled(device))
	if (!gmu_core_isenabled(device))
		return -ENODEV;

	kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000);
@@ -2795,7 +2797,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = &a6xx_reg_offsets,
	.start = a6xx_start,
	.snapshot = a6xx_snapshot,
	.snapshot_gmu = a6xx_snapshot_gmu,
	.snapshot_debugbus = a6xx_snapshot_debugbus,
	.irq = &a6xx_irq,
	.snapshot_data = &a6xx_snapshot_data,
	.irq_trace = trace_kgsl_a5xx_irq_status,
@@ -2813,13 +2815,8 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
	.llc_configure_gpuhtw_scid = a6xx_llc_configure_gpuhtw_scid,
	.llc_enable_overrides = a6xx_llc_enable_overrides,
	.oob_set = a6xx_gmu_oob_set,
	.oob_clear = a6xx_gmu_oob_clear,
	.gpu_keepalive = a6xx_gpu_keepalive,
	.rpmh_gpu_pwrctrl = a6xx_gmu_rpmh_gpu_pwrctrl,
	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
	.wait_for_lowest_idle = a6xx_gmu_wait_for_lowest_idle,
	.wait_for_gmu_idle = a6xx_gmu_wait_for_idle,
	.iommu_fault_block = a6xx_iommu_fault_block,
	.reset = a6xx_reset,
	.soft_reset = a6xx_soft_reset,
+3 −10
Original line number Diff line number Diff line
@@ -129,18 +129,11 @@ void a6xx_preemption_context_destroy(struct kgsl_context *context);

void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot);
void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
void a6xx_snapshot_debugbus(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot);
void a6xx_gmu_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot);
void a6xx_crashdump_init(struct adreno_device *adreno_dev);

int a6xx_gmu_oob_set(struct adreno_device *adreno_dev, enum oob_request req);
void a6xx_gmu_oob_clear(struct adreno_device *adreno_dev, enum oob_request req);
void a6xx_gmu_enable_lm(struct kgsl_device *device);
int a6xx_gmu_load_firmware(struct kgsl_device *device);
int a6xx_gmu_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
		unsigned int mode, unsigned int arg1, unsigned int arg2);
int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev);
int a6xx_gmu_wait_for_idle(struct adreno_device *adreno_dev);
int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev);
void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
bool a6xx_gmu_gx_is_on(struct adreno_device *adreno_dev);
Loading