Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9c053036 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: constify structs with function pointers"

parents 4fc1d397 ddb49277
Loading
Loading
Loading
Loading
+24 −24
Original line number Diff line number Diff line
@@ -92,7 +92,7 @@ int adreno_get_firmware(struct adreno_device *adreno_dev,
		return ret;
	}

	firmware->memdesc = kgsl_allocate_global(device, fw->size - 4,
	firmware->memdesc = kgsl_allocate_global(device, fw->size - 4, 0,
				KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_UCODE,
				"ucode");

@@ -145,7 +145,7 @@ void adreno_reglist_write(struct adreno_device *adreno_dev,
void adreno_readreg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t *val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int val_lo = 0, val_hi = 0;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

@@ -168,7 +168,7 @@ void adreno_readreg64(struct adreno_device *adreno_dev,
void adreno_writereg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, lo))
		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
@@ -283,7 +283,7 @@ static int _get_counter(struct adreno_device *adreno_dev,
 */
void adreno_fault_detect_start(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int i, j = ARRAY_SIZE(adreno_ft_regs_default);

	if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
@@ -311,7 +311,7 @@ void adreno_fault_detect_start(struct adreno_device *adreno_dev)
 */
void adreno_fault_detect_stop(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int i, j = ARRAY_SIZE(adreno_ft_regs_default);

	if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
@@ -508,7 +508,7 @@ static struct input_handler adreno_input_handler = {
 */
static void _soft_reset(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg;

	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
@@ -571,7 +571,7 @@ void adreno_cp_callback(struct adreno_device *adreno_dev, int bit)
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	irqreturn_t ret;

	atomic_inc(&adreno_dev->pending_irq_refcnt);
@@ -590,7 +590,7 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
}

irqreturn_t adreno_irq_callbacks(struct adreno_device *adreno_dev,
		struct adreno_irq_funcs *funcs, u32 status)
		const struct adreno_irq_funcs *funcs, u32 status)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	irqreturn_t ret = IRQ_NONE;
@@ -1397,7 +1397,7 @@ int adreno_device_probe(struct platform_device *pdev,
		priv |= KGSL_MEMDESC_PRIVILEGED;

	device->memstore = kgsl_allocate_global(device,
		KGSL_MEMSTORE_SIZE, 0, priv, "memstore");
		KGSL_MEMSTORE_SIZE, 0, 0, priv, "memstore");

	status = PTR_ERR_OR_ZERO(device->memstore);
	if (status) {
@@ -1493,7 +1493,7 @@ static void adreno_unbind(struct device *dev)
{
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	struct adreno_gpudev *gpudev;
	const struct adreno_gpudev *gpudev;

	device = dev_get_drvdata(dev);
	if (!device)
@@ -1612,7 +1612,7 @@ static int adreno_pm_suspend(struct device *dev)

static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int i;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_SOFT_FAULT_DETECT))
@@ -1651,7 +1651,7 @@ static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
static int adreno_init(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret;

	ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
@@ -1722,7 +1722,7 @@ static int adreno_init(struct kgsl_device *device)
			priv |= KGSL_MEMDESC_PRIVILEGED;

		adreno_dev->profile_buffer =
			kgsl_allocate_global(device, PAGE_SIZE, 0, priv,
			kgsl_allocate_global(device, PAGE_SIZE, 0, 0, priv,
				"alwayson");

		adreno_dev->profile_index = 0;
@@ -2003,7 +2003,7 @@ void adreno_clear_dcvs_counters(struct adreno_device *adreno_dev)
static int _adreno_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int status;
	unsigned int state = device->state;
	bool regulator_left_on;
@@ -2126,7 +2126,7 @@ int adreno_start(struct kgsl_device *device, int priority)
static int adreno_stop(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int error = 0;

	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
@@ -2174,7 +2174,7 @@ static int adreno_stop(struct kgsl_device *device)
int adreno_reset(struct kgsl_device *device, int fault)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret = -EINVAL;
	int i;

@@ -2649,7 +2649,7 @@ bool adreno_irq_pending(struct adreno_device *adreno_dev)
static int adreno_soft_reset(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret;

	/*
@@ -2722,7 +2722,7 @@ static int adreno_soft_reset(struct kgsl_device *device)

static bool adreno_isidle(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	int i;

@@ -2983,7 +2983,7 @@ int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
		unsigned int fence_mask)
{
	unsigned int status, i;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg_offset = gpudev->reg_offsets[offset];

	adreno_writereg(adreno_dev, offset, val);
@@ -3382,7 +3382,7 @@ static void adreno_power_stats(struct kgsl_device *device,
				struct kgsl_power_stats *stats)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_busy_data *busy = &adreno_dev->busy_data;
	int64_t adj = 0;
@@ -3504,7 +3504,7 @@ static int adreno_regulator_enable(struct kgsl_device *device)
{
	int ret = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->regulator_enable &&
		!test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
@@ -3520,7 +3520,7 @@ static int adreno_regulator_enable(struct kgsl_device *device)
static bool adreno_is_hw_collapsible(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);

	/*
	 * Skip power collapse for A304, if power ctrl flag is set to
@@ -3538,7 +3538,7 @@ static bool adreno_is_hw_collapsible(struct kgsl_device *device)
static void adreno_regulator_disable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->regulator_disable &&
		test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
@@ -3553,7 +3553,7 @@ static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
		unsigned int prelevel, unsigned int postlevel, bool post)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->pwrlevel_change_settings)
		gpudev->pwrlevel_change_settings(adreno_dev, prelevel,
+16 −16
Original line number Diff line number Diff line
@@ -390,7 +390,7 @@ struct adreno_gpu_core {
	 */
	const char *compatible;
	unsigned long features;
	struct adreno_gpudev *gpudev;
	const struct adreno_gpudev *gpudev;
	const struct adreno_perfcounters *perfcounters;
	unsigned long gmem_base;
	size_t gmem_size;
@@ -879,14 +879,14 @@ extern unsigned int *adreno_ft_regs;
extern unsigned int adreno_ft_regs_num;
extern unsigned int *adreno_ft_regs_val;

extern struct adreno_gpudev adreno_a3xx_gpudev;
extern struct adreno_gpudev adreno_a5xx_gpudev;
extern struct adreno_gpudev adreno_a6xx_gpudev;
extern struct adreno_gpudev adreno_a6xx_gmu_gpudev;
extern struct adreno_gpudev adreno_a6xx_rgmu_gpudev;
extern struct adreno_gpudev adreno_a619_holi_gpudev;
extern struct adreno_gpudev adreno_a630_gpudev;
extern struct adreno_gpudev adreno_a6xx_hwsched_gpudev;
extern const struct adreno_gpudev adreno_a3xx_gpudev;
extern const struct adreno_gpudev adreno_a5xx_gpudev;
extern const struct adreno_gpudev adreno_a6xx_gpudev;
extern const struct adreno_gpudev adreno_a6xx_gmu_gpudev;
extern const struct adreno_gpudev adreno_a6xx_rgmu_gpudev;
extern const struct adreno_gpudev adreno_a619_holi_gpudev;
extern const struct adreno_gpudev adreno_a630_gpudev;
extern const struct adreno_gpudev adreno_a6xx_hwsched_gpudev;

extern int adreno_wake_nice;
extern unsigned int adreno_wake_timeout;
@@ -1133,7 +1133,7 @@ static inline int adreno_is_a640v2(struct adreno_device *adreno_dev)
static inline bool adreno_checkreg_off(struct adreno_device *adreno_dev,
					enum adreno_regs offset_name)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (offset_name >= ADRENO_REG_REGISTER_MAX ||
		gpudev->reg_offsets[offset_name] == ADRENO_REG_UNUSED)
@@ -1163,7 +1163,7 @@ static inline bool adreno_checkreg_off(struct adreno_device *adreno_dev,
static inline void adreno_readreg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name, unsigned int *val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regread(KGSL_DEVICE(adreno_dev),
@@ -1182,7 +1182,7 @@ static inline void adreno_readreg(struct adreno_device *adreno_dev,
static inline void adreno_writereg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name, unsigned int val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
@@ -1198,7 +1198,7 @@ static inline void adreno_writereg(struct adreno_device *adreno_dev,
static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!adreno_checkreg_off(adreno_dev, offset_name))
		return ADRENO_REG_REGISTER_MAX;
@@ -1215,7 +1215,7 @@ static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
static inline void adreno_read_gmureg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name, unsigned int *val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		gmu_core_regread(KGSL_DEVICE(adreno_dev),
@@ -1234,7 +1234,7 @@ static inline void adreno_read_gmureg(struct adreno_device *adreno_dev,
static inline void adreno_write_gmureg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name, unsigned int val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		gmu_core_regwrite(KGSL_DEVICE(adreno_dev),
@@ -1791,7 +1791,7 @@ int adreno_zap_shader_load(struct adreno_device *adreno_dev,
 * Return: IRQ_HANDLED if one or more interrupt callbacks were called.
 */
irqreturn_t adreno_irq_callbacks(struct adreno_device *adreno_dev,
		struct adreno_irq_funcs *funcs, u32 status);
		const struct adreno_irq_funcs *funcs, u32 status);


/**
+3 −3
Original line number Diff line number Diff line
@@ -149,7 +149,7 @@ static int _a3xx_pwron_fixup(struct adreno_device *adreno_dev)
		return 0;

	adreno_dev->pwron_fixup = kgsl_allocate_global(KGSL_DEVICE(adreno_dev),
		PAGE_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup");
		PAGE_SIZE, 0, KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup");

	if (IS_ERR(adreno_dev->pwron_fixup))
		return PTR_ERR(adreno_dev->pwron_fixup);
@@ -793,7 +793,7 @@ static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
	 (1 << A3XX_INT_CP_AHB_ERROR_HALT) |     \
	 (1 << A3XX_INT_UCHE_OOB_ACCESS))

static struct adreno_irq_funcs a3xx_irq_funcs[32] = {
static const struct adreno_irq_funcs a3xx_irq_funcs[32] = {
	ADRENO_IRQ_CALLBACK(NULL),                    /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(NULL),  /* 2 - RBBM_REG_TIMEOUT */
@@ -1241,7 +1241,7 @@ static int a3xx_clear_pending_transactions(struct adreno_device *adreno_dev)
	return ret;
}

struct adreno_gpudev adreno_a3xx_gpudev = {
const struct adreno_gpudev adreno_a3xx_gpudev = {
	.reg_offsets = a3xx_register_offsets,
	.ft_perf_counters = a3xx_ft_perf_counters,
	.ft_perf_counters_count = ARRAY_SIZE(a3xx_ft_perf_counters),
+4 −4
Original line number Diff line number Diff line
@@ -155,12 +155,12 @@ static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
	uint64_t gpuaddrs[4];

	adreno_dev->critpkts = kgsl_allocate_global(device,
		PAGE_SIZE * 4, 0, 0, "crit_pkts");
		PAGE_SIZE * 4, 0, 0, 0, "crit_pkts");
	if (IS_ERR(adreno_dev->critpkts))
		return PTR_ERR(adreno_dev->critpkts);

	adreno_dev->critpkts_secure = kgsl_allocate_global(device,
		PAGE_SIZE, KGSL_MEMFLAGS_SECURE, 0, "crit_pkts_secure");
		PAGE_SIZE, 0, KGSL_MEMFLAGS_SECURE, 0, "crit_pkts_secure");
	if (IS_ERR(adreno_dev->critpkts_secure))
		return PTR_ERR(adreno_dev->critpkts_secure);

@@ -2343,7 +2343,7 @@ u64 a5xx_read_alwayson(struct adreno_device *adreno_dev)
}


static struct adreno_irq_funcs a5xx_irq_funcs[32] = {
static const struct adreno_irq_funcs a5xx_irq_funcs[32] = {
	ADRENO_IRQ_CALLBACK(NULL),              /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 2 - RBBM_TRANSFER_TIMEOUT */
@@ -2646,7 +2646,7 @@ static struct adreno_coresight a5xx_coresight = {
};
#endif

struct adreno_gpudev adreno_a5xx_gpudev = {
const struct adreno_gpudev adreno_a5xx_gpudev = {
	.reg_offsets = a5xx_register_offsets,
#ifdef CONFIG_QCOM_KGSL_CORESIGHT
	.coresight = {&a5xx_coresight},
+76 −16
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@
#include "adreno.h"
#include "adreno_a5xx.h"
#include "adreno_perfcounter.h"
#include "adreno_pm4types.h"
#include "kgsl_device.h"

#define VBIF2_PERF_CNT_SEL_MASK 0x7F
@@ -43,6 +44,65 @@ static int a5xx_counter_enable(struct adreno_device *adreno_dev,
	return 0;
}

/*
 * a5xx_counter_inline_enable - Program a perfcounter select via CP commands
 * @adreno_dev: Adreno device handle
 * @group: Perfcounter group containing the target counter
 * @counter: Index of the counter within @group
 * @countable: Countable to select on the counter
 *
 * When the GPU is active, the select register is written through ringbuffer
 * commands (preceded by a CP_WAIT_FOR_IDLE) so the write is ordered against
 * in-flight GPU work; otherwise it falls back to a direct register write via
 * a5xx_counter_enable().
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int a5xx_counter_inline_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_perfcount_register *reg = &group->regs[counter];
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[0];
	u32 cmds[3];
	int ret;

	/* GPU not active: a direct register write is safe and simpler */
	if (device->state != KGSL_STATE_ACTIVE)
		return a5xx_counter_enable(adreno_dev, group, counter,
			countable);

	cmds[0] = cp_type7_packet(CP_WAIT_FOR_IDLE, 0);
	cmds[1] = cp_type4_packet(reg->select, 1);
	cmds[2] = countable;

	/* submit to highest priority RB always */
	ret = adreno_ringbuffer_issue_internal_cmds(rb,
			KGSL_CMD_FLAGS_PMODE, cmds, 3);
	if (ret)
		return ret;

	/*
	 * schedule dispatcher to make sure rb[0] is run, because
	 * if the current RB is not rb[0] and gpu is idle then
	 * rb[0] will not get scheduled to run
	 */
	if (adreno_dev->cur_rb != rb)
		adreno_dispatcher_schedule(device);

	/* wait for the above commands submitted to complete */
	ret = adreno_ringbuffer_waittimestamp(rb, rb->timestamp,
		ADRENO_IDLE_TIMEOUT);

	if (ret) {
		/*
		 * If we were woken up because of cancelling rb events
		 * either due to soft reset or adreno_stop, ignore the
		 * error and return 0 here. The perfcounter is already
		 * set up in software and it will be programmed in
		 * hardware when we wake up or come up after soft reset,
		 * by adreno_perfcounter_restore.
		 */
		if (ret == -EAGAIN)
			ret = 0;
		else
			dev_err(device->dev,
				     "Perfcounter %s/%u/%u start via commands failed %d\n",
				     group->name, counter, countable, ret);
	}

	/* Reset the cached counter value only after a successful enable */
	if (!ret)
		reg->value = 0;

	return ret;
}

static int a5xx_counter_rbbm_enable(struct adreno_device *adreno_dev,
		const struct adreno_perfcount_group *group,
		unsigned int counter, unsigned int countable)
@@ -50,7 +110,7 @@ static int a5xx_counter_rbbm_enable(struct adreno_device *adreno_dev,
	if (adreno_is_a540(adreno_dev) && countable == A5XX_RBBM_ALWAYS_COUNT)
		return -EINVAL;

	return a5xx_counter_enable(adreno_dev, group, counter, countable);
	return a5xx_counter_inline_enable(adreno_dev, group, counter, countable);
}

static u64 a5xx_counter_read(struct adreno_device *adreno_dev,
@@ -564,37 +624,37 @@ static struct adreno_perfcount_register a5xx_pwrcounters_alwayson[] = {
static struct adreno_perfcount_group a5xx_perfcounter_groups
				[KGSL_PERFCOUNTER_GROUP_MAX] = {
	A5XX_PERFCOUNTER_GROUP(CP, cp,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(RBBM, rbbm,
		a5xx_counter_rbbm_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(PC, pc,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(VFD, vfd,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(HLSQ, hlsq,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(VPC, vpc,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(CCU, ccu,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(CMP, cmp,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(TSE, tse,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(RAS, ras,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(LRZ, lrz,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(UCHE, uche,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(TP, tp,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(SP, sp,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(RB, rb,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(VSC, vsc,
		a5xx_counter_enable, a5xx_counter_read),
		a5xx_counter_inline_enable, a5xx_counter_read),
	A5XX_PERFCOUNTER_GROUP(VBIF, vbif,
		a5xx_counter_vbif_enable, a5xx_counter_read_norestore),
	A5XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
Loading