Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9f80a737 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Reorganize interrupt management code"

parents 9f8b70c0 3c802114
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -26,7 +26,6 @@ msm_adreno-y += \
	adreno_trace.o \
	adreno_a3xx.o \
	adreno_a4xx.o \
	adreno_a3xx_trace.o \
	adreno_a3xx_snapshot.o \
	adreno_a4xx_snapshot.o \
	adreno.o \
+21 −2
Original line number Diff line number Diff line
@@ -14,13 +14,32 @@
#ifndef _A4XX_REG_H
#define _A4XX_REG_H

/* A4XX interrupt bit that are different from A3XX */
/* A4XX interrupt bits */
#define A4XX_INT_RBBM_GPU_IDLE			0
#define A4XX_INT_RBBM_AHB_ERROR			1
#define A4XX_INT_RBBM_REG_TIMEOUT		2
#define A4XX_INT_RBBM_ME_MS_TIMEOUT		3
#define A4XX_INT_RBBM_PFP_MS_TIMEOUT		4
#define A4XX_INT_RBBM_ETS_MS_TIMEOUT		5
#define A4XX_INT_RBBM_ASYNC_OVERFLOW		6
#define A4XX_INT_RBBM_GPC_ERR			7
#define A4XX_INT_CP_OPCODE_ERR			9
#define A4XX_INT_CP_SW				8
#define A4XX_INT_CP_OPCODE_ERROR		9
#define A4XX_INT_CP_RESERVED_BIT_ERROR		10
#define A4XX_INT_CP_HW_FAULT			11
#define A4XX_INT_CP_DMA				12
#define A4XX_INT_CP_IB2_INT			13
#define A4XX_INT_CP_IB1_INT			14
#define A4XX_INT_CP_RB_INT			15
#define A4XX_INT_CP_REG_PROTECT_FAULT		16
#define A4XX_INT_CP_RB_DONE_TS			17
#define A4XX_INT_CP_VS_DONE_TS			18
#define A4XX_INT_CP_PS_DONE_TS			19
#define A4XX_INT_CACHE_FLUSH_TS			20
#define A4XX_INT_CP_AHB_ERROR_HALT		21
#define A4XX_INT_RBBM_ATB_BUS_OVERFLOW		22
#define A4XX_INT_MISC_HANG_DETECT		24
#define A4XX_INT_UCHE_OOB_ACCESS		25
#define A4XX_INT_RBBM_DPM_CALC_ERR		28
#define A4XX_INT_RBBM_DPM_EPOCH_ERR		29
#define A4XX_INT_RBBM_DPM_THERMAL_YELLOW_ERR	30
+83 −16
Original line number Diff line number Diff line
@@ -350,15 +350,76 @@ static inline void adreno_irqctrl(struct adreno_device *adreno_dev, int state)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/*
	 * Program the RBBM interrupt mask directly: the full per-GPU mask
	 * to enable interrupts, or 0 to mask everything.  The old
	 * gpudev->irq_control() hook was removed in this change, so no
	 * indirection through the gpudev ops is needed (or possible).
	 */
	if (state)
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK,
				gpudev->irq->mask);
	else
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK, 0);
}

/*
 * adreno_hang_int_callback() - ISR for fatal interrupts that hang the GPU
 * @adreno_dev: Pointer to the adreno device
 * @bit: Interrupt bit that fired
 *
 * Logs the hang, turns the GPU IRQ off and asks the dispatcher to
 * fault-recover the device.
 */
void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;

	KGSL_DRV_CRIT(device, "MISC: GPU hang detected\n");
	/* Turn the GPU IRQ off (KGSL_PWRFLAGS_OFF) while recovery runs */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);

	/* Trigger a fault in the dispatcher - this will effect a restart */
	adreno_dispatcher_irq_fault(device);
}

/*
 * adreno_cp_callback() - CP (command processor) interrupt handler
 * @adreno_dev: Adreno device pointer
 * @bit: Interrupt bit that fired
 *
 * Handle the CP interrupt generated by the GPU.
 */
void adreno_cp_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;

	/* Defer event processing to workqueue context, out of the ISR */
	queue_work(device->work_queue, &device->event_work);
	adreno_dispatcher_schedule(device);
}

/*
 * adreno_irq_handler() - Top level interrupt handler for the adreno device
 * @device: KGSL device pointer
 *
 * Reads the RBBM interrupt status, dispatches every set bit to its
 * registered callback, traces the status and acknowledges the serviced
 * bits in hardware.
 *
 * Return: IRQ_HANDLED if at least one callback ran, IRQ_NONE otherwise.
 */
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_irq *irq_params = gpudev->irq;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status = 0, tmp = 0;
	int i;

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);

	/* Loop through all set interrupts and call respective handlers */
	for (tmp = status, i = 0; tmp &&
		 i < irq_params->funcs_count; i++) {
		if (tmp & 1) {
			if (irq_params->funcs[i].func != NULL) {
				irq_params->funcs[i].func(adreno_dev, i);
				ret = IRQ_HANDLED;
			} else {
				KGSL_DRV_CRIT(device,
					"Unhandled interrupt bit %x\n", i);
			}
		}
		tmp >>= 1;
	}

	gpudev->irq_trace(adreno_dev, status);

	/* Clear exactly the bits that were read so new IRQs are not lost */
	if (status)
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
				status);

	return ret;
}

static inline bool _rev_match(unsigned int id, unsigned int entry)
@@ -884,13 +945,6 @@ static int adreno_init(struct kgsl_device *device)
		adreno_ft_regs[i] = adreno_getreg(adreno_dev,
					adreno_ft_regs_default[i]);

	/* turn on hang interrupt for A4XX and a330v2 by default */
	if ((adreno_is_a4xx(adreno_dev)) || (adreno_is_a330v2(adreno_dev)))
		set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);

	if (gpudev->irq_setup)
		gpudev->irq_setup(adreno_dev);

	ret = adreno_perfcounter_init(adreno_dev);

	/* Power down the device */
@@ -1475,11 +1529,8 @@ static ssize_t _ft_hang_intr_status_store(struct device *dev,
		case KGSL_STATE_NAP:
		case KGSL_STATE_SLEEP:
			kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
		case KGSL_STATE_ACTIVE: {
			struct adreno_gpudev *gpudev =
				ADRENO_GPU_DEVICE(adreno_dev);
			gpudev->irq_control(adreno_dev, 1);
		}
		case KGSL_STATE_ACTIVE:
			adreno_irqctrl(adreno_dev, 1);
		/*
		 * For following states setting will be picked up on device
		 * start. Still need them in switch statement to differentiate
@@ -2011,6 +2062,23 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
	return status;
}

/*
 * adreno_irq_pending() - Checks if interrupt is generated by h/w
 * @adreno_dev: Pointer to device whose interrupts are checked
 *
 * Returns 1 when any enabled interrupt bit is set in the RBBM status
 * register, 0 otherwise.
 */
inline unsigned int adreno_irq_pending(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int int_status;

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &int_status);

	/* Only bits that are enabled in the IRQ mask count as pending */
	if (int_status & gpudev->irq->mask)
		return 1;

	return 0;
}


/**
 * adreno_hw_isidle() - Check if the GPU core is idle
 * @adreno_dev: Pointer to the Adreno device structure for the GPU
@@ -2021,7 +2089,6 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
bool adreno_hw_isidle(struct adreno_device *adreno_dev)
{
	unsigned int reg_rbbm_status;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
		&reg_rbbm_status);
@@ -2030,7 +2097,7 @@ bool adreno_hw_isidle(struct adreno_device *adreno_dev)
		return false;

	/* Don't consider ourselves idle if there is an IRQ pending */
	if (gpudev->irq_pending(adreno_dev))
	if (adreno_irq_pending(adreno_dev))
		return false;

	return true;
+4 −8
Original line number Diff line number Diff line
@@ -422,14 +422,10 @@ enum adreno_regs {
	ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
	ADRENO_REG_RBBM_INT_0_MASK,
	ADRENO_REG_RBBM_INT_0_STATUS,
	ADRENO_REG_RBBM_AHB_ERROR_STATUS,
	ADRENO_REG_RBBM_PM_OVERRIDE2,
	ADRENO_REG_RBBM_AHB_CMD,
	ADRENO_REG_RBBM_INT_CLEAR_CMD,
	ADRENO_REG_RBBM_SW_RESET_CMD,
	ADRENO_REG_RBBM_CLOCK_CTL,
	ADRENO_REG_RBBM_AHB_ME_SPLIT_STATUS,
	ADRENO_REG_RBBM_AHB_PFP_SPLIT_STATUS,
	ADRENO_REG_VPC_DEBUG_RAM_SEL,
	ADRENO_REG_VPC_DEBUG_RAM_READ,
	ADRENO_REG_PA_SC_AA_CONFIG,
@@ -604,10 +600,7 @@ struct adreno_gpudev {
	struct adreno_irq *irq;
	int num_prio_levels;
	/* GPU specific function hooks */
	irqreturn_t (*irq_handler)(struct adreno_device *);
	void (*irq_control)(struct adreno_device *, int);
	unsigned int (*irq_pending)(struct adreno_device *);
	void (*irq_setup)(struct adreno_device *);
	void (*irq_trace)(struct adreno_device *, unsigned int status);
	void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);
	int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
	int (*perfcounter_init)(struct adreno_device *);
@@ -801,6 +794,9 @@ void adreno_iommu_set_pt_generate_rb_cmds(struct adreno_ringbuffer *rb,
void adreno_fault_detect_start(struct adreno_device *adreno_dev);
void adreno_fault_detect_stop(struct adreno_device *adreno_dev);

void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit);
void adreno_cp_callback(struct adreno_device *adreno_dev, int bit);

static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
{
	return ((ADRENO_GPUREV(adreno_dev) >= 300) &&
+46 −245
Original line number Diff line number Diff line
@@ -23,8 +23,8 @@
#include "adreno_a3xx.h"
#include "adreno_a4xx.h"
#include "a4xx_reg.h"
#include "adreno_a3xx_trace.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"

/*
 * Set of registers to dump for A3XX on snapshot.
@@ -716,20 +716,18 @@ int a3xx_rb_init(struct adreno_device *adreno_dev,
}

/*
 * a3xx_a4xx_err_callback() - Common interrupts shared between A4XX
 * and A3XX
 * a3xx_err_callback() - Call back for a3xx error interrupts
 * @adreno_dev: Pointer to device
 * @bit: Interrupt bit
 */
void a3xx_a4xx_err_callback(struct adreno_device *adreno_dev, int bit)
void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int reg;

	switch (bit) {
	case A3XX_INT_RBBM_AHB_ERROR: {
		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_AHB_ERROR_STATUS,
				&reg);
		kgsl_regread(device, A3XX_RBBM_AHB_ERROR_STATUS, &reg);

		/*
		 * Return the word address of the erroring register so that it
@@ -742,137 +740,49 @@ void a3xx_a4xx_err_callback(struct adreno_device *adreno_dev, int bit)
			(reg >> 24) & 0xF);

		/* Clear the error */
		if (adreno_is_a4xx(adreno_dev))
			adreno_writereg(adreno_dev, ADRENO_REG_RBBM_AHB_CMD,
					(1 << 4));
		else
			adreno_writereg(adreno_dev, ADRENO_REG_RBBM_AHB_CMD,
					(1 << 3));
		kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));

		break;
		return;
	}
	case A3XX_INT_RBBM_REG_TIMEOUT:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: AHB register timeout\n");
		break;
	case A3XX_INT_RBBM_ME_MS_TIMEOUT:
		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_AHB_ME_SPLIT_STATUS,
				&reg);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"RBBM | ME master split timeout | status=%x\n", reg);
	case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus oveflow\n");
		break;
	case A3XX_INT_RBBM_PFP_MS_TIMEOUT:
		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_AHB_PFP_SPLIT_STATUS,
				&reg);
	case A3XX_INT_CP_T0_PACKET_IN_IB:
		KGSL_DRV_CRIT_RATELIMIT(device,
			"RBBM | PFP master split timeout | status=%x\n", reg);
			"ringbuffer TO packet in IB interrupt\n");
		break;
	case A3XX_INT_UCHE_OOB_ACCESS:
	case A3XX_INT_CP_OPCODE_ERROR:
		KGSL_DRV_CRIT_RATELIMIT(device,
			"UCHE:  Out of bounds access\n");
			"ringbuffer opcode error interrupt\n");
		break;
	case A3XX_INT_CP_RESERVED_BIT_ERROR:
		KGSL_DRV_CRIT_RATELIMIT(device,
			"ringbuffer reserved bit error interrupt\n");
		break;
	case A3XX_INT_CP_HW_FAULT:
		adreno_readreg(adreno_dev, ADRENO_REG_CP_HW_FAULT, &reg);
		kgsl_regread(device, A3XX_CP_HW_FAULT, &reg);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Ringbuffer HW fault | status=%x\n", reg);
		break;
	case A3XX_INT_CP_REG_PROTECT_FAULT:
		adreno_readreg(adreno_dev, ADRENO_REG_CP_PROTECT_STATUS, &reg);

		kgsl_regread(device, A3XX_CP_PROTECT_STATUS, &reg);
		KGSL_DRV_CRIT(device,
			"CP | Protected mode error| %s | addr=%x\n",
			reg & (1 << 24) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2);
		break;
		return;
	case A3XX_INT_CP_AHB_ERROR_HALT:
		KGSL_DRV_CRIT(device, "ringbuffer AHB error interrupt\n");
		break;
	case A3XX_INT_MISC_HANG_DETECT:
		KGSL_DRV_CRIT(device, "MISC: GPU hang detected\n");
		break;
	}
}

static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;
	const char *err = "";
	/* Limit to 10 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	switch (bit) {
	case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
		err = "RBBM: ATB bus oveflow";
		break;
	case A3XX_INT_VFD_ERROR:
		err = "VFD: Out of bounds access";
		break;
	case A3XX_INT_CP_OPCODE_ERROR:
		err = "ringbuffer opcode error interrupt";
		break;
	case A3XX_INT_CP_T0_PACKET_IN_IB:
		err = "ringbuffer TO packet in IB interrupt";
		KGSL_DRV_CRIT_RATELIMIT(device,
			"ringbuffer AHB error interrupt\n");
		break;
	default:
		return;
	}

	/*
	* Limit error interrupt reporting to prevent
	* kernel logs causing watchdog timeout
	*/
	if (__ratelimit(&ratelimit_state))
		KGSL_DRV_CRIT(device, "%s\n", err);
}

/*
 * a3xx_fatal_err_callback() - Isr for fatal interrupts that hang GPU
 * @adreno_dev: Pointer to device
 * @bit: Interrupt bit
 *
 * Called for both A4XX and A3XX
 */
void a3xx_fatal_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;

	/* Call the other error routines to get the error print */
	switch (bit) {
	case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
	case A3XX_INT_VFD_ERROR:
	case A3XX_INT_CP_T0_PACKET_IN_IB:
		a3xx_err_callback(adreno_dev, bit);
	case A3XX_INT_UCHE_OOB_ACCESS:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
		break;
	default:
		a3xx_a4xx_err_callback(adreno_dev, bit);
	}

	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);

	/* Trigger a fault in the dispatcher - this will effect a restart */
	adreno_dispatcher_irq_fault(device);
		KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt\n");
	}

/*
 * a3xx_cp_callback() - CP interrupt handler
 * @adreno_dev: Adreno device pointer
 * @irq: irq number
 *
 * Handle the cp interrupt generated by GPU, common function between a3xx and
 * a4xx devices
 */
void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
{
	struct kgsl_device *device = &adreno_dev->dev;

	queue_work(device->work_queue, &device->event_work);
	adreno_dispatcher_schedule(device);
}


static int a3xx_perfcounter_enable_pwr(struct adreno_device *adreno_dev,
	unsigned int counter)
{
@@ -1363,37 +1273,35 @@ uint64_t a3xx_perfcounter_read(struct adreno_device *adreno_dev,

static struct adreno_irq_funcs a3xx_irq_funcs[] = {
	ADRENO_IRQ_CALLBACK(NULL),                    /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a3xx_a4xx_err_callback),  /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(a3xx_a4xx_err_callback),  /* 2 - RBBM_REG_TIMEOUT */
	/* * 3 - RBBM_ME_MS_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a3xx_a4xx_err_callback),
	/* 4 - RBBM_PFP_MS_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a3xx_a4xx_err_callback),
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(NULL),  /* 2 - RBBM_REG_TIMEOUT */
	ADRENO_IRQ_CALLBACK(NULL),  /* 3 - RBBM_ME_MS_TIMEOUT */
	ADRENO_IRQ_CALLBACK(NULL),  /* 4 - RBBM_PFP_MS_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 5 - RBBM_ATB_BUS_OVERFLOW */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 6 - RBBM_VFD_ERROR */
	ADRENO_IRQ_CALLBACK(NULL),  /* 6 - RBBM_VFD_ERROR */
	ADRENO_IRQ_CALLBACK(NULL),	/* 7 - CP_SW */
	/* 8 - CP_T0_PACKET_IN_IB */
	ADRENO_IRQ_CALLBACK(a3xx_fatal_err_callback),
	ADRENO_IRQ_CALLBACK(a3xx_fatal_err_callback),  /* 9 - CP_OPCODE_ERROR */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 8 - CP_T0_PACKET_IN_IB */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 9 - CP_OPCODE_ERROR */
	/* 10 - CP_RESERVED_BIT_ERROR */
	ADRENO_IRQ_CALLBACK(a3xx_fatal_err_callback),
	ADRENO_IRQ_CALLBACK(a3xx_fatal_err_callback),  /* 11 - CP_HW_FAULT */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 11 - CP_HW_FAULT */
	ADRENO_IRQ_CALLBACK(NULL),	             /* 12 - CP_DMA */
	ADRENO_IRQ_CALLBACK(a3xx_cp_callback),   /* 13 - CP_IB2_INT */
	ADRENO_IRQ_CALLBACK(a3xx_cp_callback),   /* 14 - CP_IB1_INT */
	ADRENO_IRQ_CALLBACK(a3xx_cp_callback),   /* 15 - CP_RB_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback),   /* 13 - CP_IB2_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback),   /* 14 - CP_IB1_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback),   /* 15 - CP_RB_INT */
	/* 16 - CP_REG_PROTECT_FAULT */
	ADRENO_IRQ_CALLBACK(a3xx_fatal_err_callback),
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),
	ADRENO_IRQ_CALLBACK(NULL),	       /* 17 - CP_RB_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL),	       /* 18 - CP_VS_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL),	       /* 19 - CP_PS_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL),	       /* 20 - CP_CACHE_FLUSH_TS */
	/* 21 - CP_AHB_ERROR_FAULT */
	ADRENO_IRQ_CALLBACK(a3xx_fatal_err_callback),
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),
	ADRENO_IRQ_CALLBACK(NULL),	       /* 22 - Unused */
	ADRENO_IRQ_CALLBACK(NULL),	       /* 23 - Unused */
	ADRENO_IRQ_CALLBACK(a3xx_fatal_err_callback),/* 24 - MISC_HANG_DETECT */
	ADRENO_IRQ_CALLBACK(a3xx_a4xx_err_callback),  /* 25 - UCHE_OOB_ACCESS */
	/* 24 - MISC_HANG_DETECT */
	ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 25 - UCHE_OOB_ACCESS */
	/* 26 to 31 - Unused */
};

@@ -1403,108 +1311,6 @@ static struct adreno_irq a3xx_irq = {
	.mask = A3XX_INT_MASK,
};

/*
 * a3xx_irq_setup() - Sets up callback functions and mask for interrupts
 * @adreno_dev: Adreno device pointer
 *
 * Called during initialization.  Adjusts the per-interrupt callback
 * table for hardware revisions with different fatality rules.
 */
static void a3xx_irq_setup(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_irq *irq_params = gpudev->irq;
	int i;
	/* On a330v2 only the hang interrupt should be fatal */
	if (adreno_is_a330v2(adreno_dev)) {
		for (i = 0; i < irq_params->funcs_count; i++) {
			/* Demote every fatal handler except hang-detect */
			if ((irq_params->funcs[i].func ==
				a3xx_fatal_err_callback) &&
				A3XX_INT_MISC_HANG_DETECT != i)
				irq_params->funcs[i].func =
						a3xx_err_callback;
		}
	}
}

/*
 * a3xx_irq_handler() - Interrupt handler function
 * @adreno_dev: Pointer to adreno device
 *
 * Interrupt handler for adreno device; this function is common between
 * a3xx and a4xx devices.  Reads the RBBM interrupt status, dispatches
 * each set bit to its registered callback, then clears the serviced bits.
 *
 * Return: IRQ_HANDLED if at least one callback ran, IRQ_NONE otherwise.
 */
irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_irq *irq_params = gpudev->irq;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status, tmp;
	int i;

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);

	/* Walk the status bits low-to-high; stop once no bits remain */
	for (tmp = status, i = 0; tmp &&
			i < irq_params->funcs_count; i++) {
		if (tmp & 1) {
			if (irq_params->funcs[i].func != NULL) {
				irq_params->funcs[i].func(adreno_dev, i);
				ret = IRQ_HANDLED;
			} else {
				KGSL_DRV_CRIT(device,
					"Unhandled interrupt bit %x\n", i);
			}
		}

		tmp >>= 1;
	}

	trace_kgsl_a3xx_irq_status(device, status);

	/* Clear exactly the bits that were read so new IRQs are not lost */
	if (status)
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
				status);
	return ret;
}

/*
 * a3xx_irq_control() - Function called to enable/disable interrupts
 * @adreno_dev: Pointer to device whose interrupts are enabled/disabled
 * @state: Non-zero to enable interrupts, zero to disable them
 *
 * This function is common for a3xx and a4xx adreno devices.
 */
void a3xx_irq_control(struct adreno_device *adreno_dev, int state)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int enable_mask = gpudev->irq->mask;

	/* The hang interrupt is enabled on demand via the device flag */
	if (test_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv))
		enable_mask |= (1 << A3XX_INT_MISC_HANG_DETECT);

	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK,
			state ? enable_mask : 0);
}

/*
 * a3xx_irq_pending() - Checks if interrupt is generated by h/w
 * @adreno_dev: Pointer to device whose interrupts are checked
 *
 * Returns 1 if a masked interrupt is pending from the device, else 0.
 * This function is shared by both a3xx and a4xx devices.
 */
unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int pending;

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &pending);

	/* Ignore status bits that are not enabled in the IRQ mask */
	pending &= gpudev->irq->mask;

	return pending ? 1 : 0;
}

static unsigned int counter_delta(struct adreno_device *adreno_dev,
			unsigned int reg, unsigned int *counter)
{
@@ -2005,6 +1811,7 @@ static void a3xx_protect_init(struct adreno_device *adreno_dev)
static void a3xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	adreno_vbif_start(adreno_dev, a3xx_vbif_platforms,
			ARRAY_SIZE(a3xx_vbif_platforms));
@@ -2029,10 +1836,12 @@ static void a3xx_start(struct adreno_device *adreno_dev)

	/* Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang */
	if (adreno_is_a330v2(adreno_dev))
	if (adreno_is_a330v2(adreno_dev)) {
		set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
		gpudev->irq->mask |= (1 << A3XX_INT_MISC_HANG_DETECT);
		kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
				(1 << 31) | 0xFFFF);
	else
	} else
		kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
				(1 << 16) | 0xFFF);

@@ -2148,8 +1957,6 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MERCIU_DATA2, A3XX_CP_MERCIU_DATA2),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_ADDR, A3XX_CP_MEQ_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_DATA, A3XX_CP_MEQ_DATA),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A3XX_CP_HW_FAULT),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_STATUS, A3XX_CP_PROTECT_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A3XX_RBBM_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A3XX_RBBM_PERFCTR_CTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
@@ -2160,9 +1967,6 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
					A3XX_RBBM_PERFCTR_PWR_1_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A3XX_RBBM_INT_0_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A3XX_RBBM_INT_0_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_ERROR_STATUS,
					A3XX_RBBM_AHB_ERROR_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_CMD, A3XX_RBBM_AHB_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
				A3XX_RBBM_INT_CLEAR_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A3XX_RBBM_CLOCK_CTL),
@@ -2214,15 +2018,12 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
	.ft_perf_counters_count = ARRAY_SIZE(a3xx_ft_perf_counters),
	.perfcounters = &a3xx_perfcounters,
	.irq = &a3xx_irq,
	.irq_trace = trace_kgsl_a3xx_irq_status,
	.snapshot_data = &a3xx_snapshot_data,
	.num_prio_levels = 1,

	.rb_init = a3xx_rb_init,
	.perfcounter_init = a3xx_perfcounter_init,
	.irq_control = a3xx_irq_control,
	.irq_handler = a3xx_irq_handler,
	.irq_pending = a3xx_irq_pending,
	.irq_setup = a3xx_irq_setup,
	.busy_cycles = a3xx_busy_cycles,
	.start = a3xx_start,
	.snapshot = a3xx_snapshot,
Loading