Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a2e07b7f authored by qctecmdr; committed by Gerrit — the friendly Code Review server
Browse files

Merge "msm: kgsl: Make clear_pending_transactions target specific"

parents cf0ffb9c ce9c16c7
Loading
Loading
Loading
Loading
+7 −63
Original line number Diff line number Diff line
@@ -1645,64 +1645,6 @@ static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
	adreno_fault_detect_start(adreno_dev);
}

/**
 * adreno_clear_pending_transactions() - Clear transactions in GBIF/VBIF pipe
 * @device: Pointer to the device whose GBIF/VBIF pipe is to be cleared
 *
 * Halt GX traffic, then new client requests, then AXI/arbiter requests,
 * waiting for the hardware to acknowledge each halt in turn.
 *
 * Return: 0 on success or a negative error code if a halt ack timed out
 */
int adreno_clear_pending_transactions(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret = 0;

	if (adreno_has_gbif(adreno_dev)) {
		int arb_ret;

		/* Halt GBIF GX traffic and poll for halt ack */
		if (adreno_is_a615_family(adreno_dev) &&
			!adreno_is_a619_holi(adreno_dev)) {
			/* a615 family (except a619 holi) requests the GX
			 * halt through GPR0 and acks via the VBIF GX
			 * reset status register
			 */
			adreno_writereg(adreno_dev,
				ADRENO_REG_RBBM_GPR0_CNTL,
				GBIF_HALT_REQUEST);
			ret = adreno_wait_for_halt_ack(device,
				A6XX_RBBM_VBIF_GX_RESET_STATUS,
				VBIF_RESET_ACK_MASK);
		} else {
			adreno_writereg(adreno_dev,
				ADRENO_REG_RBBM_GBIF_HALT,
				gpudev->gbif_gx_halt_mask);
			ret = adreno_wait_for_halt_ack(device,
				ADRENO_REG_RBBM_GBIF_HALT_ACK,
				gpudev->gbif_gx_halt_mask);
		}

		if (ret)
			return ret;

		/* Halt new client requests */
		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT,
				gpudev->gbif_client_halt_mask);
		ret = adreno_wait_for_halt_ack(device,
				ADRENO_REG_GBIF_HALT_ACK,
				gpudev->gbif_client_halt_mask);

		/*
		 * Halt all AXI requests. Still issue the arbiter halt even
		 * if the client halt timed out, but report the first
		 * failure rather than overwriting it with the arbiter
		 * halt status.
		 */
		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT,
				gpudev->gbif_arb_halt_mask);
		arb_ret = adreno_wait_for_halt_ack(device,
				ADRENO_REG_GBIF_HALT_ACK,
				gpudev->gbif_arb_halt_mask);
		if (!ret)
			ret = arb_ret;
	} else {
		unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;

		/* Halt the VBIF XIN clients, wait for ack, then release */
		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
			mask);
		ret = adreno_wait_for_halt_ack(device,
				ADRENO_REG_VBIF_XIN_HALT_CTRL1, mask);
		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
	}
	return ret;
}

static int adreno_init(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -2182,6 +2124,7 @@ int adreno_start(struct kgsl_device *device, int priority)
static int adreno_stop(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int error = 0;

	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
@@ -2197,7 +2140,8 @@ static int adreno_stop(struct kgsl_device *device)
	/* Save physical performance counter values before GPU power down*/
	adreno_perfcounter_save(adreno_dev);

	adreno_clear_pending_transactions(device);
	if (gpudev->clear_pending_transactions)
		gpudev->clear_pending_transactions(adreno_dev);

	adreno_dispatcher_stop(adreno_dev);

@@ -2713,9 +2657,9 @@ static int adreno_soft_reset(struct kgsl_device *device)
	if (adreno_is_a304(adreno_dev))
		return -ENODEV;

	ret = adreno_clear_pending_transactions(device);
	if (ret) {
		dev_err(device->dev, "Timed out while clearing the VBIF\n");
	if (gpudev->clear_pending_transactions) {
		ret = gpudev->clear_pending_transactions(adreno_dev);
		if (ret)
			return ret;
	}

+18 −36
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@
#ifndef __ADRENO_H
#define __ADRENO_H

#include <linux/iopoll.h>
#include <linux/of.h>
#include "adreno_coresight.h"
#include "adreno_dispatch.h"
@@ -215,8 +216,6 @@ enum adreno_gpurev {
#define ADRENO_GMU_FAULT_SKIP_SNAPSHOT BIT(7)

/* VBIF,  GBIF halt request and ack mask */
#define GBIF_HALT_REQUEST       0x1E0
#define VBIF_RESET_ACK_MASK     0x00f0
#define VBIF_RESET_ACK_TIMEOUT  100

/* number of throttle counters for DCVS adjustment */
@@ -697,15 +696,9 @@ enum adreno_regs {
	ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
	ADRENO_REG_RBBM_GPR0_CNTL,
	ADRENO_REG_RBBM_GBIF_HALT,
	ADRENO_REG_RBBM_GBIF_HALT_ACK,
	ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
	ADRENO_REG_VBIF_VERSION,
	ADRENO_REG_GBIF_HALT,
	ADRENO_REG_GBIF_HALT_ACK,
	ADRENO_REG_VBIF_VERSION,
	ADRENO_REG_RBBM_GBIF_HALT,
	ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
	ADRENO_REG_GMU_AHB_FENCE_STATUS,
	ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
@@ -765,10 +758,6 @@ struct adreno_gpudev {

	struct adreno_coresight *coresight[2];

	unsigned int vbif_xin_halt_ctrl0_mask;
	unsigned int gbif_client_halt_mask;
	unsigned int gbif_arb_halt_mask;
	unsigned int gbif_gx_halt_mask;
	/* GPU specific function hooks */
	int (*probe)(struct platform_device *pdev, u32 chipid,
		const struct adreno_gpu_core *gpucore);
@@ -823,6 +812,7 @@ struct adreno_gpudev {
	 * gpu
	 */
	const struct adreno_power_ops *power_ops;
	int (*clear_pending_transactions)(struct adreno_device *adreno_dev);
};

/**
@@ -1679,35 +1669,27 @@ static inline bool adreno_has_gbif(struct adreno_device *adreno_dev)
}

/**
 * adreno_wait_for_halt_ack() - wait for GBIF/VBIF acknowledgment
 * for given HALT request.
 * adreno_wait_for_halt_ack - wait for acknowledgement for a bus halt request
 * @ack_reg: register offset to wait for acknowledge
 * @mask: A mask value to wait for
 *
 * Return: 0 on success or -ETIMEDOUT if the request timed out
 */
/*
 * NOTE(review): this span is a diff-view scrape in which the pre-patch
 * body (manual jiffies polling loop) and the post-patch body
 * (readl_poll_timeout) of this helper are interleaved without +/-
 * markers; it is not compilable as shown.  The lines below are kept
 * byte-identical, with comments marking which version each group of
 * lines appears to belong to.
 */
static inline int adreno_wait_for_halt_ack(struct kgsl_device *device,
	int ack_reg, unsigned int mask)
{
	/* old version: per-read polling state and timeout bookkeeping */
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned long wait_for_vbif;
	unsigned int val;
	int ret = 0;

	/* wait for the transactions to clear */
	wait_for_vbif = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
	while (1) {
		adreno_readreg(adreno_dev, ack_reg,
			&val);
		if ((val & mask) == mask)
			break;
		if (time_after(jiffies, wait_for_vbif)) {
	/* new version: map the ack register and delegate the polling loop
	 * to readl_poll_timeout (100 us interval, timeout in us)
	 */
	void __iomem *addr = device->reg_virt + (ack_reg << 2);
	u32 val;

	if (readl_poll_timeout(addr, val, (val & mask) == mask, 100,
		VBIF_RESET_ACK_TIMEOUT  * 1000)) {
		dev_err(device->dev,
				"GBIF/VBIF Halt ack timeout: reg=%08X mask=%08X status=%08X\n",
			"GBIF/VBIF Halt ack timeout: reg=%08x mask=%08x status=%08x\n",
			ack_reg, mask, val);
			ret = -ETIMEDOUT;
			break;
		}
		return -ETIMEDOUT;
	}

	return ret;
	return 0;
}

static inline void adreno_deassert_gbif_halt(struct adreno_device *adreno_dev)
+14 −5
Original line number Diff line number Diff line
@@ -1040,10 +1040,6 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
				A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
				A3XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
				A3XX_VBIF_XIN_HALT_CTRL1),
};

static int _load_firmware(struct kgsl_device *device, const char *fwfile,
@@ -1232,12 +1228,24 @@ static bool a3xx_hw_isidle(struct adreno_device *adreno_dev)
	return adreno_irq_pending(adreno_dev) ? false : true;
}

/*
 * a3xx_clear_pending_transactions - drain in-flight VBIF transactions
 * @adreno_dev: handle to the adreno device
 *
 * Request a halt of the VBIF XIN clients selected by the a30x halt
 * mask, wait for the hardware to acknowledge the halt, then release
 * the halt request so the bus can resume.
 *
 * Return: 0 on success or a negative error code if the ack timed out
 */
static int a3xx_clear_pending_transactions(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const u32 halt_mask = A30X_VBIF_XIN_HALT_CTRL0_MASK;
	int status;

	/* Assert the halt request for the masked XIN clients */
	kgsl_regwrite(device, A3XX_VBIF_XIN_HALT_CTRL0, halt_mask);

	/* Poll until the hardware acknowledges the halt (or times out) */
	status = adreno_wait_for_halt_ack(device, A3XX_VBIF_XIN_HALT_CTRL1,
			halt_mask);

	/* De-assert the halt regardless of the poll outcome */
	kgsl_regwrite(device, A3XX_VBIF_XIN_HALT_CTRL0, 0);

	return status;
}

struct adreno_gpudev adreno_a3xx_gpudev = {
	.reg_offsets = a3xx_register_offsets,
	.ft_perf_counters = a3xx_ft_perf_counters,
	.ft_perf_counters_count = ARRAY_SIZE(a3xx_ft_perf_counters),
	.irq_handler = a3xx_irq_handler,
	.vbif_xin_halt_ctrl0_mask = A30X_VBIF_XIN_HALT_CTRL0_MASK,
	.probe = a3xx_probe,
	.rb_start = a3xx_rb_start,
	.init = a3xx_init,
@@ -1253,4 +1261,5 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
	.read_alwayson = a3xx_read_alwayson,
	.hw_isidle = a3xx_hw_isidle,
	.power_ops = &adreno_power_operations,
	.clear_pending_transactions = a3xx_clear_pending_transactions,
};
+15 −5
Original line number Diff line number Diff line
@@ -2056,10 +2056,6 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
				A5XX_RBBM_PERFCTR_LOAD_VALUE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
				A5XX_RBBM_PERFCTR_LOAD_VALUE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
				A5XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
				A5XX_VBIF_XIN_HALT_CTRL1),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION,
				A5XX_VBIF_VERSION),
	ADRENO_REG_DEFINE(ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
@@ -2441,6 +2437,20 @@ static bool a5xx_hw_isidle(struct adreno_device *adreno_dev)
	return adreno_irq_pending(adreno_dev) ? false : true;
}

/*
 * a5xx_clear_pending_transactions - drain in-flight VBIF transactions
 * @adreno_dev: handle to the adreno device
 *
 * Halt the VBIF XIN clients selected by the a5xx halt mask, wait for
 * the acknowledge bits, then release the halt request again.
 *
 * Return: 0 on success or a negative error code if the ack timed out
 */
static int a5xx_clear_pending_transactions(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const u32 halt_mask = A5XX_VBIF_XIN_HALT_CTRL0_MASK;
	int status;

	/* Assert the halt request for the masked XIN clients */
	kgsl_regwrite(device, A5XX_VBIF_XIN_HALT_CTRL0, halt_mask);

	/* Wait for the hardware to acknowledge the halt */
	status = adreno_wait_for_halt_ack(device, A5XX_VBIF_XIN_HALT_CTRL1,
			halt_mask);

	/* De-assert the halt regardless of the poll outcome */
	kgsl_regwrite(device, A5XX_VBIF_XIN_HALT_CTRL0, 0);

	return status;
}


#ifdef CONFIG_QCOM_KGSL_CORESIGHT
static struct adreno_coresight_register a5xx_coresight_registers[] = {
	{ A5XX_RBBM_CFG_DBGBUS_SEL_A },
@@ -2648,7 +2658,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.irq_handler = a5xx_irq_handler,
	.rb_start = a5xx_rb_start,
	.microcode_read = a5xx_microcode_read,
	.vbif_xin_halt_ctrl0_mask = A5XX_VBIF_XIN_HALT_CTRL0_MASK,
	.is_sptp_idle = a5xx_is_sptp_idle,
	.regulator_enable = a5xx_regulator_enable,
	.regulator_disable = a5xx_regulator_disable,
@@ -2668,4 +2677,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
	.read_alwayson = a5xx_read_alwayson,
	.hw_isidle = a5xx_hw_isidle,
	.power_ops = &adreno_power_operations,
	.clear_pending_transactions = a5xx_clear_pending_transactions,
};
+25 −18
Original line number Diff line number Diff line
@@ -2137,14 +2137,6 @@ int a6xx_probe_common(struct platform_device *pdev,
	adreno_dev->gpu_llc_slice_enable = true;
	adreno_dev->gpuhtw_llc_slice_enable = true;

	if (adreno_has_gbif(adreno_dev)) {
		gpudev->gbif_client_halt_mask = A6XX_GBIF_CLIENT_HALT_MASK;
		gpudev->gbif_arb_halt_mask = A6XX_GBIF_ARB_HALT_MASK;
		gpudev->gbif_gx_halt_mask = A6XX_GBIF_GX_HALT_MASK;
	} else
		gpudev->vbif_xin_halt_ctrl0_mask =
				A6XX_VBIF_XIN_HALT_CTRL0_MASK;

	/* Set the GPU busy counter for frequency scaling */
	adreno_dev->perfctr_pwr_lo = A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L;

@@ -2262,19 +2254,9 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
				A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
				A6XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
				A6XX_VBIF_XIN_HALT_CTRL1),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GPR0_CNTL, A6XX_RBBM_GPR0_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
				A6XX_RBBM_VBIF_GX_RESET_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GBIF_HALT,
				A6XX_RBBM_GBIF_HALT),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GBIF_HALT_ACK,
				A6XX_RBBM_GBIF_HALT_ACK),
	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
				A6XX_GMU_AO_HOST_INTERRUPT_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
@@ -2460,6 +2442,29 @@ void a6xx_do_gbif_halt(struct adreno_device *adreno_dev,
	dev_err(device->dev, "%s GBIF Halt ack timed out\n", client);
}

/* This is only defined for non-GMU and non-RGMU targets */
/*
 * a6xx_clear_pending_transactions - drain GX then GBIF traffic
 * @adreno_dev: handle to the adreno device
 *
 * Halt GX-side traffic first (via the variant-specific register), wait
 * for the acknowledge, then finish by halting the GBIF pipe through
 * a6xx_halt_gbif().
 *
 * Return: 0 on success or a negative error code on halt ack timeout
 */
static int a6xx_clear_pending_transactions(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	if (adreno_is_a619_holi(adreno_dev)) {
		/* 0x1e0 is the GBIF halt request value (was the
		 * GBIF_HALT_REQUEST macro); a619 holi requests the halt
		 * through GPR0
		 */
		kgsl_regwrite(device, A6XX_RBBM_GPR0_CNTL, 0x1e0);
		/* 0xf0 is the VBIF reset ack mask (was VBIF_RESET_ACK_MASK) */
		ret = adreno_wait_for_halt_ack(device,
			A6XX_RBBM_VBIF_GX_RESET_STATUS, 0xf0);
	} else {
		/* Other targets halt GX via the RBBM GBIF halt register */
		kgsl_regwrite(device, A6XX_RBBM_GBIF_HALT,
			A6XX_GBIF_GX_HALT_MASK);
		ret = adreno_wait_for_halt_ack(device, A6XX_RBBM_GBIF_HALT_ACK,
			A6XX_GBIF_GX_HALT_MASK);
	}

	if (ret)
		return ret;

	/* a6xx_halt_gbif() is defined elsewhere; presumably it halts the
	 * GBIF client and arbiter traffic — confirm against its definition
	 */
	return a6xx_halt_gbif(adreno_dev);
}

struct adreno_gpudev adreno_a6xx_gpudev = {
	.reg_offsets = a6xx_register_offsets,
	.probe = a6xx_probe,
@@ -2492,6 +2497,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
#endif
	.read_alwayson = a6xx_read_alwayson,
	.power_ops = &adreno_power_operations,
	.clear_pending_transactions = a6xx_clear_pending_transactions,
};

struct adreno_gpudev adreno_a6xx_hwsched_gpudev = {
@@ -2604,6 +2610,7 @@ struct adreno_gpudev adreno_a619_holi_gpudev = {
#endif
	.read_alwayson = a6xx_read_alwayson,
	.power_ops = &adreno_power_operations,
	.clear_pending_transactions = a6xx_clear_pending_transactions,
};

struct adreno_gpudev adreno_a630_gpudev = {
Loading