Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e0ec501 authored by Lucille Sylvester
Browse files

msm: kgsl: Enable frequency cycling for thermal mitigation



Allow thermal mitigation to select effective GPU frequencies
between the supported frequencies.  This gives more flexibility
in the thermal performance limitations.

Change-Id: I618da263b02f09f293d737a13de567a3c31b37f0
Signed-off-by: Lucille Sylvester <lsylvest@codeaurora.org>
parent 4712ce04
Loading
Loading
Loading
Loading
+107 −4
Original line number Diff line number Diff line
@@ -42,6 +42,9 @@
#define INIT_UDELAY		200
#define MAX_UDELAY		2000

/* Number of jiffies for a full thermal cycle */
#define TH_HZ			20

struct clk_pair {
	const char *name;
	uint map;
@@ -205,6 +208,18 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
	 */
	new_level = _adjust_pwrlevel(pwr, new_level, &pwr->constraint);

	/*
	 * If thermal cycling is required and the new level hits the
	 * thermal limit, kick off the cycling.
	 */
	if ((pwr->thermal_cycle == CYCLE_ENABLE) &&
			(new_level == pwr->thermal_pwrlevel)) {
		pwr->thermal_cycle = CYCLE_ACTIVE;
		mod_timer(&pwr->thermal_timer, jiffies +
				(TH_HZ - pwr->thermal_timeout));
		pwr->thermal_highlow = 1;
	}

	if (new_level == old_level)
		return;

@@ -462,8 +477,29 @@ static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev,

	mutex_lock(&device->mutex);
	level = _get_nearest_pwrlevel(pwr, val);
	if (level < 0)
	/* If the requested power level is not supported by hw, try cycling */
	if (level < 0) {
		unsigned int hfreq, diff, udiff, i;
		if ((val < pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq) ||
			(val > pwr->pwrlevels[0].gpu_freq))
			goto done;
		/* Find the neighboring frequencies */
		for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
			if ((pwr->pwrlevels[i].gpu_freq > val) &&
				(pwr->pwrlevels[i + 1].gpu_freq < val)) {
				level = i;
				break;
			}
		}
		hfreq = pwr->pwrlevels[i].gpu_freq;
		diff =  hfreq - pwr->pwrlevels[i + 1].gpu_freq;
		udiff = hfreq - val;
		pwr->thermal_timeout = (udiff * TH_HZ) / diff;
		pwr->thermal_cycle = CYCLE_ENABLE;
	} else {
		pwr->thermal_cycle = CYCLE_DISABLE;
		del_timer_sync(&pwr->thermal_timer);
	}

	pwr->thermal_pwrlevel = (unsigned int) level;

@@ -482,11 +518,21 @@ static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev,

	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned int freq;
	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
	freq = pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq;
	/* Calculate the effective frequency if we're cycling */
	if (pwr->thermal_cycle) {
		unsigned int hfreq = freq;
		unsigned int lfreq = pwr->pwrlevels[pwr->
				thermal_pwrlevel + 1].gpu_freq;
		freq = pwr->thermal_timeout * (lfreq / TH_HZ) +
			(TH_HZ - pwr->thermal_timeout) * (hfreq / TH_HZ);
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", freq);
}

static ssize_t kgsl_pwrctrl_gpuclk_store(struct device *dev,
@@ -1039,6 +1085,55 @@ void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);

/**
 * kgsl_thermal_cycle() - Work function for thermal timer.
 * @work: The input work
 *
 * Queued from kgsl_thermal_timer() so the power level change runs in
 * process context.  Alternates the GPU between the thermal limit level
 * and the next lower level, based on thermal_highlow.
 *
 * Note: the former "device == NULL" check was removed; container_of()
 * on an embedded member of a valid work item cannot produce NULL, so
 * the check was dead code.
 */
static void kgsl_thermal_cycle(struct work_struct *work)
{
	struct kgsl_pwrctrl *pwr = container_of(work, struct kgsl_pwrctrl,
						thermal_cycle_ws);
	struct kgsl_device *device = container_of(pwr, struct kgsl_device,
							pwrctrl);

	mutex_lock(&device->mutex);
	/*
	 * Cycling may have been stopped (CYCLE_ENABLE/CYCLE_DISABLE)
	 * between queueing and running this work, so only switch levels
	 * while it is still active.
	 */
	if (pwr->thermal_cycle == CYCLE_ACTIVE) {
		if (pwr->thermal_highlow)
			kgsl_pwrctrl_pwrlevel_change(device,
					pwr->thermal_pwrlevel);
		else
			kgsl_pwrctrl_pwrlevel_change(device,
					pwr->thermal_pwrlevel + 1);
	}
	mutex_unlock(&device->mutex);
}

void kgsl_thermal_timer(unsigned long data)
{
	struct kgsl_device *device = (struct kgsl_device *) data;

	/* Keep the timer running consistently despite processing time */
	if (device->pwrctrl.thermal_highlow) {
		mod_timer(&device->pwrctrl.thermal_timer,
					jiffies +
					device->pwrctrl.thermal_timeout);
		device->pwrctrl.thermal_highlow = 0;
	} else {
		mod_timer(&device->pwrctrl.thermal_timer,
					jiffies + (TH_HZ -
					device->pwrctrl.thermal_timeout));
		device->pwrctrl.thermal_highlow = 1;
	}
	/* Have work run in a non-interrupt context. */
	queue_work(device->work_queue, &device->pwrctrl.thermal_cycle_ws);
}

int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, k, m, set_bus = 1, n = 0, result = 0;
@@ -1203,6 +1298,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
	}
	pwr->pwrlevels[0].bus_max = i - 1;

	INIT_WORK(&pwr->thermal_cycle_ws, kgsl_thermal_cycle);
	setup_timer(&pwr->thermal_timer, kgsl_thermal_timer,
			(unsigned long) device);

	return result;

clk_err:
@@ -1470,6 +1569,10 @@ _slumber(struct kgsl_device *device)
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		if (device->pwrctrl.thermal_cycle == CYCLE_ACTIVE) {
			device->pwrctrl.thermal_cycle = CYCLE_ENABLE;
			del_timer_sync(&device->pwrctrl.thermal_timer);
		}
		/* make sure power is on to stop the device*/
		kgsl_pwrctrl_enable(device);
		device->ftbl->suspend_context(device);
+16 −0
Original line number Diff line number Diff line
@@ -41,6 +41,17 @@
	{ KGSL_CONSTRAINT_PWR_MIN, "Min" }, \
	{ KGSL_CONSTRAINT_PWR_MAX, "Max" }

/*
 * States for thermal cycling.  _DISABLE means that no cycling has been
 * requested.  _ENABLE means that cycling has been requested, but GPU
 * DCVS is currently recommending running at a lower frequency than the
 * cycle frequency.  _ACTIVE means that the frequency is actively being
 * cycled.
 */
#define CYCLE_DISABLE	0
#define CYCLE_ENABLE	1
#define CYCLE_ACTIVE	2

struct platform_device;

struct kgsl_clk_stats {
@@ -122,6 +133,11 @@ struct kgsl_pwrctrl {
	uint64_t bus_ib[KGSL_MAX_PWRLEVELS];
	struct kgsl_pwr_constraint constraint;
	bool superfast;
	struct work_struct thermal_cycle_ws;
	struct timer_list thermal_timer;
	uint32_t thermal_timeout;
	uint32_t thermal_cycle;
	uint32_t thermal_highlow;
};

void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
+19 −1
Original line number Diff line number Diff line
@@ -189,6 +189,21 @@ void kgsl_pwrscale_enable(struct kgsl_device *device)
}
EXPORT_SYMBOL(kgsl_pwrscale_enable);

/*
 * Reconcile a DCVS-recommended power level with active thermal cycling.
 * Returns the level the GPU should actually run at.
 */
static int _thermal_adjust(struct kgsl_pwrctrl *pwr, int level)
{
	if (level >= pwr->active_pwrlevel) {
		/*
		 * A lower frequency has been recommended!  Stop thermal
		 * cycling (but keep the upper thermal limit) and switch
		 * to the lower frequency.
		 */
		pwr->thermal_cycle = CYCLE_ENABLE;
		del_timer_sync(&pwr->thermal_timer);
		return level;
	}

	/* Recommendation is above the thermal limit: stay capped. */
	return pwr->active_pwrlevel;
}

/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
@@ -232,6 +247,9 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				if (pwr->thermal_cycle == CYCLE_ACTIVE)
					level = _thermal_adjust(pwr, i);
				else
					level = i;
				break;
			}