Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bf8bb01d authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "PM / devfreq: bw_hwmon: Update to low latency, high sampling rate algorithm"

parents 16556b92 308a03c6
Loading
Loading
Loading
Loading
+55 −49
Original line number Diff line number Diff line
@@ -40,13 +40,6 @@
#define MON_MASK(m)		((m)->base + 0x298)
#define MON_MATCH(m)		((m)->base + 0x29C)

/*
 * Don't set the threshold lower than this value. This helps avoid
 * threshold IRQs when the traffic is close to zero and even small
 * changes can exceed the threshold percentage.
 */
#define FLOOR_MBPS	100UL

struct bwmon_spec {
	bool wrap_on_thres;
	bool overflow;
@@ -78,6 +71,12 @@ static void mon_disable(struct bwmon *m)
/*
 * mon_clear() - reset the monitor's byte counter.
 *
 * Writes 0x1 to the MON_CLEAR register and then forces completion with a
 * full barrier before any subsequent register access; see the inline
 * comment for why the barrier is required.
 */
static void mon_clear(struct bwmon *m)
{
	writel_relaxed(0x1, MON_CLEAR(m));
	/*
	 * The counter clear and IRQ clear bits are not in the same 4KB
	 * region. So, we need to make sure the counter clear is completed
	 * before we try to clear the IRQ or do any other counter operations.
	 */
	mb();
}

static void mon_irq_enable(struct bwmon *m)
@@ -112,12 +111,12 @@ static void mon_irq_disable(struct bwmon *m)

static unsigned int mon_irq_status(struct bwmon *m)
{
	u32 mval, gval;
	u32 mval;

	mval = readl_relaxed(MON_INT_STATUS(m)),
	gval = readl_relaxed(GLB_INT_STATUS(m));
	mval = readl_relaxed(MON_INT_STATUS(m));

	dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval, gval);
	dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
			readl_relaxed(GLB_INT_STATUS(m)));

	return mval;
}
@@ -165,14 +164,6 @@ static unsigned long mon_get_count(struct bwmon *m)
/* ********** CPUBW specific code  ********** */

/*
 * Convert a byte count measured over a @us microsecond window into a
 * whole-MB/s rate.  Scales to bytes/second first (64-bit divide via
 * do_div()), then rounds the final division up to the next MB/s.
 */
static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
{
	long long per_sec = bytes * USEC_PER_SEC;

	do_div(per_sec, us);
	return DIV_ROUND_UP_ULL(per_sec, SZ_1M);
}

static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
				  unsigned int tolerance_percent)
{
@@ -183,48 +174,61 @@ static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
	return mbps;
}

static unsigned long meas_bw_and_set_irq(struct bw_hwmon *hw,
					 unsigned int tol, unsigned int us)
static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
{
	unsigned long mbps;
	u32 limit;
	unsigned int sample_ms = hw->df->profile->polling_ms;
	struct bwmon *m = to_bwmon(hw);
	unsigned long count;

	mon_disable(m);
	count = mon_get_count(m);
	mon_clear(m);
	mon_irq_clear(m);
	mon_enable(m);

	mbps = mon_get_count(m);
	mbps = bytes_to_mbps(mbps, us);
	return count;
}

static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
{
	unsigned long count;
	u32 limit;
	struct bwmon *m = to_bwmon(hw);

	mon_disable(m);
	count = mon_get_count(m);
	mon_clear(m);
	mon_irq_clear(m);

	/*
	 * If the counter wraps on thres, don't set the thres too low.
	 * Setting it too low runs the risk of the counter wrapping around
	 * multiple times before the IRQ is processed.
	 */
	if (likely(!m->spec->wrap_on_thres))
		limit = mbps_to_bytes(max(mbps, FLOOR_MBPS), sample_ms, tol);
		limit = bytes;
	else
		limit = mbps_to_bytes(max(mbps, 400UL), sample_ms, tol);
		limit = max(bytes, 500000UL);

	mon_set_limit(m, limit);

	mon_clear(m);
	mon_irq_clear(m);
	mon_enable(m);

	dev_dbg(m->dev, "MBps = %lu\n", mbps);
	return mbps;
	return count;
}

static irqreturn_t bwmon_intr_handler(int irq, void *dev)
{
	struct bwmon *m = dev;
	if (mon_irq_status(m)) {
		update_bw_hwmon(&m->hw);

	if (!mon_irq_status(m))
		return IRQ_NONE;

	if (bw_hwmon_sample_end(&m->hw) > 0)
		return IRQ_WAKE_THREAD;

	return IRQ_HANDLED;
}

	return IRQ_NONE;
/*
 * bwmon_intr_thread() - threaded half of the bwmon IRQ.
 *
 * Runs only when the hard handler returned IRQ_WAKE_THREAD; hands the
 * completed sample to the governor via update_bw_hwmon().
 */
static irqreturn_t bwmon_intr_thread(int irq, void *dev)
{
	struct bwmon *m = dev;

	update_bw_hwmon(&m->hw);
	return IRQ_HANDLED;
}

static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
@@ -233,7 +237,8 @@ static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
	u32 limit;
	int ret;

	ret = request_threaded_irq(m->irq, NULL, bwmon_intr_handler,
	ret = request_threaded_irq(m->irq, bwmon_intr_handler,
				  bwmon_intr_thread,
				  IRQF_ONESHOT | IRQF_SHARED,
				  dev_name(m->dev), m);
	if (ret) {
@@ -370,11 +375,12 @@ static int bimc_bwmon_driver_probe(struct platform_device *pdev)
	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
	if (!m->hw.of_node)
		return -EINVAL;
	m->hw.start_hwmon = &start_bw_hwmon,
	m->hw.stop_hwmon = &stop_bw_hwmon,
	m->hw.suspend_hwmon = &suspend_bw_hwmon,
	m->hw.resume_hwmon = &resume_bw_hwmon,
	m->hw.meas_bw_and_set_irq = &meas_bw_and_set_irq,
	m->hw.start_hwmon = &start_bw_hwmon;
	m->hw.stop_hwmon = &stop_bw_hwmon;
	m->hw.suspend_hwmon = &suspend_bw_hwmon;
	m->hw.resume_hwmon = &resume_bw_hwmon;
	m->hw.get_bytes_and_clear = &get_bytes_and_clear;
	m->hw.set_thres = &set_thres;

	ret = register_bw_hwmon(dev, &m->hw);
	if (ret) {
+367 −54
Original line number Diff line number Diff line
/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -25,23 +25,55 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/devfreq.h>
#include <trace/events/power.h>
#include "governor.h"
#include "governor_bw_hwmon.h"

#define NUM_MBPS_ZONES		10
struct hwmon_node {
	unsigned int tolerance_percent;
	unsigned int guard_band_mbps;
	unsigned int decay_rate;
	unsigned int io_percent;
	unsigned int bw_step;
	unsigned int sample_ms;
	unsigned int up_scale;
	unsigned int up_thres;
	unsigned int down_thres;
	unsigned int down_count;
	unsigned int hist_memory;
	unsigned int hyst_trigger_count;
	unsigned int hyst_length;
	unsigned int idle_mbps;
	unsigned int low_power_ceil_mbps;
	unsigned int low_power_io_percent;
	unsigned int low_power_delay;
	unsigned int mbps_zones[NUM_MBPS_ZONES];

	unsigned long prev_ab;
	unsigned long *dev_ab;
	unsigned long resume_freq;
	unsigned long resume_ab;
	unsigned long bytes;
	unsigned long max_mbps;
	unsigned long hist_max_mbps;
	unsigned long hist_mem;
	unsigned long hyst_peak;
	unsigned long hyst_mbps;
	unsigned long hyst_trig_win;
	unsigned long hyst_en;
	unsigned long above_low_power;
	unsigned long prev_req;
	unsigned long up_wake_mbps;
	unsigned long down_wake_mbps;
	unsigned int wake;
	unsigned int down_cnt;
	ktime_t prev_ts;
	ktime_t hist_max_ts;
	bool mon_started;
	struct list_head list;
	void *orig_data;
@@ -50,6 +82,10 @@ struct hwmon_node {
	struct attribute_group *attr_grp;
};

#define UP_WAKE 1
#define DOWN_WAKE 2
static DEFINE_SPINLOCK(irq_lock);

static LIST_HEAD(hwmon_list);
static DEFINE_MUTEX(list_lock);

@@ -88,51 +124,301 @@ show_attr(__attr) \
store_attr(__attr, min, max)		\
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)

/*
 * show_list_attr() - generate a sysfs "show" handler for an unsigned int
 * list attribute stored in struct hwmon_node::name (list ends at the
 * first 0 entry or after @n elements).
 *
 * Fix: the original passed PAGE_SIZE as the size for every snprintf()
 * even though it writes at offset @cnt, so a long list could run past
 * the one-page sysfs buffer; and snprintf() returns the would-be length
 * on truncation, which would corrupt the running offset.  Use
 * scnprintf() with the remaining space instead — it bounds the write
 * and returns the number of bytes actually stored.
 */
#define show_list_attr(name, n) \
static ssize_t show_list_##name(struct device *dev,			\
			struct device_attribute *attr, char *buf)	\
{									\
	struct devfreq *df = to_devfreq(dev);				\
	struct hwmon_node *hw = df->data;				\
	unsigned int i, cnt = 0;					\
									\
	for (i = 0; i < n && hw->name[i]; i++)				\
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u ",	\
				 hw->name[i]);				\
	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n");		\
	return cnt;							\
}

/*
 * store_list_attr() - generate a sysfs "store" handler for an unsigned
 * int list attribute in struct hwmon_node::name.
 *
 * Parses up to @n - 1 space-separated unsigned values from @buf, clamps
 * each to [_min, _max], and writes a trailing 0 entry as the list
 * terminator (which is why only n - 1 user values are accepted).
 * Rejects input containing no parseable value with -EINVAL.
 */
#define store_list_attr(name, n, _min, _max) \
static ssize_t store_list_##name(struct device *dev,			\
			struct device_attribute *attr, const char *buf,	\
			size_t count)					\
{									\
	struct devfreq *df = to_devfreq(dev);				\
	struct hwmon_node *hw = df->data;				\
	int ret;							\
	unsigned int i = 0, val;					\
									\
	do {								\
		ret = sscanf(buf, "%u", &val);				\
		if (ret != 1)						\
			break;						\
		buf = strnchr(buf, PAGE_SIZE, ' ');			\
		if (buf)						\
			buf++;						\
		val = max(val, _min);					\
		val = min(val, _max);					\
		hw->name[i] = val;					\
		i++;							\
	} while (buf && i < n - 1);					\
	if (i < 1)							\
		return -EINVAL;						\
	hw->name[i] = 0;						\
	return count;							\
}

#define gov_list_attr(__attr, n, min, max)	\
show_list_attr(__attr, n)			\
store_list_attr(__attr, n, min, max)		\
static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)

#define MIN_MS	10U
#define MAX_MS	500U

static unsigned long measure_bw_and_set_irq(struct hwmon_node *node)
/*
 * Returns MBps of read/writes for the sampling window.
 * @bytes: bytes transferred during the window
 * @us:    window length in microseconds (callers pass at least 1;
 *         do_div() by zero would be undefined)
 *
 * Scales to bytes/second first, then rounds up to whole MB/s (SZ_1M).
 */
static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
{
	bytes *= USEC_PER_SEC;
	do_div(bytes, us);
	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
	return bytes;
}

/*
 * Convert a MB/s rate into the byte count expected over a @ms
 * millisecond window, padded upward by @tolerance_percent.  The
 * per-second scaling is rounded up so the result never undershoots
 * the requested rate.
 */
static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
				  unsigned int tolerance_percent)
{
	unsigned long bytes = mbps * (100 + tolerance_percent) * ms;

	bytes /= 100;
	bytes = DIV_ROUND_UP(bytes, MSEC_PER_SEC);
	return bytes * SZ_1M;
}

int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	ktime_t ts;
	unsigned long bytes, mbps, flags;
	unsigned int us;
	unsigned long mbps;
	struct bw_hwmon *hw = node->hw;
	int wake = 0;

	/*
	 * Since we are stopping the counters, we don't want this short work
	 * to be interrupted by other tasks and cause the measurements to be
	 * wrong. Not blocking interrupts to avoid affecting interrupt
	 * latency and since they should be short anyway because they run in
	 * atomic context.
	 */
	preempt_disable();
	df = hwmon->df;
	node = df->data;

	spin_lock_irqsave(&irq_lock, flags);
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
	if (!us)
		us = 1;

	mbps = hw->meas_bw_and_set_irq(hw, node->tolerance_percent, us);
	bytes = hwmon->get_bytes_and_clear(hwmon);
	bytes += node->bytes;
	node->bytes = 0;

	mbps = bytes_to_mbps(bytes, us);
	node->max_mbps = max(node->max_mbps, mbps);

	/*
	 * If the measured bandwidth in a micro sample is greater than the
	 * wake up threshold, it indicates an increase in load that's non
	 * trivial. So, have the governor ignore historical idle time or low
	 * bandwidth usage and do the bandwidth calculation based on just
	 * this micro sample.
	 */
	if (mbps > node->up_wake_mbps)
		wake = UP_WAKE;
	else if (mbps < node->down_wake_mbps) {
		node->down_cnt--;
		if (node->down_cnt <= 0)
			wake = DOWN_WAKE;
	}

	node->prev_ts = ts;
	node->wake = wake;
	spin_unlock_irqrestore(&irq_lock, flags);

	preempt_enable();
	trace_bw_hwmon_meas(dev_name(df->dev.parent),
				mbps,
				us,
				wake);

	dev_dbg(hw->df->dev.parent, "BW MBps = %6lu, period = %u\n", mbps, us);
	dev_dbg(df->dev.parent, "MB/s: %5lu, us:%6d, wake: %d\n",
		mbps, us, wake);

	return mbps;
	return wake;
}

static void compute_bw(struct hwmon_node *node, int mbps,
/*
 * Map a measured MB/s value to the lowest configured mbps_zone that is
 * >= it.  The zone list ends at the first 0 entry; if no zone covers
 * @mbps, fall back to the devfreq max frequency.
 */
unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
{
	int i;

	for (i = 0; i < NUM_MBPS_ZONES; i++) {
		unsigned long zone = node->mbps_zones[i];

		if (!zone)
			break;
		if (zone >= mbps)
			return zone;
	}

	return node->hw->df->max_freq;
}

#define MIN_MBPS	500UL
#define HIST_PEAK_TOL	60
static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
					unsigned long *freq, unsigned long *ab)
{
	int new_bw;
	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
	unsigned long meas_mbps_zone;
	unsigned long hist_lo_tol, hyst_lo_tol;
	struct bw_hwmon *hw = node->hw;
	unsigned int new_bw, io_percent;

	mbps += node->guard_band_mbps;
	spin_lock_irqsave(&irq_lock, flags);

	if (mbps > node->prev_ab) {
		new_bw = mbps;
	req_mbps = meas_mbps = node->max_mbps;
	node->max_mbps = 0;

	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
	/* Remember historic peak in the past hist_mem decision windows. */
	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
		/* If new max or no history */
		node->hist_max_mbps = meas_mbps;
		node->hist_mem = node->hist_memory;
	} else if (meas_mbps >= hist_lo_tol) {
		/*
		 * If subsequent peaks come close (within tolerance) to but
		 * less than the historic peak, then reset the history start,
		 * but not the peak value.
		 */
		node->hist_mem = node->hist_memory;
	} else {
		new_bw = mbps * node->decay_rate
		/* Count down history expiration. */
		if (node->hist_mem)
			node->hist_mem--;
	}

	/* Keep track of whether we are in low power mode consistently. */
	if (meas_mbps > node->low_power_ceil_mbps)
		node->above_low_power = node->low_power_delay;
	if (node->above_low_power)
		node->above_low_power--;

	if (node->above_low_power)
		io_percent = node->io_percent;
	else
		io_percent = node->low_power_io_percent;

	/*
	 * The AB value that corresponds to the lowest mbps zone greater than
	 * or equal to the "frequency" the current measurement will pick.
	 * This upper limit is useful for balancing out any prediction
	 * mechanisms to be power friendly.
	 */
	meas_mbps_zone = (meas_mbps * 100) / io_percent;
	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;

	/*
	 * If this is a wake up due to BW increase, vote much higher BW than
	 * what we measure to stay ahead of increasing traffic and then set
	 * it up to vote for measured BW if we see down_count short sample
	 * windows of low traffic.
	 */
	if (node->wake == UP_WAKE) {
		req_mbps += ((meas_mbps - node->prev_req)
				* node->up_scale) / 100;
		/*
		 * Don't drop below max_mbps which caused the UP_WAKE if
		 * down_thres is enabled. This is functionally equivalent of
		 * two adjacent decision windows overlapping by one short
		 * sample window when an UP_WAKE happens.
		 */
		node->max_mbps = meas_mbps;
		node->down_cnt = node->down_count;

		/*
		 * However if the measured load is less than the historic
		 * peak, but the over request is higher than the historic
		 * peak, then we could limit the over requesting to the
		 * historic peak.
		 */
		if (req_mbps > node->hist_max_mbps
		    && meas_mbps < node->hist_max_mbps)
			req_mbps = node->hist_max_mbps;

		req_mbps = min(req_mbps, meas_mbps_zone);
	} else {
		/*
		 * We want to quickly drop the vote only if we are
		 * over-voting (UP_WAKE). So, effectively disable it for all
		 * other cases by setting it to a very large value.
		 */
		node->down_cnt = INT_MAX;
	}

	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
		node->hyst_peak = 0;
		node->hyst_trig_win = node->hyst_length;
		node->hyst_mbps = meas_mbps;
	}

	/*
	 * Check node->max_mbps to avoid double counting peaks that cause
	 * early termination of a window.
	 */
	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
	    && !node->max_mbps) {
		node->hyst_peak++;
		if (node->hyst_peak >= node->hyst_trigger_count
		    || node->hyst_en)
			node->hyst_en = node->hyst_length;
	}

	if (node->hyst_trig_win)
		node->hyst_trig_win--;
	if (node->hyst_en)
		node->hyst_en--;

	if (!node->hyst_trig_win && !node->hyst_en) {
		node->hyst_peak = 0;
		node->hyst_mbps = 0;
	}

	if (node->hyst_en) {
		if (meas_mbps > node->idle_mbps)
			req_mbps = max(req_mbps, node->hyst_mbps);
	}

	/* Stretch the short sample window size, if the traffic is too low */
	if (meas_mbps < MIN_MBPS) {
		node->up_wake_mbps = (max(MIN_MBPS, req_mbps)
					* (100 + node->up_thres)) / 100;
		node->down_wake_mbps = 0;
		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
					node->sample_ms, 0);
	} else {
		/*
		 * Up wake vs down wake are intentionally a percentage of
		 * req_mbps vs meas_mbps to make sure the over requesting
		 * phase is handled properly. We only want to wake up and
		 * reduce the vote based on the measured mbps being less than
		 * the previous measurement that caused the "over request".
		 */
		node->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
		node->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
		thres = mbps_to_bytes(meas_mbps, node->sample_ms, 0);
	}

	node->bytes = hw->set_thres(hw, thres);

	node->wake = 0;
	node->prev_req = req_mbps;

	spin_unlock_irqrestore(&irq_lock, flags);

	adj_mbps = req_mbps + node->guard_band_mbps;

	if (adj_mbps > node->prev_ab) {
		new_bw = adj_mbps;
	} else {
		new_bw = adj_mbps * node->decay_rate
			+ node->prev_ab * (100 - node->decay_rate);
		new_bw /= 100;
	}
@@ -140,7 +426,14 @@ static void compute_bw(struct hwmon_node *node, int mbps,
	node->prev_ab = new_bw;
	if (ab)
		*ab = roundup(new_bw, node->bw_step);
	*freq = (new_bw * 100) / node->io_percent;

	*freq = (new_bw * 100) / io_percent;
	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
				new_bw,
				*freq,
				node->up_wake_mbps,
				node->down_wake_mbps);
	return req_mbps;
}

static struct hwmon_node *find_hwmon_node(struct devfreq *df)
@@ -161,13 +454,10 @@ static struct hwmon_node *find_hwmon_node(struct devfreq *df)
	return found;
}

#define TOO_SOON_US	(1 * USEC_PER_MSEC)
int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	ktime_t ts;
	unsigned int us;
	int ret;

	if (!hwmon)
@@ -175,7 +465,7 @@ int update_bw_hwmon(struct bw_hwmon *hwmon)
	df = hwmon->df;
	if (!df)
		return -ENODEV;
	node = find_hwmon_node(df);
	node = df->data;
	if (!node)
		return -ENODEV;

@@ -185,26 +475,12 @@ int update_bw_hwmon(struct bw_hwmon *hwmon)
	dev_dbg(df->dev.parent, "Got update request\n");
	devfreq_monitor_stop(df);

	/*
	 * Don't recalc bandwidth if the interrupt comes right after a
	 * previous bandwidth calculation.  This is done for two reasons:
	 *
	 * 1. Sampling the BW during a very short duration can result in a
	 *    very inaccurate measurement due to very short bursts.
	 * 2. This can only happen if the limit was hit very close to the end
	 *    of the previous sample period. Which means the current BW
	 *    estimate is not very off and doesn't need to be readjusted.
	 */
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
	if (us > TOO_SOON_US) {
	mutex_lock(&df->lock);
	ret = update_devfreq(df);
	if (ret)
		dev_err(df->dev.parent,
			"Unable to update freq on request!\n");
	mutex_unlock(&df->lock);
	}

	devfreq_monitor_start(df);

@@ -382,7 +658,6 @@ static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
					unsigned long *freq,
					u32 *flag)
{
	unsigned long mbps;
	struct hwmon_node *node = df->data;

	/* Suspend/resume sequence */
@@ -392,8 +667,7 @@ static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
		return 0;
	}

	mbps = measure_bw_and_set_irq(node);
	compute_bw(node, mbps, freq, node->dev_ab);
	get_bw_and_set_irq(node, freq, node->dev_ab);

	return 0;
}
@@ -403,6 +677,19 @@ gov_attr(guard_band_mbps, 0U, 2000U);
gov_attr(decay_rate, 0U, 100U);
gov_attr(io_percent, 1U, 100U);
gov_attr(bw_step, 50U, 1000U);
gov_attr(sample_ms, 1U, 50U);
gov_attr(up_scale, 100U, 500U);
gov_attr(up_thres, 1U, 100U);
gov_attr(down_thres, 0U, 90U);
gov_attr(down_count, 0U, 90U);
gov_attr(hist_memory, 0U, 90U);
gov_attr(hyst_trigger_count, 0U, 90U);
gov_attr(hyst_length, 0U, 90U);
gov_attr(idle_mbps, 0U, 2000U);
gov_attr(low_power_ceil_mbps, 0U, 2500U);
gov_attr(low_power_io_percent, 1U, 100U);
gov_attr(low_power_delay, 1U, 60U);
gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);

static struct attribute *dev_attr[] = {
	&dev_attr_tolerance_percent.attr,
@@ -410,6 +697,19 @@ static struct attribute *dev_attr[] = {
	&dev_attr_decay_rate.attr,
	&dev_attr_io_percent.attr,
	&dev_attr_bw_step.attr,
	&dev_attr_sample_ms.attr,
	&dev_attr_up_scale.attr,
	&dev_attr_up_thres.attr,
	&dev_attr_down_thres.attr,
	&dev_attr_down_count.attr,
	&dev_attr_hist_memory.attr,
	&dev_attr_hyst_trigger_count.attr,
	&dev_attr_hyst_length.attr,
	&dev_attr_idle_mbps.attr,
	&dev_attr_low_power_ceil_mbps.attr,
	&dev_attr_low_power_io_percent.attr,
	&dev_attr_low_power_delay.attr,
	&dev_attr_mbps_zones.attr,
	NULL,
};

@@ -521,8 +821,21 @@ int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
	node->tolerance_percent = 10;
	node->guard_band_mbps = 100;
	node->decay_rate = 90;
	node->io_percent = 16;
	node->io_percent = 34;
	node->low_power_ceil_mbps = 0;
	node->low_power_io_percent = 34;
	node->low_power_delay = 20;
	node->bw_step = 190;
	node->sample_ms = 2;
	node->up_scale = 250;
	node->up_thres = 10;
	node->down_thres = 0;
	node->down_count = 3;
	node->hist_memory = 20;
	node->hyst_trigger_count = 3;
	node->hyst_length = 10;
	node->idle_mbps = 400;
	node->mbps_zones[0] = 100000;
	node->hw = hwmon;

	mutex_lock(&list_lock);
+12 −11
Original line number Diff line number Diff line
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -21,13 +21,9 @@
 * struct bw_hwmon - dev BW HW monitor info
 * @start_hwmon:		Start the HW monitoring of the dev BW
 * @stop_hwmon:			Stop the HW monitoring of dev BW
 * @is_valid_irq:		Check whether the IRQ was triggered by the
 *				counters used to monitor dev BW.
 * @meas_bw_and_set_irq:	Return the measured bandwidth and set up the
 *				IRQ to fire if the usage exceeds current
 *				measurement by @tol percent.
 * @irq:			IRQ number that corresponds to this HW
 *				monitor.
 * @set_thres:			Set the count threshold to generate an IRQ
 * @get_bytes_and_clear:	Get the bytes transferred since the last call
 *				and reset the counter to start over.
 * @dev:			Pointer to device that this HW monitor can
 *				monitor.
 * @of_node:			OF node of device that this HW monitor can
@@ -49,8 +45,8 @@ struct bw_hwmon {
	void (*stop_hwmon)(struct bw_hwmon *hw);
	int (*suspend_hwmon)(struct bw_hwmon *hw);
	int (*resume_hwmon)(struct bw_hwmon *hw);
	unsigned long (*meas_bw_and_set_irq)(struct bw_hwmon *hw,
					unsigned int tol, unsigned int us);
	unsigned long (*set_thres)(struct bw_hwmon *hw, unsigned long bytes);
	unsigned long (*get_bytes_and_clear)(struct bw_hwmon *hw);
	struct device *dev;
	struct device_node *of_node;
	struct devfreq_governor *gov;
@@ -61,13 +57,18 @@ struct bw_hwmon {
#ifdef CONFIG_DEVFREQ_GOV_MSM_BW_HWMON
int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon);
int update_bw_hwmon(struct bw_hwmon *hwmon);
int bw_hwmon_sample_end(struct bw_hwmon *hwmon);
#else
/* No-op stub used when CONFIG_DEVFREQ_GOV_MSM_BW_HWMON is disabled. */
static inline int register_bw_hwmon(struct device *dev,
					struct bw_hwmon *hwmon)
{
	return 0;
}
int update_bw_hwmon(struct bw_hwmon *hwmon)
static inline int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	return 0;
}
/* No-op stub used when CONFIG_DEVFREQ_GOV_MSM_BW_HWMON is disabled. */
static inline int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
{
	return 0;
}
+59 −0
Original line number Diff line number Diff line
@@ -871,6 +871,65 @@ DEFINE_EVENT(timer_status, single_cycle_exit_timer_stop,
		timer_rate, mode)
);

/*
 * bw_hwmon_meas - trace one short-sample bandwidth measurement:
 * device name, measured MB/s, sample window length in microseconds,
 * and the wake decision made for the window (0 = none; nonzero values
 * correspond to the governor's UP_WAKE/DOWN_WAKE).
 */
TRACE_EVENT(bw_hwmon_meas,

	TP_PROTO(const char *name, unsigned long mbps,
		 unsigned long us, int wake),

	TP_ARGS(name, mbps, us, wake),

	TP_STRUCT__entry(
		__string(	name,			name	)
		__field(	unsigned long,		mbps	)
		__field(	unsigned long,		us	)
		__field(	int,			wake	)
	),

	TP_fast_assign(
		__assign_str(name, name);
		__entry->mbps = mbps;
		__entry->us = us;
		__entry->wake = wake;
	),

	TP_printk("dev: %s, mbps = %lu, us = %lu, wake = %d",
		__get_str(name),
		__entry->mbps,
		__entry->us,
		__entry->wake)
);

/*
 * bw_hwmon_update - trace the governor's resulting vote after a decision
 * window: device name, the new bandwidth value, the frequency derived
 * from it, and the up/down wake thresholds (MB/s) armed for the next
 * short sample window.
 */
TRACE_EVENT(bw_hwmon_update,

	TP_PROTO(const char *name, unsigned long mbps, unsigned long freq,
		 unsigned long up_thres, unsigned long down_thres),

	TP_ARGS(name, mbps, freq, up_thres, down_thres),

	TP_STRUCT__entry(
		__string(	name,			name		)
		__field(	unsigned long,		mbps		)
		__field(	unsigned long,		freq		)
		__field(	unsigned long,		up_thres	)
		__field(	unsigned long,		down_thres	)
	),

	TP_fast_assign(
		__assign_str(name, name);
		__entry->mbps = mbps;
		__entry->freq = freq;
		__entry->up_thres = up_thres;
		__entry->down_thres = down_thres;
	),

	TP_printk("dev: %s, mbps = %lu, freq = %lu, up = %lu, down = %lu",
		__get_str(name),
		__entry->mbps,
		__entry->freq,
		__entry->up_thres,
		__entry->down_thres)
);

#endif /* _TRACE_POWER_H */

/* This part must be outside protection */