
Commit 71a145c4 authored by Connor O'Brien, committed by Alistair Delva

ANDROID: cpufreq: times: Remove per-UID time in state tracking



This removes the per-UID tracking portion of commit c408992c ("ANDROID:
cpufreq: times: track per-uid time in state"), while preserving the null
check that commit added in the per-task tracking code.

Bug: 127641090
Signed-off-by: Connor O'Brien <connoro@google.com>
Change-Id: Iae3e0abef32cbbb9fdc674079973040be0060ac7
parent 10fd62c4
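
For reference, a minimal sketch of the per-task path that survives this patch, adapted from the cpufreq_acct_update_power() hunk below (the guard condition is truncated in that hunk, so its exact form here is reconstructed); the null check on p->time_in_state is the piece of c408992c that is kept:

	/* Surviving per-task accounting: only the per-UID bookkeeping
	 * that followed it is removed by this patch.
	 */
	spin_lock_irqsave(&task_time_in_state_lock, flags);
	if ((state < p->max_state) && p->time_in_state)	/* preserved null check */
		p->time_in_state[state] += cputime;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);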
drivers/cpufreq/cpufreq_times.c +0 −189
@@ -15,30 +15,14 @@

#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/hashtable.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/threads.h>

#define UID_HASH_BITS 10

static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);

static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */

struct uid_entry {
	uid_t uid;
	unsigned int max_state;
	struct hlist_node hash;
	struct rcu_head rcu;
	u64 time_in_state[0];
};

/**
 * struct cpu_freqs - per-cpu frequency information
@@ -58,121 +42,6 @@ static struct cpu_freqs *all_freqs[NR_CPUS];

static unsigned int next_offset;

/* Caller must hold uid lock */
static struct uid_entry *find_uid_entry_locked(uid_t uid)
{
	struct uid_entry *uid_entry;

	hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
		if (uid_entry->uid == uid)
			return uid_entry;
	}
	return NULL;
}

/* Caller must hold uid lock */
static struct uid_entry *find_or_register_uid_locked(uid_t uid)
{
	struct uid_entry *uid_entry, *temp;
	unsigned int max_state = READ_ONCE(next_offset);
	size_t alloc_size = sizeof(*uid_entry) + max_state *
		sizeof(uid_entry->time_in_state[0]);

	uid_entry = find_uid_entry_locked(uid);
	if (uid_entry) {
		if (uid_entry->max_state == max_state)
			return uid_entry;
		/* uid_entry->time_in_state is too small to track all freqs, so
		 * expand it.
		 */
		temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC);
		if (!temp)
			return uid_entry;
		temp->max_state = max_state;
		memset(temp->time_in_state + uid_entry->max_state, 0,
		       (max_state - uid_entry->max_state) *
		       sizeof(uid_entry->time_in_state[0]));
		if (temp != uid_entry) {
			hlist_replace_rcu(&uid_entry->hash, &temp->hash);
			kfree_rcu(uid_entry, rcu);
		}
		return temp;
	}

	uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
	if (!uid_entry)
		return NULL;

	uid_entry->uid = uid;
	uid_entry->max_state = max_state;

	hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);

	return uid_entry;
}

static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= HASH_SIZE(uid_hash_table))
		return NULL;

	return &uid_hash_table[*pos];
}

static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	do {
		(*pos)++;

		if (*pos >= HASH_SIZE(uid_hash_table))
			return NULL;
	} while (hlist_empty(&uid_hash_table[*pos]));

	return &uid_hash_table[*pos];
}

static void uid_seq_stop(struct seq_file *seq, void *v) { }

static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
{
	struct uid_entry *uid_entry;
	struct cpu_freqs *freqs, *last_freqs = NULL;
	int i, cpu;

	if (v == uid_hash_table) {
		seq_puts(m, "uid:");
		for_each_possible_cpu(cpu) {
			freqs = all_freqs[cpu];
			if (!freqs || freqs == last_freqs)
				continue;
			last_freqs = freqs;
			for (i = 0; i < freqs->max_state; i++) {
				seq_put_decimal_ull(m, " ",
						    freqs->freq_table[i]);
			}
		}
		seq_putc(m, '\n');
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
		if (uid_entry->max_state) {
			seq_put_decimal_ull(m, "", uid_entry->uid);
			seq_putc(m, ':');
		}
		for (i = 0; i < uid_entry->max_state; ++i) {
			u64 time = nsec_to_clock_t(uid_entry->time_in_state[i]);
			seq_put_decimal_ull(m, " ", time);
		}
		if (uid_entry->max_state)
			seq_putc(m, '\n');
	}

	rcu_read_unlock();
	return 0;
}

void cpufreq_task_times_init(struct task_struct *p)
{
	unsigned long flags;
@@ -265,9 +134,7 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
{
	unsigned long flags;
	unsigned int state;
	struct uid_entry *uid_entry;
	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));

	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
		return;
@@ -279,12 +146,6 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
	    p->time_in_state)
		p->time_in_state[state] += cputime;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);

	spin_lock_irqsave(&uid_lock, flags);
	uid_entry = find_or_register_uid_locked(uid);
	if (uid_entry && state < uid_entry->max_state)
		uid_entry->time_in_state[state] += cputime;
	spin_unlock_irqrestore(&uid_lock, flags);
}

static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
@@ -336,27 +197,6 @@ void cpufreq_times_create_policy(struct cpufreq_policy *policy)
		all_freqs[cpu] = freqs;
}

void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
{
	struct uid_entry *uid_entry;
	struct hlist_node *tmp;
	unsigned long flags;

	spin_lock_irqsave(&uid_lock, flags);

	for (; uid_start <= uid_end; uid_start++) {
		hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
			hash, uid_start) {
			if (uid_start == uid_entry->uid) {
				hash_del_rcu(&uid_entry->hash);
				kfree_rcu(uid_entry, rcu);
			}
		}
	}

	spin_unlock_irqrestore(&uid_lock, flags);
}

void cpufreq_times_record_transition(struct cpufreq_policy *policy,
	unsigned int new_freq)
{
@@ -369,32 +209,3 @@ void cpufreq_times_record_transition(struct cpufreq_policy *policy,
	if (index >= 0)
		WRITE_ONCE(freqs->last_index, index);
}

static const struct seq_operations uid_time_in_state_seq_ops = {
	.start = uid_seq_start,
	.next = uid_seq_next,
	.stop = uid_seq_stop,
	.show = uid_time_in_state_seq_show,
};

static int uid_time_in_state_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uid_time_in_state_seq_ops);
}

static const struct file_operations uid_time_in_state_fops = {
	.open		= uid_time_in_state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init cpufreq_times_init(void)
{
	proc_create_data("uid_time_in_state", 0444, NULL,
			 &uid_time_in_state_fops, NULL);

	return 0;
}

early_initcall(cpufreq_times_init);
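
For context, uid_time_in_state_seq_show() above emits a header line listing every distinct frequency table ("uid: f0 f1 ...") followed by one line per UID with nsec_to_clock_t()-converted times per state ("<uid>: t0 t1 ..."). A minimal userspace sketch that dumps the /proc file this initcall created (a hypothetical consumer, not part of this patch):

	#include <stdio.h>

	int main(void)
	{
		char line[4096];
		FILE *f = fopen("/proc/uid_time_in_state", "r");

		if (!f) {
			perror("uid_time_in_state");
			return 1;
		}
		/* First line: "uid:" header with frequencies;
		 * remaining lines: "<uid>: <ticks per state>..."
		 */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}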
drivers/misc/uid_sys_stats.c +0 −4
@@ -14,7 +14,6 @@
 */

#include <linux/atomic.h>
#include <linux/cpufreq_times.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/init.h>
@@ -424,9 +423,6 @@ static ssize_t uid_remove_write(struct file *file,
		return -EINVAL;
	}

	/* Also remove uids from /proc/uid_time_in_state */
	cpufreq_task_times_remove_uids(uid_start, uid_end);

	rt_mutex_lock(&uid_lock);

	for (; uid_start <= uid_end; uid_start++) {
include/linux/cpufreq_times.h +0 −3
@@ -29,7 +29,6 @@ void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
void cpufreq_times_create_policy(struct cpufreq_policy *policy);
void cpufreq_times_record_transition(struct cpufreq_policy *policy,
                                     unsigned int new_freq);
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
#else
static inline void cpufreq_task_times_init(struct task_struct *p) {}
static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
@@ -39,7 +38,5 @@ static inline void cpufreq_acct_update_power(struct task_struct *p,
static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
static inline void cpufreq_times_record_transition(
	struct cpufreq_policy *policy, unsigned int new_freq) {}
static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
						  uid_t uid_end) {}
#endif /* CONFIG_CPU_FREQ_TIMES */
#endif /* _LINUX_CPUFREQ_TIMES_H */
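
Note that the header pairs each declaration with a no-op static inline in the #else branch, so call sites build unchanged whether CONFIG_CPU_FREQ_TIMES is set; that is why the declaration and stub for cpufreq_task_times_remove_uids() are dropped together. A minimal sketch of the pattern with a hypothetical pre-patch call site:

	#include <linux/cpufreq_times.h>

	/* Hypothetical caller: before this patch it compiled with or without
	 * CONFIG_CPU_FREQ_TIMES, since the #else branch supplied a no-op stub;
	 * after this patch the function (and any such call) is gone.
	 */
	static void flush_uid_range(uid_t start, uid_t end)
	{
		cpufreq_task_times_remove_uids(start, end);
	}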