
Commit 028d5c1b authored by Amit Pundir

Merge branch 'android-3.18' of https://android.googlesource.com/kernel/common

* android-3.18:
  cpufreq_stats: Adds the functionality to load current values for each frequency for all the cores.
  cgroup: Fix issues in allow_attach callback
  New Build Breakage in branch: kernel-m-dev-tegra-flounder-3.10 @ 1960706
  net/unix: sk_socket can disappear when state is unlocked
  selinux: enable genfscon labeling for sysfs and pstore files
  ext4: don't save the error information if the block device is read-only
  selinux: enable per-file labeling for debugfs files.
  cpufreq: interactive: Rearm governor timer at max freq
  cpufreq: interactive: Implement cluster-based min_sample_time
  cpufreq: interactive: Exercise hispeed settings at a policy level
  cpufreq: interactive: Round up timer_rate to match jiffy
  cpufreq: interactive: Don't set floor_validate_time during boost
  suspend: Return error when pending wakeup source is found.
  proc: uid_cputime: fix show_uid_stat permission
parents 256c032b 7f53705d
drivers/cpufreq/cpufreq_interactive.c  +53 −97
@@ -47,9 +47,10 @@ struct cpufreq_interactive_cpuinfo {
 	spinlock_t target_freq_lock; /*protects target freq */
 	unsigned int target_freq;
 	unsigned int floor_freq;
-	unsigned int max_freq;
-	u64 floor_validate_time;
-	u64 hispeed_validate_time;
+	u64 pol_floor_val_time; /* policy floor_validate_time */
+	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
+	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
+	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
 	struct rw_semaphore enable_sem;
 	int governor_enabled;
 };
@@ -345,6 +346,7 @@ static void cpufreq_interactive_timer(unsigned long data)
 	unsigned int loadadjfreq;
 	unsigned int index;
 	unsigned long flags;
+	u64 max_fvtime;

 	if (!down_read_trylock(&pcpu->enable_sem))
 		return;
@@ -367,7 +369,7 @@ static void cpufreq_interactive_timer(unsigned long data)
 	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

 	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
-		if (pcpu->target_freq < tunables->hispeed_freq) {
+		if (pcpu->policy->cur < tunables->hispeed_freq) {
 			new_freq = tunables->hispeed_freq;
 		} else {
 			new_freq = choose_freq(pcpu, loadadjfreq);
@@ -378,14 +380,14 @@ static void cpufreq_interactive_timer(unsigned long data)
 	} else {
 		new_freq = choose_freq(pcpu, loadadjfreq);
 		if (new_freq > tunables->hispeed_freq &&
-				pcpu->target_freq < tunables->hispeed_freq)
+				pcpu->policy->cur < tunables->hispeed_freq)
 			new_freq = tunables->hispeed_freq;
 	}

-	if (pcpu->target_freq >= tunables->hispeed_freq &&
-	    new_freq > pcpu->target_freq &&
-	    now - pcpu->hispeed_validate_time <
-	    freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
+	if (pcpu->policy->cur >= tunables->hispeed_freq &&
+	    new_freq > pcpu->policy->cur &&
+	    now - pcpu->pol_hispeed_val_time <
+	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
 		trace_cpufreq_interactive_notyet(
 			data, cpu_load, pcpu->target_freq,
 			pcpu->policy->cur, new_freq);
@@ -393,7 +395,7 @@ static void cpufreq_interactive_timer(unsigned long data)
 		goto rearm;
 	}

-	pcpu->hispeed_validate_time = now;
+	pcpu->loc_hispeed_val_time = now;

 	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
 					   new_freq, CPUFREQ_RELATION_L,
@@ -408,9 +410,10 @@ static void cpufreq_interactive_timer(unsigned long data)
 	 * Do not scale below floor_freq unless we have been at or above the
 	 * floor frequency for the minimum sample time since last validated.
 	 */
-	if (new_freq < pcpu->floor_freq) {
-		if (now - pcpu->floor_validate_time <
-				tunables->min_sample_time) {
+	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
+	if (new_freq < pcpu->floor_freq &&
+	    pcpu->target_freq >= pcpu->policy->cur) {
+		if (now - max_fvtime < tunables->min_sample_time) {
 			trace_cpufreq_interactive_notyet(
 				data, cpu_load, pcpu->target_freq,
 				pcpu->policy->cur, new_freq);
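Note: with hypothetical numbers, if the floor was last validated at t = 100 ms and min_sample_time is 80 ms, a new_freq below floor_freq computed at t = 150 ms still hits the notyet trace, while at t = 190 ms the drop is allowed. Taking the max() of the policy-wide and per-CPU validation times keeps one CPU from dropping the cluster below a floor another CPU refreshed more recently.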
@@ -429,7 +432,9 @@ static void cpufreq_interactive_timer(unsigned long data)

 	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
 		pcpu->floor_freq = new_freq;
-		pcpu->floor_validate_time = now;
+		if (pcpu->target_freq >= pcpu->policy->cur ||
+		    new_freq >= pcpu->policy->cur)
+			pcpu->loc_floor_val_time = now;
 	}

 	if (pcpu->target_freq == new_freq &&
@@ -438,7 +443,7 @@ static void cpufreq_interactive_timer(unsigned long data)
 			data, cpu_load, pcpu->target_freq,
 			pcpu->policy->cur, new_freq);
 		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
-		goto rearm_if_notmax;
+		goto rearm;
 	}

 	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
@@ -451,14 +456,6 @@ static void cpufreq_interactive_timer(unsigned long data)
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
 	wake_up_process(speedchange_task);

-rearm_if_notmax:
-	/*
-	 * Already set max speed and don't see a need to change that,
-	 * wait until next idle to re-evaluate, don't need timer.
-	 */
-	if (pcpu->target_freq == pcpu->policy->max)
-		goto exit;
-
 rearm:
 	if (!timer_pending(&pcpu->cpu_timer))
 		cpufreq_interactive_timer_resched(pcpu);
@@ -468,37 +465,6 @@ exit:
 	return;
 }

-static void cpufreq_interactive_idle_start(void)
-{
-	struct cpufreq_interactive_cpuinfo *pcpu =
-		&per_cpu(cpuinfo, smp_processor_id());
-	int pending;
-
-	if (!down_read_trylock(&pcpu->enable_sem))
-		return;
-	if (!pcpu->governor_enabled) {
-		up_read(&pcpu->enable_sem);
-		return;
-	}
-
-	pending = timer_pending(&pcpu->cpu_timer);
-
-	if (pcpu->target_freq != pcpu->policy->min) {
-		/*
-		 * Entering idle while not at lowest speed.  On some
-		 * platforms this can hold the other CPU(s) at that speed
-		 * even though the CPU is idle. Set a timer to re-evaluate
-		 * speed so this idle CPU doesn't hold the other CPUs above
-		 * min indefinitely.  This should probably be a quirk of
-		 * the CPUFreq driver.
-		 */
-		if (!pending)
-			cpufreq_interactive_timer_resched(pcpu);
-	}
-
-	up_read(&pcpu->enable_sem);
-}
-
 static void cpufreq_interactive_idle_end(void)
 {
 	struct cpufreq_interactive_cpuinfo *pcpu =
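The two hunks above correspond to "cpufreq: interactive: Rearm governor timer at max freq" from the commit list: the early exit that parked the sampling timer while a CPU sat at policy->max is removed, so load keeps being re-evaluated at the maximum frequency, and the idle-start hook whose main job was to restart that parked timer can be dropped entirely.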
@@ -553,6 +519,8 @@ static int cpufreq_interactive_speedchange_task(void *data)
 		for_each_cpu(cpu, &tmp_mask) {
 			unsigned int j;
 			unsigned int max_freq = 0;
+			struct cpufreq_interactive_cpuinfo *pjcpu;
+			u64 hvt = ~0ULL, fvt = 0;

 			pcpu = &per_cpu(cpuinfo, cpu);
 			if (!down_read_trylock(&pcpu->enable_sem))
@@ -563,17 +531,30 @@ static int cpufreq_interactive_speedchange_task(void *data)
 			}

 			for_each_cpu(j, pcpu->policy->cpus) {
-				struct cpufreq_interactive_cpuinfo *pjcpu =
-					&per_cpu(cpuinfo, j);
+				pjcpu = &per_cpu(cpuinfo, j);

-				if (pjcpu->target_freq > max_freq)
+				fvt = max(fvt, pjcpu->loc_floor_val_time);
+				if (pjcpu->target_freq > max_freq) {
 					max_freq = pjcpu->target_freq;
+					hvt = pjcpu->loc_hispeed_val_time;
+				} else if (pjcpu->target_freq == max_freq) {
+					hvt = min(hvt, pjcpu->loc_hispeed_val_time);
+				}
 			}
+			for_each_cpu(j, pcpu->policy->cpus) {
+				pjcpu = &per_cpu(cpuinfo, j);
+				pjcpu->pol_floor_val_time = fvt;
+			}

-			if (max_freq != pcpu->policy->cur)
+			if (max_freq != pcpu->policy->cur) {
 				__cpufreq_driver_target(pcpu->policy,
 							max_freq,
 							CPUFREQ_RELATION_H);
+				for_each_cpu(j, pcpu->policy->cpus) {
+					pjcpu = &per_cpu(cpuinfo, j);
+					pjcpu->pol_hispeed_val_time = hvt;
+				}
+			}
 			trace_cpufreq_interactive_setspeed(cpu,
 						     pcpu->target_freq,
 						     pcpu->policy->cur);
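For illustration (hypothetical values): with three CPUs in one policy whose target_freq/loc_hispeed_val_time pairs are 1200 MHz/t=80, 1200 MHz/t=100 and 900 MHz/t=120, the first loop yields max_freq = 1200 MHz and hvt = min(80, 100) = 80 across the CPUs tied at the maximum, while fvt becomes the max of all loc_floor_val_time values. fvt is then written to every CPU's pol_floor_val_time unconditionally, whereas hvt reaches pol_hispeed_val_time only when the policy frequency actually changes.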
@@ -605,18 +586,10 @@ static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunab
 		if (pcpu->target_freq < tunables->hispeed_freq) {
 			pcpu->target_freq = tunables->hispeed_freq;
 			cpumask_set_cpu(i, &speedchange_cpumask);
-			pcpu->hispeed_validate_time =
+			pcpu->pol_hispeed_val_time =
 				ktime_to_us(ktime_get());
 			anyboost = 1;
 		}
-
-		/*
-		 * Set floor freq and (re)start timer for when last
-		 * validated.
-		 */
-
-		pcpu->floor_freq = tunables->hispeed_freq;
-		pcpu->floor_validate_time = ktime_to_us(ktime_get());
 		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
 	}

@@ -863,12 +836,18 @@ static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
 		const char *buf, size_t count)
 {
 	int ret;
-	unsigned long val;
+	unsigned long val, val_round;

 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-	tunables->timer_rate = val;
+
+	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
+	if (val != val_round)
+		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
+			val_round);
+
+	tunables->timer_rate = val_round;
 	return count;
 }
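The round-trip through usecs_to_jiffies()/jiffies_to_usecs() snaps the requested period up to a whole number of scheduler ticks. A minimal userspace sketch of the arithmetic (HZ = 100 is an assumed tick rate, and the kernel helpers are only approximated here):

	/* timer_rate rounding sketch; HZ is hypothetical */
	#include <stdio.h>

	#define HZ 100			/* assumed CONFIG_HZ */
	#define USEC_PER_SEC 1000000UL

	static unsigned long usecs_to_jiffies(unsigned long us)
	{
		return (us * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC; /* round up */
	}

	static unsigned long jiffies_to_usecs(unsigned long j)
	{
		return j * (USEC_PER_SEC / HZ);
	}

	int main(void)
	{
		unsigned long val = 25000;	/* requested timer_rate, in usecs */
		unsigned long val_round = jiffies_to_usecs(usecs_to_jiffies(val));

		/* prints "25000 -> 30000": 25 ms is not a multiple of a 10 ms tick */
		printf("%lu -> %lu\n", val, val_round);
		return 0;
	}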

@@ -1110,14 +1089,8 @@ static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
 					     unsigned long val,
 					     void *data)
 {
-	switch (val) {
-	case IDLE_START:
-		cpufreq_interactive_idle_start();
-		break;
-	case IDLE_END:
+	if (val == IDLE_END)
 		cpufreq_interactive_idle_end();
-		break;
-	}

 	return 0;
 }
@@ -1234,11 +1207,11 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
 			pcpu->target_freq = policy->cur;
 			pcpu->freq_table = freq_table;
 			pcpu->floor_freq = pcpu->target_freq;
-			pcpu->floor_validate_time =
+			pcpu->pol_floor_val_time =
 				ktime_to_us(ktime_get());
-			pcpu->hispeed_validate_time =
-				pcpu->floor_validate_time;
-			pcpu->max_freq = policy->max;
+			pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
+			pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
+			pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
 			down_write(&pcpu->enable_sem);
 			del_timer_sync(&pcpu->cpu_timer);
 			del_timer_sync(&pcpu->cpu_slack_timer);
@@ -1288,23 +1261,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,

 			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
 			up_read(&pcpu->enable_sem);
-
-			/* Reschedule timer only if policy->max is raised.
-			 * Delete the timers, else the timer callback may
-			 * return without re-arm the timer when failed
-			 * acquire the semaphore. This race may cause timer
-			 * stopped unexpectedly.
-			 */
-
-			if (policy->max > pcpu->max_freq) {
-				down_write(&pcpu->enable_sem);
-				del_timer_sync(&pcpu->cpu_timer);
-				del_timer_sync(&pcpu->cpu_slack_timer);
-				cpufreq_interactive_timer_start(tunables, j);
-				up_write(&pcpu->enable_sem);
-			}
-
-			pcpu->max_freq = policy->max;
 		}
 		break;
 	}
drivers/cpufreq/cpufreq_stats.c  +155 −45
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
+#include <linux/of.h>
 #include <linux/cputime.h>

 static spinlock_t cpufreq_stats_lock;
@@ -38,6 +39,12 @@ struct all_cpufreq_stats {
 	unsigned int *freq_table;
 };

+struct cpufreq_power_stats {
+	unsigned int state_num;
+	unsigned int *curr;
+	unsigned int *freq_table;
+};
+
 struct all_freq_table {
 	unsigned int *freq_table;
 	unsigned int table_size;
@@ -47,6 +54,7 @@ static struct all_freq_table *all_freq_table;

 static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
 static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
+static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

 struct cpufreq_stats_attribute {
 	struct attribute attr;
@@ -117,6 +125,29 @@ static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
 	return -1;
 }

+static ssize_t show_current_in_state(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	ssize_t len = 0;
+	unsigned int i, cpu;
+	struct cpufreq_power_stats *powerstats;
+
+	spin_lock(&cpufreq_stats_lock);
+	for_each_possible_cpu(cpu) {
+		powerstats = per_cpu(cpufreq_power_stats, cpu);
+		if (!powerstats)
+			continue;
+		len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
+		for (i = 0; i < powerstats->state_num; i++)
+			len += scnprintf(buf + len, PAGE_SIZE - len,
+					"%d=%d ", powerstats->freq_table[i],
+					powerstats->curr[i]);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	}
+	spin_unlock(&cpufreq_stats_lock);
+	return len;
+}
+
 static ssize_t show_all_time_in_state(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
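Read back through sysfs, the new attribute prints one "<freq>=<current>" pair per state, one line per CPU. Illustrative output with made-up values (the path assumes the usual location of the cpufreq global kobject):

	$ cat /sys/devices/system/cpu/cpufreq/current_in_state
	CPU0:300000=45 600000=80 1200000=160
	CPU1:300000=45 600000=80 1200000=160

Frequencies are in kHz; the current values come from the platform's device tree (see cpufreq_powerstats_create() below).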
@@ -226,6 +257,9 @@ static struct attribute_group stats_attr_group = {
 static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
 		0444, show_all_time_in_state, NULL);

+static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
+		0444, show_current_in_state, NULL);
+
 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 {
 	int index;
@@ -287,17 +321,31 @@ static void cpufreq_allstats_free(void)
 	}
 }

-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
+static void cpufreq_powerstats_free(void)
+{
+	int cpu;
+	struct cpufreq_power_stats *powerstats;
+
+	sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
+
+	for_each_possible_cpu(cpu) {
+		powerstats = per_cpu(cpufreq_power_stats, cpu);
+		if (!powerstats)
+			continue;
+		kfree(powerstats->curr);
+		kfree(powerstats);
+		per_cpu(cpufreq_power_stats, cpu) = NULL;
+	}
+}
+
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
+		struct cpufreq_frequency_table *table, int count)
 {
-	unsigned int i, count = 0, ret = 0;
+	unsigned int i, ret = 0;
 	struct cpufreq_stats *stat;
 	unsigned int alloc_size;
 	unsigned int cpu = policy->cpu;
-	struct cpufreq_frequency_table *pos, *table;
-
-	table = cpufreq_frequency_get_table(cpu);
-	if (unlikely(!table))
-		return 0;
+	struct cpufreq_frequency_table *pos;

 	if (per_cpu(cpufreq_stats_table, cpu))
 		return -EBUSY;
@@ -312,9 +360,6 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 	stat->cpu = cpu;
 	per_cpu(cpufreq_stats_table, cpu) = stat;

-	cpufreq_for_each_valid_entry(pos, table)
-		count++;
-
 	alloc_size = count * sizeof(int) + count * sizeof(u64);

 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
@@ -349,23 +394,6 @@ error_out:
 	return ret;
 }

-static void cpufreq_stats_create_table(unsigned int cpu)
-{
-	struct cpufreq_policy *policy;
-
-	/*
-	 * "likely(!policy)" because normally cpufreq_stats will be registered
-	 * before cpufreq driver
-	 */
-	policy = cpufreq_cpu_get(cpu);
-	if (likely(!policy))
-		return;
-
-	__cpufreq_stats_create_table(policy);
-
-	cpufreq_cpu_put(policy);
-}
-
 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
 {
 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
@@ -379,6 +407,51 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
 	stat->cpu = policy->cpu;
 }

+static void cpufreq_powerstats_create(unsigned int cpu,
+		struct cpufreq_frequency_table *table, int count) {
+	unsigned int alloc_size, i = 0, ret = 0;
+	struct cpufreq_power_stats *powerstats;
+	struct cpufreq_frequency_table *pos;
+	struct device_node *cpu_node;
+	char device_path[16];
+
+	powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
+			GFP_KERNEL);
+	if (!powerstats)
+		return;
+
+	/* Allocate memory for freq table per cpu as well as clockticks per
+	 * freq*/
+	alloc_size = count * sizeof(unsigned int) +
+		count * sizeof(unsigned int);
+	powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
+	if (!powerstats->curr) {
+		kfree(powerstats);
+		return;
+	}
+	powerstats->freq_table = powerstats->curr + count;
+
+	spin_lock(&cpufreq_stats_lock);
+	i = 0;
+	cpufreq_for_each_valid_entry(pos, table)
+		powerstats->freq_table[i++] = pos->frequency;
+	powerstats->state_num = i;
+
+	snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
+	cpu_node = of_find_node_by_path(device_path);
+	if (cpu_node) {
+		ret = of_property_read_u32_array(cpu_node, "current",
+				powerstats->curr, count);
+		if (ret) {
+			kfree(powerstats->curr);
+			kfree(powerstats);
+			powerstats = NULL;
+		}
+	}
+	per_cpu(cpufreq_power_stats, cpu) = powerstats;
+	spin_unlock(&cpufreq_stats_lock);
+}
+
 static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
 {
 	unsigned int lhs = *(const unsigned int *)(lhs_ptr);
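The per-frequency current values come from the device tree: the function looks up /cpus/cpu@<n> and reads a "current" array that must provide one u32 per valid frequency (hypothetically, current = <45 80 160> for a three-entry table). If the node is absent the values simply stay zeroed; if the node exists but the property read fails, the per-CPU pointer is left NULL and show_current_in_state() skips that CPU.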
@@ -423,24 +496,14 @@ static void add_all_freq_table(unsigned int freq)
 	all_freq_table->freq_table[all_freq_table->table_size++] = freq;
 }

-static void cpufreq_allstats_create(unsigned int cpu)
+static void cpufreq_allstats_create(unsigned int cpu,
+		struct cpufreq_frequency_table *table, int count)
 {
 	int i , j = 0;
-	unsigned int alloc_size, count = 0;
-	struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu);
+	unsigned int alloc_size;
 	struct all_cpufreq_stats *all_stat;
 	bool sort_needed = false;

-	if (!table)
-		return;
-
-	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		unsigned int freq = table[i].frequency;
-		if (freq == CPUFREQ_ENTRY_INVALID)
-			continue;
-		count++;
-	}
-
 	all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
 			GFP_KERNEL);
 	if (!all_stat) {
@@ -479,22 +542,63 @@ static void cpufreq_allstats_create(unsigned int cpu)
 	spin_unlock(&cpufreq_stats_lock);
 }

+static void cpufreq_stats_create_table(unsigned int cpu)
+{
+	struct cpufreq_policy *policy;
+	struct cpufreq_frequency_table *table, *pos;
+	int count = 0;
+	/*
+	 * "likely(!policy)" because normally cpufreq_stats will be registered
+	 * before cpufreq driver
+	 */
+	policy = cpufreq_cpu_get(cpu);
+	if (likely(!policy))
+		return;
+
+	table = cpufreq_frequency_get_table(policy->cpu);
+	if (likely(table)) {
+		cpufreq_for_each_valid_entry(pos, table);
+			count++;
+
+		if (!per_cpu(all_cpufreq_stats, cpu))
+			cpufreq_allstats_create(cpu, table, count);
+
+		if (!per_cpu(cpufreq_power_stats, cpu))
+			cpufreq_powerstats_create(cpu, table, count);
+
+		__cpufreq_stats_create_table(policy, table, count);
+	}
+	cpufreq_cpu_put(policy);
+}
+
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 		unsigned long val, void *data)
 {
-	int ret = 0;
+	int ret = 0, count = 0;
 	struct cpufreq_policy *policy = data;
+	struct cpufreq_frequency_table *table, *pos;
+	unsigned int cpu = policy->cpu;

 	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
 		cpufreq_stats_update_policy_cpu(policy);
 		return 0;
 	}

-	if (!per_cpu(all_cpufreq_stats, policy->cpu))
-		cpufreq_allstats_create(policy->cpu);
+	table = cpufreq_frequency_get_table(cpu);
+	if (!table)
+		return 0;
+
+	cpufreq_for_each_valid_entry(pos, table);
+		count++;
+
+	if (!per_cpu(all_cpufreq_stats, cpu))
+		cpufreq_allstats_create(cpu, table, count);
+
+	if (!per_cpu(cpufreq_power_stats, cpu))
+		cpufreq_powerstats_create(cpu, table, count);

 	if (val == CPUFREQ_CREATE_POLICY)
-		ret = __cpufreq_stats_create_table(policy);
+		ret = __cpufreq_stats_create_table(policy, table, count);
 	else if (val == CPUFREQ_REMOVE_POLICY)
 		__cpufreq_stats_free_table(policy);

@@ -574,7 +678,12 @@ static int __init cpufreq_stats_init(void)
 	ret = sysfs_create_file(cpufreq_global_kobject,
 			&_attr_all_time_in_state.attr);
 	if (ret)
-		pr_warn("Error creating sysfs file for cpufreq stats\n");
+		pr_warn("Cannot create sysfs file for cpufreq stats\n");
+
+	ret = sysfs_create_file(cpufreq_global_kobject,
+			&_attr_current_in_state.attr);
+	if (ret)
+		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

 	return 0;
 }
@@ -589,6 +698,7 @@ static void __exit cpufreq_stats_exit(void)
 	for_each_online_cpu(cpu)
 		cpufreq_stats_free_table(cpu);
 	cpufreq_allstats_free();
+	cpufreq_powerstats_free();
 	cpufreq_put_global_kobject();
 }

drivers/misc/uid_cputime.c  +1 −1
@@ -226,7 +226,7 @@ static int __init proc_uid_cputime_init(void)
	proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
					NULL);

	proc_create_data("show_uid_stat", S_IWUGO, parent, &uid_stat_fops,
	proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
					NULL);

	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
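The one-line fix gives show_uid_stat read permission (S_IRUGO) instead of write permission (S_IWUGO, apparently carried over from the remove_uid_range entry above); it is a stats file that is only ever read.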
fs/ext4/super.c  +2 −0
@@ -304,6 +304,8 @@ static void __save_error_info(struct super_block *sb, const char *func,
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

 	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+	if (bdev_read_only(sb->s_bdev))
+		return;
 	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
 	es->s_last_error_time = cpu_to_le32(get_seconds());
 	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
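The added guard still records EXT4_ERROR_FS in the in-memory mount state, but skips mutating the on-disk superblock fields when the backing block device is read-only, since the error information could not be written out anyway.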
include/linux/cgroup.h  +1 −1
@@ -948,7 +948,7 @@ static inline int cgroup_attach_task_all(struct task_struct *from,
 static inline int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css,
 					     void *tset)
 {
-	return 0;
+	return -EINVAL;
 }
 #endif /* !CONFIG_CGROUPS */
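With CONFIG_CGROUPS disabled, the stub now reports failure instead of silently approving every attach. A hypothetical caller sketch (names invented for illustration) of why the return value matters:

	/* hypothetical caller, not part of this patch */
	static int example_try_attach(struct cgroup_subsys_state *css, void *tset)
	{
		int ret = subsys_cgroup_allow_attach(css, tset);

		if (ret)
			return ret;	/* -EINVAL from the !CONFIG_CGROUPS stub */

		/* only reached when a real allow_attach check succeeded */
		return 0;
	}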
