Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 18b2bd02 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "kernel: Add snapshot of changes to support cpu isolation"

parents a00df951 201ea482
Loading
Loading
Loading
Loading
+29 −0
Original line number Diff line number Diff line
@@ -471,6 +471,26 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
	return core_mask;
}

#ifdef CONFIG_SCHED_WALT
/*
 * Record every possible CPU that shares a package with @cpuid in the
 * core_possible_sibling mask, in both directions, so that WALT can
 * consult the full (possible, not just online) sibling set.
 */
void update_possible_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	struct cpu_topology *sib_topo;
	int sibling;

	/* Topology not parsed yet for this CPU; nothing to record. */
	if (cpuid_topo->package_id == -1)
		return;

	for_each_possible_cpu(sibling) {
		sib_topo = &cpu_topology[sibling];

		if (sib_topo->package_id == cpuid_topo->package_id) {
			/* Mark the relationship symmetrically. */
			cpumask_set_cpu(cpuid, &sib_topo->core_possible_sibling);
			cpumask_set_cpu(sibling, &cpuid_topo->core_possible_sibling);
		}
	}
}
#endif

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -550,6 +570,9 @@ __weak int __init parse_acpi_topology(void)
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
#ifdef CONFIG_SCHED_WALT
	int cpu;
#endif
	reset_cpu_topology();

	/*
@@ -560,5 +583,11 @@ void __init init_cpu_topology(void)
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
#ifdef CONFIG_SCHED_WALT
	else {
		for_each_possible_cpu(cpu)
			update_possible_siblings_masks(cpu);
	}
#endif
}
#endif
+5 −0
Original line number Diff line number Diff line
@@ -1156,6 +1156,11 @@ int lock_device_hotplug_sysfs(void)
	return restart_syscall();
}

/*
 * Assert, via lockdep, that the caller holds device_hotplug_lock.
 * Compiles to a no-op in builds without lockdep checking.
 */
void lock_device_hotplug_assert(void)
{
	lockdep_assert_held(&device_hotplug_lock);
}

#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
+93 −0
Original line number Diff line number Diff line
@@ -183,9 +183,94 @@ static struct attribute_group crash_note_cpu_attr_group = {
};
#endif

#ifdef CONFIG_HOTPLUG_CPU
/*
 * sysfs show handler for per-CPU "isolate": prints 1 if the CPU is
 * currently isolated, 0 otherwise.
 */
static ssize_t isolate_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return scnprintf(buf, PAGE_SIZE-2, "%d\n",
			 cpu_isolated(cpu->dev.id));
}

/* Read-only "isolate" attribute, backed by isolate_show() above. */
static DEVICE_ATTR_RO(isolate);

/* Attribute set exposing the CPU isolation state in sysfs. */
static struct attribute *cpu_isolated_attrs[] = {
	&dev_attr_isolate.attr,
	NULL
};

static struct attribute_group cpu_isolated_attr_group = {
	.attrs = cpu_isolated_attrs,
};
#endif

#ifdef CONFIG_SCHED_WALT
/*
 * sysfs show handler for per-CPU "sched_load_boost": prints the
 * current boost value stored in the per-cpu variable.
 */
static ssize_t sched_load_boost_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return scnprintf(buf, PAGE_SIZE-2, "%d\n",
			 per_cpu(sched_load_boost, cpu->dev.id));
}

/*
 * sysfs store handler for per-CPU "sched_load_boost".
 *
 * Parses a signed integer from @buf (whitespace is stripped in place
 * first), range-checks it, and publishes it to the per-cpu
 * sched_load_boost variable.  Returns @count on success, -EINVAL for
 * out-of-range values, or the kstrtoint() error on parse failure.
 */
static ssize_t __ref sched_load_boost_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int err;
	int boost;
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = cpu->dev.id;

	/*
	 * NOTE(review): the const cast relies on the sysfs buffer being
	 * writable so strstrip() can trim it in place — confirm this is
	 * acceptable to callers of this store method.
	 */
	err = kstrtoint(strstrip((char *)buf), 0, &boost);
	if (err)
		return err;

	/*
	 * -100 is low enough to cancel out CPU's load and make it near zero.
	 * 1000 is close to the maximum value that cpu_util_freq_{walt,pelt}
	 * can take without overflow.
	 */
	if (boost < -100 || boost > 1000)
		return -EINVAL;

	per_cpu(sched_load_boost, cpuid) = boost;

	return count;
}

/* Read-write "sched_load_boost" attribute, using the handlers above. */
static DEVICE_ATTR_RW(sched_load_boost);

/* WALT scheduler tunables exposed per CPU in sysfs. */
static struct attribute *sched_cpu_attrs[] = {
	&dev_attr_sched_load_boost.attr,
	NULL
};

static struct attribute_group sched_cpu_attr_group = {
	.attrs = sched_cpu_attrs,
};
#endif

/* Attribute groups attached to every CPU device. */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
#ifdef CONFIG_SCHED_WALT
	&sched_cpu_attr_group,
#endif
	NULL
};
@@ -193,6 +278,12 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
/*
 * Attribute groups attached to hotpluggable CPU devices.  Kept in sync
 * with common_cpu_attr_groups.
 */
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
#ifdef CONFIG_SCHED_WALT
	&sched_cpu_attr_group,
#endif
	NULL
};
@@ -223,6 +314,7 @@ static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &__cpu_online_mask),
	_CPU_ATTR(possible, &__cpu_possible_mask),
	_CPU_ATTR(present, &__cpu_present_mask),
	_CPU_ATTR(core_ctl_isolated, &__cpu_isolated_mask),
};

/*
@@ -472,6 +564,7 @@ static struct attribute *cpu_root_attrs[] = {
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&cpu_attrs[3].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
	&dev_attr_isolated.attr,
+57 −0
Original line number Diff line number Diff line
@@ -1459,6 +1459,56 @@ static const struct file_operations proc_pid_sched_operations = {

#endif

/*
 * Print out various scheduling related per-task fields:
 */

#ifdef CONFIG_SCHED_WALT
/*
 * /proc/<pid>/sched_wake_up_idle handlers, provided by the WALT
 * scheduler.  NOTE(review): __weak on extern declarations (rather than
 * on the definitions) is unusual — confirm the intended linkage when
 * WALT is compiled out.
 */
extern int __weak sched_wake_up_idle_show(struct seq_file *m, void *v);
extern ssize_t __weak sched_wake_up_idle_write(struct file *file,
		const char __user *buf, size_t count, loff_t *offset);
extern int __weak sched_wake_up_idle_open(struct inode *inode,
						struct file *filp);

static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
	.open		= sched_wake_up_idle_open,
	.read		= seq_read,
	.write		= sched_wake_up_idle_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* /proc/<pid>/sched_init_task_load handlers, provided by WALT. */
extern int __weak sched_init_task_load_show(struct seq_file *m, void *v);
extern ssize_t __weak
sched_init_task_load_write(struct file *file, const char __user *buf,
					size_t count, loff_t *offset);
extern int __weak
sched_init_task_load_open(struct inode *inode, struct file *filp);

static const struct file_operations proc_pid_sched_init_task_load_operations = {
	.open		= sched_init_task_load_open,
	.read		= seq_read,
	.write		= sched_init_task_load_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* /proc/<pid>/sched_group_id handlers, provided by WALT. */
extern int __weak sched_group_id_show(struct seq_file *m, void *v);
extern ssize_t __weak
sched_group_id_write(struct file *file, const char __user *buf,
					size_t count, loff_t *offset);
extern int __weak sched_group_id_open(struct inode *inode, struct file *filp);

static const struct file_operations proc_pid_sched_group_id_operations = {
	.open		= sched_group_id_open,
	.read		= seq_read,
	.write		= sched_group_id_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif	/* CONFIG_SCHED_WALT */

#ifdef CONFIG_SCHED_AUTOGROUP
/*
 * Print out autogroup related information:
@@ -3011,6 +3061,13 @@ static const struct pid_entry tgid_base_stuff[] = {
	ONE("status",     S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",	  S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_WALT
	REG("sched_wake_up_idle", 00644,
				proc_pid_sched_wake_up_idle_operations),
	REG("sched_init_task_load", 00644,
				proc_pid_sched_init_task_load_operations),
	REG("sched_group_id", 00666, proc_pid_sched_group_id_operations),
#endif
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
+7 −0
Original line number Diff line number Diff line
@@ -48,6 +48,9 @@ struct cpu_topology {
	int llc_id;
	cpumask_t thread_sibling;
	cpumask_t core_sibling;
#ifdef CONFIG_SCHED_WALT
	cpumask_t core_possible_sibling;
#endif
	cpumask_t llc_sibling;
};

@@ -59,6 +62,10 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
#define topology_llc_cpumask(cpu)	(&cpu_topology[cpu].llc_sibling)
#ifdef CONFIG_SCHED_WALT
/* Mask of CPUs that can possibly be core siblings of @cpu (WALT only). */
#define topology_possible_sibling_cpumask(cpu)		\
				(&cpu_topology[cpu].core_possible_sibling)
#endif
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
Loading