Loading drivers/base/cpu.c +98 −0 Original line number Diff line number Diff line Loading @@ -180,9 +180,104 @@ static struct attribute_group crash_note_cpu_attr_group = { }; #endif #ifdef CONFIG_SCHED_HMP static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev, struct device_attribute *attr, char *buf) { struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t rc; int cpuid = cpu->dev.id; unsigned int pwr_cost; pwr_cost = sched_get_static_cpu_pwr_cost(cpuid); rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost); return rc; } static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cpu *cpu = container_of(dev, struct cpu, dev); int err; int cpuid = cpu->dev.id; unsigned int pwr_cost; err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost); if (err) return err; err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost); if (err >= 0) err = count; return err; } static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev, struct device_attribute *attr, char *buf) { struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t rc; int cpuid = cpu->dev.id; unsigned int pwr_cost; pwr_cost = sched_get_static_cluster_pwr_cost(cpuid); rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost); return rc; } static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cpu *cpu = container_of(dev, struct cpu, dev); int err; int cpuid = cpu->dev.id; unsigned int pwr_cost; err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost); if (err) return err; err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost); if (err >= 0) err = count; return err; } static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644, show_sched_static_cpu_pwr_cost, store_sched_static_cpu_pwr_cost); static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644, show_sched_static_cluster_pwr_cost, store_sched_static_cluster_pwr_cost); static struct 
attribute *hmp_sched_cpu_attrs[] = { &dev_attr_sched_static_cpu_pwr_cost.attr, &dev_attr_sched_static_cluster_pwr_cost.attr, NULL }; static struct attribute_group sched_hmp_cpu_attr_group = { .attrs = hmp_sched_cpu_attrs, }; #endif /* CONFIG_SCHED_HMP */ static const struct attribute_group *common_cpu_attr_groups[] = { #ifdef CONFIG_KEXEC &crash_note_cpu_attr_group, #endif #ifdef CONFIG_SCHED_HMP &sched_hmp_cpu_attr_group, #endif NULL }; Loading @@ -190,6 +285,9 @@ static const struct attribute_group *common_cpu_attr_groups[] = { static const struct attribute_group *hotplugable_cpu_attr_groups[] = { #ifdef CONFIG_KEXEC &crash_note_cpu_attr_group, #endif #ifdef CONFIG_SCHED_HMP &sched_hmp_cpu_attr_group, #endif NULL }; Loading kernel/sched/fair.c +7 −0 Original line number Diff line number Diff line Loading @@ -9442,6 +9442,13 @@ static int active_load_balance_cpu_stop(void *data) raw_spin_unlock(&busiest_rq->lock); if (push_task) { if (push_task_detached) attach_one_task(target_rq, push_task); put_task_struct(push_task); clear_reserved(target_cpu); } if (p) attach_one_task(target_rq, p); Loading Loading
/*
 * NOTE(review): this region was a verbatim duplicate of the hunks above
 * (the drivers/base/cpu.c CONFIG_SCHED_HMP sysfs attributes and the
 * kernel/sched/fair.c active_load_balance_cpu_stop() snippet) — an
 * artifact of the diff-viewer paste.  Re-declaring the same static
 * functions, DEVICE_ATTRs and attribute arrays is a redefinition error in
 * C, so the duplicate has been removed; no unique content existed here.
 */