
Commit 487dec64 authored by Puja Gupta, committed by Pavankumar Kondeti

sched: Remove HMP related code and config



Since HMP is no longer used, remove related code and config options.

Change-Id: I4a30bbdf34c63c9c400e5b826a4758eac26ce607
Signed-off-by: Puja Gupta <pujag@codeaurora.org>
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
parent 509d2944
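
The per-CPU sysfs knobs deleted in the first file below (sched_static_cpu_pwr_cost, sched_static_cluster_pwr_cost, sched_cluster_wake_up_idle) follow the kernel's standard device-attribute pattern: a show/store handler pair, a DEVICE_ATTR declaration, and an attribute_group attached to each CPU device. The sketch below illustrates only that generic pattern; every "example_" name is hypothetical and none of this code is part of the commit.

/* Illustrative only: the generic per-CPU device-attribute pattern used by
 * the HMP sysfs knobs removed in this commit. All "example_" names are
 * placeholders, not real kernel symbols.
 */
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/threads.h>

static unsigned int example_cost[NR_CPUS];

static ssize_t show_example_pwr_cost(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	/* Report the per-CPU value; one value per read. */
	return scnprintf(buf, PAGE_SIZE, "%u\n", example_cost[cpu->dev.id]);
}

static ssize_t store_example_pwr_cost(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	unsigned int val;
	int err;

	/* kstrtouint() tolerates a trailing newline from sysfs writes. */
	err = kstrtouint(buf, 0, &val);
	if (err)
		return err;

	example_cost[cpu->dev.id] = val;
	return count;
}

static DEVICE_ATTR(example_pwr_cost, 0644,
		   show_example_pwr_cost, store_example_pwr_cost);

static struct attribute *example_cpu_attrs[] = {
	&dev_attr_example_pwr_cost.attr,
	NULL
};

/* The group is then listed in the CPU device's attribute groups, exactly
 * as sched_hmp_cpu_attr_group was before this commit removed it.
 */
static struct attribute_group example_cpu_attr_group = {
	.attrs = example_cpu_attrs,
};
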
+0 −138
@@ -208,145 +208,10 @@ static struct attribute_group cpu_isolated_attr_group = {

#endif

#ifdef CONFIG_SCHED_HMP

static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;

	pwr_cost = sched_get_static_cpu_pwr_cost(cpuid);

	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);

	return rc;
}

static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int err;
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;

	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
	if (err)
		return err;

	err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost);

	if (err >= 0)
		err = count;

	return err;
}

static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;

	pwr_cost = sched_get_static_cluster_pwr_cost(cpuid);

	rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost);

	return rc;
}

static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int err;
	int cpuid = cpu->dev.id;
	unsigned int pwr_cost;

	err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost);
	if (err)
		return err;

	err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost);

	if (err >= 0)
		err = count;

	return err;
}

static ssize_t show_sched_cluser_wake_idle(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	int cpuid = cpu->dev.id;
	unsigned int wake_up_idle;

	wake_up_idle = sched_get_cluster_wake_idle(cpuid);

	rc = scnprintf(buf, PAGE_SIZE-2, "%d\n", wake_up_idle);

	return rc;
}

static ssize_t __ref store_sched_cluster_wake_idle(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int err;
	int cpuid = cpu->dev.id;
	unsigned int wake_up_idle;

	err = kstrtouint(strstrip((char *)buf), 0, &wake_up_idle);
	if (err)
		return err;

	err = sched_set_cluster_wake_idle(cpuid, wake_up_idle);

	if (err >= 0)
		err = count;

	return err;
}

static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644,
					show_sched_static_cpu_pwr_cost,
					store_sched_static_cpu_pwr_cost);
static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644,
					show_sched_static_cluster_pwr_cost,
					store_sched_static_cluster_pwr_cost);
static DEVICE_ATTR(sched_cluster_wake_up_idle, 0644,
					show_sched_cluser_wake_idle,
					store_sched_cluster_wake_idle);

static struct attribute *hmp_sched_cpu_attrs[] = {
	&dev_attr_sched_static_cpu_pwr_cost.attr,
	&dev_attr_sched_static_cluster_pwr_cost.attr,
	&dev_attr_sched_cluster_wake_up_idle.attr,
	NULL
};

static struct attribute_group sched_hmp_cpu_attr_group = {
	.attrs = hmp_sched_cpu_attrs,
};

#endif /* CONFIG_SCHED_HMP */
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
@@ -357,9 +222,6 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
#ifdef CONFIG_SCHED_HMP
	&sched_hmp_cpu_attr_group,
#endif
#ifdef CONFIG_HOTPLUG_CPU
	&cpu_isolated_attr_group,
#endif
+4 −3
@@ -699,7 +699,8 @@ static void cpufreq_interactive_timer(int data)
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process_no_notif(speedchange_task);

	wake_up_process(speedchange_task);

rearm:
	cpufreq_interactive_timer_resched(data, false);
@@ -814,7 +815,7 @@ static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunab
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process_no_notif(speedchange_task);
		wake_up_process(speedchange_task);
}

static int load_change_callback(struct notifier_block *nb, unsigned long val,
@@ -1926,7 +1927,7 @@ static int __init cpufreq_interactive_gov_init(void)
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process_no_notif(speedchange_task);
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
}
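
For context on the substitution above: wake_up_process_no_notif() was an HMP-specific helper (its declaration is removed from sched.h later in this commit), so the governor's speedchange thread is now started and woken through the stock kthread API. A minimal sketch of that generic create-then-wake pattern follows; the example_* names are placeholders and this is not the governor's actual code.

#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *example_task;

static int example_worker_fn(void *data)
{
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		schedule();
		/* Woken up: handle pending work here, then sleep again. */
	}
	return 0;
}

static int __init example_start_worker(void)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	example_task = kthread_create(example_worker_fn, NULL, "example_worker");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);

	/* Run the worker as an RT task and pin a reference to it. */
	sched_setscheduler_nocheck(example_task, SCHED_FIFO, &param);
	get_task_struct(example_task);

	/* Wake it once so the freezer does not see the new thread as hung. */
	wake_up_process(example_task);
	return 0;
}
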
+1 −1
@@ -1632,7 +1632,7 @@ static const struct file_operations proc_pid_sched_group_id_operations = {
	.release	= single_release,
};

#endif	/* CONFIG_SCHED_HMP */
#endif	/* CONFIG_SCHED_WALT */

#ifdef CONFIG_SCHED_AUTOGROUP
/*
+1 −41
@@ -1503,9 +1503,6 @@ struct ravg {
	u32 sum_history[RAVG_HIST_SIZE_MAX];
	u32 *curr_window_cpu, *prev_window_cpu;
	u32 curr_window, prev_window;
#ifdef CONFIG_SCHED_HMP
	u64 curr_burst, avg_burst, avg_sleep_time;
#endif
	u16 active_windows;
	u32 pred_demand;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
@@ -2653,38 +2650,10 @@ struct cpu_cycle_counter_cb {

#define MAX_NUM_CGROUP_COLOC_ID	20

#ifdef CONFIG_SCHED_HMP
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
				const struct cpumask *query_cpus);
extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
extern u32 sched_get_init_task_load(struct task_struct *p);
extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle);
extern unsigned int sched_get_cluster_wake_idle(int cpu);
extern int sched_update_freq_max_load(const cpumask_t *cpumask);
extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
							u32 fmin, u32 fmax);
extern void sched_set_cpu_cstate(int cpu, int cstate,
			 int wakeup_energy, int wakeup_latency);
extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
				int wakeup_energy, int wakeup_latency);
extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int sched_get_group_id(struct task_struct *p);

#else /* CONFIG_SCHED_HMP */
static inline int sched_set_window(u64 window_start, unsigned int window_size)
{
	return -EINVAL;
}
static inline unsigned long sched_get_busy(int cpu)
{
	return 0;
}
static inline void sched_get_cpus_busy(struct sched_load *busy,
				       const struct cpumask *query_cpus) {};

@@ -2698,12 +2667,6 @@ sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
{
}

static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
			int dstate, int wakeup_energy, int wakeup_latency)
{
}
#endif /* CONFIG_SCHED_HMP */

#ifdef CONFIG_SCHED_WALT
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern void sched_set_io_is_busy(int val);
@@ -2731,10 +2694,8 @@ static inline void free_task_load_ptrs(struct task_struct *p) { }
#endif /* CONFIG_SCHED_WALT */

#ifndef CONFIG_SCHED_WALT
#ifndef CONFIG_SCHED_HMP
static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
					u32 fmin, u32 fmax) { }
#endif /* CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_WALT */

#ifdef CONFIG_NO_HZ_COMMON
@@ -2847,7 +2808,7 @@ extern unsigned long long
task_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
@@ -2982,7 +2943,6 @@ extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern int wake_up_process_no_notif(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
+1 −35
@@ -37,47 +37,13 @@ extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
extern unsigned int sysctl_sched_group_downmigrate_pct;
#endif

#ifdef CONFIG_SCHED_HMP

enum freq_reporting_policy {
	FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK,
	FREQ_REPORT_CPU_LOAD,
	FREQ_REPORT_TOP_TASK,
	FREQ_REPORT_INVALID_POLICY
};

extern int sysctl_sched_freq_inc_notify;
extern int sysctl_sched_freq_dec_notify;
extern unsigned int sysctl_sched_freq_reporting_policy;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_hist_size;
extern unsigned int sysctl_sched_spill_nr_run;
extern unsigned int sysctl_sched_spill_load_pct;
extern unsigned int sysctl_sched_upmigrate_pct;
extern unsigned int sysctl_sched_downmigrate_pct;
extern unsigned int sysctl_early_detection_duration;
extern unsigned int sysctl_sched_small_wakee_task_load_pct;
extern unsigned int sysctl_sched_big_waker_task_load_pct;
extern unsigned int sysctl_sched_select_prev_cpu_us;
extern unsigned int sysctl_sched_restrict_cluster_spill;
extern unsigned int sysctl_sched_pred_alert_freq;
extern unsigned int sysctl_sched_freq_aggregate;
extern unsigned int sysctl_sched_enable_thread_grouping;
extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker;
extern unsigned int sysctl_sched_short_burst;
extern unsigned int sysctl_sched_short_sleep;

#elif defined(CONFIG_SCHED_WALT)

extern int
walt_proc_update_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp,
			 loff_t *ppos);

#endif /* CONFIG_SCHED_HMP */
#endif /* CONFIG_SCHED_WALT */

enum sched_tunable_scaling {
	SCHED_TUNABLESCALING_NONE,