
Commit b53e921b authored by Mike Travis, committed by Ingo Molnar
Browse files

generic: reduce stack pressure in sched_affinity



  * Modify sched_affinity functions to pass cpumask_t variables by reference
    instead of by value.

  * Use new set_cpus_allowed_ptr function.

Depends on:
	[sched-devel]: sched: add new set_cpus_allowed_ptr function

Cc: Paul Jackson <pj@sgi.com>
Cc: Cliff Wickman <cpw@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f9a86fcb
+23 −23
Original line number Diff line number Diff line
@@ -251,18 +251,18 @@ struct threshold_attr {
	ssize_t(*store) (struct threshold_block *, const char *, size_t count);
};

static cpumask_t affinity_set(unsigned int cpu)
static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
					   cpumask_t *newmask)
{
	cpumask_t oldmask = current->cpus_allowed;
	cpumask_t newmask = CPU_MASK_NONE;
	cpu_set(cpu, newmask);
	set_cpus_allowed(current, newmask);
	return oldmask;
	*oldmask = current->cpus_allowed;
	cpus_clear(*newmask);
	cpu_set(cpu, *newmask);
	set_cpus_allowed_ptr(current, newmask);
}

static void affinity_restore(cpumask_t oldmask)
static void affinity_restore(const cpumask_t *oldmask)
{
	set_cpus_allowed(current, oldmask);
	set_cpus_allowed_ptr(current, oldmask);
}

#define SHOW_FIELDS(name)                                           \
@@ -277,15 +277,15 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
				      const char *buf, size_t count)
{
	char *end;
	cpumask_t oldmask;
	cpumask_t oldmask, newmask;
	unsigned long new = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EINVAL;
	b->interrupt_enable = !!new;

	oldmask = affinity_set(b->cpu);
	affinity_set(b->cpu, &oldmask, &newmask);
	threshold_restart_bank(b, 0, 0);
	affinity_restore(oldmask);
	affinity_restore(&oldmask);

	return end - buf;
}
@@ -294,7 +294,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
				     const char *buf, size_t count)
{
	char *end;
	cpumask_t oldmask;
	cpumask_t oldmask, newmask;
	u16 old;
	unsigned long new = simple_strtoul(buf, &end, 0);
	if (end == buf)
@@ -306,9 +306,9 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
	old = b->threshold_limit;
	b->threshold_limit = new;

	oldmask = affinity_set(b->cpu);
	affinity_set(b->cpu, &oldmask, &newmask);
	threshold_restart_bank(b, 0, old);
	affinity_restore(oldmask);
	affinity_restore(&oldmask);

	return end - buf;
}
@@ -316,10 +316,10 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 high, low;
	cpumask_t oldmask;
	oldmask = affinity_set(b->cpu);
	cpumask_t oldmask, newmask;
	affinity_set(b->cpu, &oldmask, &newmask);
	rdmsr(b->address, low, high);
	affinity_restore(oldmask);
	affinity_restore(&oldmask);
	return sprintf(buf, "%x\n",
		       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}
@@ -327,10 +327,10 @@ static ssize_t show_error_count(struct threshold_block *b, char *buf)
static ssize_t store_error_count(struct threshold_block *b,
				 const char *buf, size_t count)
{
	cpumask_t oldmask;
	oldmask = affinity_set(b->cpu);
	cpumask_t oldmask, newmask;
	affinity_set(b->cpu, &oldmask, &newmask);
	threshold_restart_bank(b, 1, 0);
	affinity_restore(oldmask);
	affinity_restore(&oldmask);
	return 1;
}

@@ -468,7 +468,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	int i, err = 0;
	struct threshold_bank *b = NULL;
	cpumask_t oldmask = CPU_MASK_NONE;
	cpumask_t oldmask, newmask;
	char name[32];

	sprintf(name, "threshold_bank%i", bank);
@@ -519,10 +519,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)

	per_cpu(threshold_banks, cpu)[bank] = b;

	oldmask = affinity_set(cpu);
	affinity_set(cpu, &oldmask, &newmask);
	err = allocate_threshold_blocks(cpu, bank, 0,
					MSR_IA32_MC0_MISC + bank * 4);
	affinity_restore(oldmask);
	affinity_restore(&oldmask);

	if (err)
		goto out_free;
+1 −1
Original line number Diff line number Diff line
@@ -2034,7 +2034,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
}
#endif

extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

extern int sched_mc_power_savings, sched_smt_power_savings;
+1 −1
Original line number Diff line number Diff line
@@ -445,7 +445,7 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
	if (retval)
		return retval;

	return sched_setaffinity(pid, new_mask);
	return sched_setaffinity(pid, &new_mask);
}

asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
+2 −2
Original line number Diff line number Diff line
@@ -1007,10 +1007,10 @@ void __synchronize_sched(void)
	if (sched_getaffinity(0, &oldmask) < 0)
		oldmask = cpu_possible_map;
	for_each_online_cpu(cpu) {
		sched_setaffinity(0, cpumask_of_cpu(cpu));
		sched_setaffinity(0, &cpumask_of_cpu(cpu));
		schedule();
	}
	sched_setaffinity(0, oldmask);
	sched_setaffinity(0, &oldmask);
}
EXPORT_SYMBOL_GPL(__synchronize_sched);

+3 −2
Original line number Diff line number Diff line
@@ -4908,9 +4908,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
	return retval;
}

long sched_setaffinity(pid_t pid, cpumask_t new_mask)
long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
{
	cpumask_t cpus_allowed;
	cpumask_t new_mask = *in_mask;
	struct task_struct *p;
	int retval;

@@ -4991,7 +4992,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
	if (retval)
		return retval;

	return sched_setaffinity(pid, new_mask);
	return sched_setaffinity(pid, &new_mask);
}

/*