Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1e3646ff authored by Rik van Riel, committed by Ingo Molnar
Browse files

mm: numa: Revert temporarily disabling of NUMA migration



With the scan rate code working (at least for multi-instance specjbb),
the large hammer that is "sched: Do not migrate memory immediately after
switching node" can be replaced with something smarter. Revert the temporary
migration disabling and all traces of numa_migrate_seq.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-61-git-send-email-mgorman@suse.de


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 930aa174
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -1340,7 +1340,6 @@ struct task_struct {
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	int numa_migrate_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	unsigned long numa_migrate_retry;
+0 −2
Original line number Diff line number Diff line
@@ -1731,7 +1731,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_migrate_seq = 1;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;
@@ -4488,7 +4487,6 @@ void sched_setnuma(struct task_struct *p, int nid)
		p->sched_class->put_prev_task(rq, p);

	p->numa_preferred_nid = nid;
	p->numa_migrate_seq = 1;

	if (running)
		p->sched_class->set_curr_task(rq);
+1 −24
Original line number Diff line number Diff line
@@ -1261,16 +1261,8 @@ static void numa_migrate_preferred(struct task_struct *p)
{
	/* Success if task is already running on preferred CPU */
	p->numa_migrate_retry = 0;
	if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) {
		/*
		 * If migration is temporarily disabled due to a task migration
		 * then re-enable it now as the task is running on its
		 * preferred node and memory should migrate locally
		 */
		if (!p->numa_migrate_seq)
			p->numa_migrate_seq++;
	if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
		return;
	}

	/* This task has no NUMA fault statistics yet */
	if (unlikely(p->numa_preferred_nid == -1))
@@ -1367,7 +1359,6 @@ static void task_numa_placement(struct task_struct *p)
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;
	p->numa_migrate_seq++;
	p->numa_scan_period_max = task_scan_max(p);

	/* If the task is part of a group prevent parallel updates to group stats */
@@ -4730,20 +4721,6 @@ static void move_task(struct task_struct *p, struct lb_env *env)
	set_task_cpu(p, env->dst_cpu);
	activate_task(env->dst_rq, p, 0);
	check_preempt_curr(env->dst_rq, p, 0);
#ifdef CONFIG_NUMA_BALANCING
	if (p->numa_preferred_nid != -1) {
		int src_nid = cpu_to_node(env->src_cpu);
		int dst_nid = cpu_to_node(env->dst_cpu);

		/*
		 * If the load balancer has moved the task then limit
		 * migrations from taking place in the short term in
		 * case this is a short-lived migration.
		 */
		if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
			p->numa_migrate_seq = 0;
	}
#endif
}

/*
+0 −12
Original line number Diff line number Diff line
@@ -2404,18 +2404,6 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
		last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
		if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid)
			goto out;

#ifdef CONFIG_NUMA_BALANCING
		/*
		 * If the scheduler has just moved us away from our
		 * preferred node, do not bother migrating pages yet.
		 * This way a short and temporary process migration will
		 * not cause excessive memory migration.
		 */
		if (thisnid != current->numa_preferred_nid &&
				!current->numa_migrate_seq)
			goto out;
#endif
	}

	if (curnid != polnid)