Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c5f59f08 authored by Mike Travis, committed by Ingo Molnar
Browse files

nodemask: use new node_to_cpumask_ptr function



  * Use new node_to_cpumask_ptr.  This creates a pointer to the
    cpumask for a given node.  This definition is in mm patch:

	asm-generic-add-node_to_cpumask_ptr-macro.patch

  * Use new set_cpus_allowed_ptr function.

Depends on:
	[mm-patch]: asm-generic-add-node_to_cpumask_ptr-macro.patch
	[sched-devel]: sched: add new set_cpus_allowed_ptr function
	[x86/latest]: x86: add cpus_scnprintf function

Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Greg Banks <gnb@melbourne.sgi.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent b53e921b
Loading
Loading
Loading
Loading
+4 −3
Original line number Diff line number Diff line
@@ -22,14 +22,15 @@ static struct sysdev_class node_class = {
static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
{
	struct node *node_dev = to_node(dev);
	cpumask_t mask = node_to_cpumask(node_dev->sysdev.id);
	node_to_cpumask_ptr(mask, node_dev->sysdev.id);
	int len;

	/* 2004/06/03: buf currently PAGE_SIZE, need > 1 char per 4 bits. */
	BUILD_BUG_ON(MAX_NUMNODES/4 > PAGE_SIZE/2);

	len = cpumask_scnprintf(buf, PAGE_SIZE-1, mask);
	len += sprintf(buf + len, "\n");
	len = cpumask_scnprintf(buf, PAGE_SIZE-2, *mask);
 	buf[len++] = '\n';
 	buf[len] = '\0';
	return len;
}

+14 −15
Original line number Diff line number Diff line
@@ -6448,7 +6448,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
 *
 * Should use nodemask_t.
 */
static int find_next_best_node(int node, unsigned long *used_nodes)
static int find_next_best_node(int node, nodemask_t *used_nodes)
{
	int i, n, val, min_val, best_node = 0;

@@ -6462,7 +6462,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
			continue;

		/* Skip already used nodes */
		if (test_bit(n, used_nodes))
		if (node_isset(n, *used_nodes))
			continue;

		/* Simple min distance search */
@@ -6474,14 +6474,13 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
		}
	}

	set_bit(best_node, used_nodes);
	node_set(best_node, *used_nodes);
	return best_node;
}

/**
 * sched_domain_node_span - get a cpumask for a node's sched_domain
 * @node: node whose cpumask we're constructing
 * @size: number of nodes to include in this span
 *
 * Given a node, construct a good cpumask for its sched_domain to span. It
 * should be one that prevents unnecessary balancing, but also spreads tasks
@@ -6489,22 +6488,22 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
 */
static cpumask_t sched_domain_node_span(int node)
{
	DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
	cpumask_t span, nodemask;
	nodemask_t used_nodes;
	cpumask_t span;
	node_to_cpumask_ptr(nodemask, node);
	int i;

	cpus_clear(span);
	bitmap_zero(used_nodes, MAX_NUMNODES);
	nodes_clear(used_nodes);

	nodemask = node_to_cpumask(node);
	cpus_or(span, span, nodemask);
	set_bit(node, used_nodes);
	cpus_or(span, span, *nodemask);
	node_set(node, used_nodes);

	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
		int next_node = find_next_best_node(node, used_nodes);
		int next_node = find_next_best_node(node, &used_nodes);

		nodemask = node_to_cpumask(next_node);
		cpus_or(span, span, nodemask);
		node_to_cpumask_ptr_next(nodemask, next_node);
		cpus_or(span, span, *nodemask);
	}

	return span;
@@ -6901,6 +6900,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
		for (j = 0; j < MAX_NUMNODES; j++) {
			cpumask_t tmp, notcovered;
			int n = (i + j) % MAX_NUMNODES;
			node_to_cpumask_ptr(pnodemask, n);

			cpus_complement(notcovered, covered);
			cpus_and(tmp, notcovered, *cpu_map);
@@ -6908,8 +6908,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
			if (cpus_empty(tmp))
				break;

			nodemask = node_to_cpumask(n);
			cpus_and(tmp, tmp, nodemask);
			cpus_and(tmp, tmp, *pnodemask);
			if (cpus_empty(tmp))
				continue;

+3 −3
Original line number Diff line number Diff line
@@ -2029,6 +2029,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;
	node_to_cpumask_ptr(tmp, 0);

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
@@ -2037,7 +2038,6 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
	}

	for_each_node_state(n, N_HIGH_MEMORY) {
		cpumask_t tmp;

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
@@ -2050,8 +2050,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = node_to_cpumask(n);
		if (!cpus_empty(tmp))
		node_to_cpumask_ptr_next(tmp, n);
		if (!cpus_empty(*tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
+2 −3
Original line number Diff line number Diff line
@@ -1160,14 +1160,13 @@ static void __cpuinit cpuup_canceled(long cpu)
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	node_to_cpumask_ptr(mask, node);

	list_for_each_entry(cachep, &cache_chain, next) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;
		cpumask_t mask;

		mask = node_to_cpumask(node);
		/* cpu is dead; no one can alloc from it. */
		nc = cachep->array[cpu];
		cachep->array[cpu] = NULL;
@@ -1183,7 +1182,7 @@ static void __cpuinit cpuup_canceled(long cpu)
		if (nc)
			free_block(cachep, nc->entry, nc->avail, node);

		if (!cpus_empty(mask)) {
		if (!cpus_empty(*mask)) {
			spin_unlock_irq(&l3->list_lock);
			goto free_array_cache;
		}
+8 −10
Original line number Diff line number Diff line
@@ -1647,11 +1647,10 @@ static int kswapd(void *p)
	struct reclaim_state reclaim_state = {
		.reclaimed_slab = 0,
	};
	cpumask_t cpumask;
	node_to_cpumask_ptr(cpumask, pgdat->node_id);

	cpumask = node_to_cpumask(pgdat->node_id);
	if (!cpus_empty(cpumask))
		set_cpus_allowed(tsk, cpumask);
	if (!cpus_empty(*cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);
	current->reclaim_state = &reclaim_state;

	/*
@@ -1880,17 +1879,16 @@ out:
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	pg_data_t *pgdat;
	cpumask_t mask;
	int nid;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
		for_each_node_state(nid, N_HIGH_MEMORY) {
			pgdat = NODE_DATA(nid);
			mask = node_to_cpumask(pgdat->node_id);
			if (any_online_cpu(mask) != NR_CPUS)
			pg_data_t *pgdat = NODE_DATA(nid);
			node_to_cpumask_ptr(mask, pgdat->node_id);

			if (any_online_cpu(*mask) < nr_cpu_ids)
				/* One of our CPUs online: restore mask */
				set_cpus_allowed(pgdat->kswapd, mask);
				set_cpus_allowed_ptr(pgdat->kswapd, mask);
		}
	}
	return NOTIFY_OK;
Loading