Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1a687c2e authored by Mel Gorman
Browse files

mm: sched: numa: Control enabling and disabling of NUMA balancing



This patch adds Kconfig options and kernel parameters to allow the
enabling and disabling of automatic NUMA balancing. The existence
of such a switch was and is very important when debugging problems
related to transparent hugepages and we should have the same for
automatic NUMA placement.

Signed-off-by: Mel Gorman <mgorman@suse.de>
parent b8593bfd
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -1996,6 +1996,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

	nr_uarts=	[SERIAL] maximum number of UARTs to be registered.

	numa_balancing=	[KNL,X86] Enable or disable automatic NUMA balancing.
			Allowed values are enable and disable

	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
			one of ['zone', 'node', 'default'] can be specified
			This can be set from sysctl after boot.
+4 −0
Original line number Diff line number Diff line
@@ -1563,10 +1563,14 @@ struct task_struct {

#ifdef CONFIG_NUMA_BALANCING
/* Account a NUMA hinting fault of @pages pages on @node; @migrated says
 * whether the faulting page was migrated as a result of the fault. */
extern void task_numa_fault(int node, int pages, bool migrated);
/* Runtime switch for automatic NUMA balancing (toggles the NUMA sched
 * feature); backs the numa_balancing= boot parameter. */
extern void set_numabalancing_state(bool enabled);
#else
/* !CONFIG_NUMA_BALANCING: no-op stubs so callers need no #ifdefs. */
static inline void task_numa_fault(int node, int pages, bool migrated)
{
}
static inline void set_numabalancing_state(bool enabled)
{
}
#endif

/*
+8 −0
Original line number Diff line number Diff line
@@ -720,6 +720,14 @@ config ARCH_USES_NUMA_PROT_NONE
	depends on ARCH_WANTS_PROT_NUMA_PROT_NONE
	depends on NUMA_BALANCING

config NUMA_BALANCING_DEFAULT_ENABLED
	bool "Automatically enable NUMA aware memory/task placement"
	default y
	depends on NUMA_BALANCING
	help
	  If set, automatic NUMA balancing will be enabled if running on a NUMA
	  machine.

config NUMA_BALANCING
	bool "Memory placement aware NUMA scheduler"
	default y
+33 −15
Original line number Diff line number Diff line
@@ -192,23 +192,10 @@ static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
static int sched_feat_set(char *cmp)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
@@ -228,6 +215,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	i = sched_feat_set(cmp);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

@@ -1549,6 +1557,16 @@ static void __sched_fork(struct task_struct *p)
#endif /* CONFIG_NUMA_BALANCING */
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Toggle automatic NUMA balancing at runtime by flipping the NUMA sched
 * feature: enabling selects "NUMA", disabling selects "NO_NUMA".
 */
void set_numabalancing_state(bool enabled)
{
	sched_feat_set(enabled ? "NUMA" : "NO_NUMA");
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * fork()/clone()-time setup:
 */
+3 −0
Original line number Diff line number Diff line
@@ -811,6 +811,9 @@ void task_numa_fault(int node, int pages, bool migrated)
{
	struct task_struct *p = current;

	if (!sched_feat_numa(NUMA))
		return;

	/* FIXME: Allocate task-specific structure for placement policy here */

	/*
Loading