Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a7517e81 authored by Patrick Daly
Browse files

psi: Add emergency trigger



Provide a hook for the page allocator to call into before
declaring OOM.

Change-Id: I70e1d19395f8cfba4b56d11446670dd191b0a39f
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent 8d6e8215
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -23,6 +23,8 @@ void psi_memstall_leave(unsigned long *flags);

int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);

void psi_emergency_trigger(void);

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgrp);
void psi_cgroup_free(struct cgroup *cgrp);
@@ -43,6 +45,8 @@ static inline void psi_init(void) {}
static inline void psi_memstall_enter(unsigned long *flags) {}
static inline void psi_memstall_leave(unsigned long *flags) {}

static inline void psi_emergency_trigger(void){}

#ifdef CONFIG_CGROUPS
static inline int psi_cgroup_alloc(struct cgroup *cgrp)
{
+25 −0
Original line number Diff line number Diff line
@@ -587,6 +587,31 @@ static u64 update_triggers(struct psi_group *group, u64 now)
	return now + group->poll_min_period;
}

/*
 * psi_emergency_trigger - fire every trigger registered on the system-wide
 * PSI group, regardless of whether its pressure threshold was actually
 * crossed.
 *
 * Called from the page allocator OOM path (see should_ulmk_retry()) so that
 * a userspace low-memory killer polling on PSI triggers is woken before the
 * kernel declares OOM.  Must be safe to call from the allocation slowpath.
 */
void psi_emergency_trigger(void)
{
	struct psi_group *group = &psi_system;
	struct psi_trigger *t;

	/* PSI compiled in but disabled at boot: nothing to wake. */
	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * In unlikely case that OOM was triggered while adding/
	 * removing triggers.
	 */
	if (!mutex_trylock(&group->trigger_lock))
		return;

	list_for_each_entry(t, &group->triggers, node) {
		trace_psi_event(t->state, t->threshold);

		/*
		 * Generate an event: latch t->event 0 -> 1 atomically so a
		 * concurrent reader consuming the event sees it exactly once;
		 * only the winner of the cmpxchg performs the wakeup.
		 */
		if (cmpxchg(&t->event, 0, 1) == 0)
			wake_up_interruptible(&t->event_wait);
	}
	mutex_unlock(&group->trigger_lock);
}

/*
 * Schedule polling if it's not already scheduled. It's safe to call even from
 * hotpath because even though kthread_queue_delayed_work takes worker->lock
+29 −2
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
#include <linux/show_mem_notifier.h>
#include <linux/psi.h>

#include <asm/tlb.h>
#include "internal.h"
@@ -74,13 +75,39 @@ DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER
static atomic64_t ulmk_kill_jiffies = ATOMIC64_INIT(INITIAL_JIFFIES);
static unsigned long psi_emergency_jiffies = INITIAL_JIFFIES;
static DEFINE_MUTEX(ulmk_retry_lock);


/*
 * psi_emergency_jiffies represents the last ULMK emergency event.
 * Give ULMK a 2 second window to handle this event.
 * If ULMK has made some progress since then, send another.
 * Repeat as necessary.
 */
/*
 * should_ulmk_retry - decide whether the page allocator should retry the
 * allocation instead of declaring OOM, giving the userspace low-memory
 * killer (ULMK) a chance to free memory.
 *
 * Returns true when we are still inside the 2-second grace window of the
 * last PSI emergency event, or when ULMK has made progress (killed
 * something) since the last event, in which case a fresh emergency trigger
 * is sent and a new window is started.
 *
 * Serialized by ulmk_retry_lock so that psi_emergency_jiffies updates and
 * trigger generation are not raced by concurrent allocators.
 */
bool should_ulmk_retry(void)
{
	unsigned long j = atomic64_read(&ulmk_kill_jiffies);
	unsigned long now, last_kill;
	bool ret = false;

	mutex_lock(&ulmk_retry_lock);
	now = jiffies;
	last_kill = atomic64_read(&ulmk_kill_jiffies);
	/* Still within the 2s window of the last emergency event: keep retrying. */
	if (time_before(now, psi_emergency_jiffies + 2 * HZ)) {
		ret = true;
		goto out;
	}

	/* ULMK killed since our last event: send another and restart the window. */
	if (time_after_eq(last_kill, psi_emergency_jiffies)) {
		psi_emergency_jiffies = now;
		psi_emergency_trigger();
		ret = true;
		goto out;
	}

	/*
	 * No progress from ULMK: retry only if we are still within 2s of the
	 * last recorded kill.  Route through 'out' so ulmk_retry_lock is
	 * released -- the original fall-through return leaked the mutex.
	 */
	ret = time_before(jiffies, j + 2 * HZ);
out:
	mutex_unlock(&ulmk_retry_lock);
	return ret;
}

void ulmk_update_last_kill(void)
+2 −3
Original line number Diff line number Diff line
@@ -4565,9 +4565,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
		goto nopage;

	if (order <= PAGE_ALLOC_COSTLY_ORDER && should_ulmk_retry())
		goto retry;

	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
				 did_some_progress > 0, &no_progress_loops))
		goto retry;
@@ -4585,6 +4582,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
				&compaction_retries))
		goto retry;

	if (order <= PAGE_ALLOC_COSTLY_ORDER && should_ulmk_retry())
		goto retry;

	/* Deal with possible cpuset update races before we start OOM killing */
	if (check_retry_cpuset(cpuset_mems_cookie, ac))