Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3ba08129 authored by Naoya Horiguchi's avatar Naoya Horiguchi Committed by Linus Torvalds
Browse files

mm/memory-failure.c: support use of a dedicated thread to handle SIGBUS(BUS_MCEERR_AO)



Currently memory error handler handles action optional errors in the
deferred manner by default.  And if a recovery aware application wants
to handle it immediately, it can do it by setting PF_MCE_EARLY flag.
However, such signal can be sent only to the main thread, so it's
problematic if the application wants to have a dedicated thread to
handle such signals.

So this patch adds dedicated thread support to memory error handler.  We
have PF_MCE_EARLY flags for each thread separately, so with this patch
AO signal is sent to the thread with PF_MCE_EARLY flag set, not the main
thread.  If you want to implement a dedicated thread, you call prctl()
to set PF_MCE_EARLY on the thread.

Memory error handler collects processes to be killed, so this patch lets
it check PF_MCE_EARLY flag on each thread in the collecting routines.

No behavioral change for all non-early kill cases.

Tony said:

: The old behavior was crazy - someone with a multithreaded process might
: well expect that if they call prctl(PF_MCE_EARLY) in just one thread, then
: that thread would see the SIGBUS with si_code = BUS_MCEERR_AO - even if
: that thread wasn't the main thread for the process.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: default avatarNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: default avatarTony Luck <tony.luck@intel.com>
Cc: Kamil Iskra <iskra@mcs.anl.gov>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Chen Gong <gong.chen@linux.jf.intel.com>
Cc: <stable@vger.kernel.org>	[3.2+]
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 74614de1
Loading
Loading
Loading
Loading
+5 −0
Original line number Original line Diff line number Diff line
@@ -84,6 +84,11 @@ PR_MCE_KILL
 		PR_MCE_KILL_EARLY: Early kill
 		PR_MCE_KILL_LATE:  Late kill
 		PR_MCE_KILL_DEFAULT: Use system global default
+	Note that if you want to have a dedicated thread which handles
+	the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should
+	call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise,
+	the SIGBUS is sent to the main thread.
+
 PR_MCE_KILL_GET
 	return current mode


+43 −13
Original line number Original line Diff line number Diff line
@@ -380,15 +380,44 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
 	}
 }
 
-static int task_early_kill(struct task_struct *tsk, int force_early)
+/*
+ * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
+ * on behalf of the thread group. Return task_struct of the (first found)
+ * dedicated thread if found, and return NULL otherwise.
+ *
+ * We already hold read_lock(&tasklist_lock) in the caller, so we don't
+ * have to call rcu_read_lock/unlock() in this function.
+ */
+static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
 {
+	struct task_struct *t;
+
+	for_each_thread(tsk, t)
+		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
+			return t;
+	return NULL;
+}
+
+/*
+ * Determine whether a given process is "early kill" process which expects
+ * to be signaled when some page under the process is hwpoisoned.
+ * Return task_struct of the dedicated thread (main thread unless explicitly
+ * specified) if the process is "early kill," and otherwise returns NULL.
+ */
+static struct task_struct *task_early_kill(struct task_struct *tsk,
+					   int force_early)
+{
+	struct task_struct *t;
 	if (!tsk->mm)
-		return 0;
+		return NULL;
 	if (force_early)
-		return 1;
-	if (tsk->flags & PF_MCE_PROCESS)
-		return !!(tsk->flags & PF_MCE_EARLY);
-	return sysctl_memory_failure_early_kill;
+		return tsk;
+	t = find_early_kill_thread(tsk);
+	if (t)
+		return t;
+	if (sysctl_memory_failure_early_kill)
+		return tsk;
+	return NULL;
 }
 
 /*


/*
/*
@@ -410,16 +439,17 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 	read_lock(&tasklist_lock);
 	for_each_process (tsk) {
 		struct anon_vma_chain *vmac;
+		struct task_struct *t = task_early_kill(tsk, force_early);
 
-		if (!task_early_kill(tsk, force_early))
+		if (!t)
 			continue;
 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
 					       pgoff, pgoff) {
 			vma = vmac->vma;
 			if (!page_mapped_in_vma(page, vma))
 				continue;
-			if (vma->vm_mm == tsk->mm)
-				add_to_kill(tsk, page, vma, to_kill, tkc);
+			if (vma->vm_mm == t->mm)
+				add_to_kill(t, page, vma, to_kill, tkc);
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -440,10 +470,10 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		struct task_struct *t = task_early_kill(tsk, force_early);
 
-		if (!task_early_kill(tsk, force_early))
+		if (!t)
 			continue;
 
 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
 				      pgoff) {
 			/*
@@ -453,8 +483,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 			 * Assume applications who requested early kill want
 			 * to be informed of all such data corruptions.
 			 */
-			if (vma->vm_mm == tsk->mm)
-				add_to_kill(tsk, page, vma, to_kill, tkc);
+			if (vma->vm_mm == t->mm)
+				add_to_kill(t, page, vma, to_kill, tkc);
 		}
 	}
 	read_unlock(&tasklist_lock);