Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 69048176 authored by Michal Hocko's avatar Michal Hocko Committed by Linus Torvalds
Browse files

vdso: make arch_setup_additional_pages wait for mmap_sem for write killable



Most architectures rely on mmap_sem held for write in their
arch_setup_additional_pages.  If the waiting task gets killed by the OOM
killer, it would block the oom_reaper from asynchronous address space
reclaim and reduce the chances of timely OOM resolution.  Wait for the
lock in killable mode and return -EINTR if the task got killed while
waiting.

Signed-off-by: default avatarMichal Hocko <mhocko@suse.com>
Acked-by: Andy Lutomirski <luto@amacapital.net>	[x86 vdso]
Acked-by: default avatarVlastimil Babka <vbabka@suse.cz>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 91f4f94e
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -420,7 +420,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	npages = 1; /* for sigpage */
	npages += vdso_total_pages;

-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
	hint = sigpage_addr(mm, npages);
	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(addr)) {
+4 −2
Original line number Diff line number Diff line
@@ -95,7 +95,8 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
	};
	void *ret;

-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
	current->mm->context.vdso = (void *)addr;

	/* Map vectors page at the high address. */
@@ -163,7 +164,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
+2 −1
Original line number Diff line number Diff line
@@ -65,7 +65,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	unsigned long vdso_base;
	struct mm_struct *mm = current->mm;

-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;

	/* Try to get it loaded right near ld.so/glibc. */
	vdso_base = STACK_TOP;
+2 −1
Original line number Diff line number Diff line
@@ -104,7 +104,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	struct resource gic_res;
	int ret;

-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;

	/*
	 * Determine total area size. This includes the VDSO data itself, the
+2 −1
Original line number Diff line number Diff line
@@ -195,7 +195,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	 * and end up putting it elsewhere.
	 * Add enough to the size so that the result can be aligned.
	 */
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      (vdso_pages << PAGE_SHIFT) +
				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
Loading