Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ab3b21e5 authored by Minchan Kim, committed by Charan Teja Reddy
Browse files

mm/madvise: check fatal signal pending of target process

Bail out to prevent unnecessary CPU overhead if target process has pending
fatal signal during (MADV_COLD|MADV_PAGEOUT) operation.

Change-Id: Ic264c3ba5b16c91b6395e5a361a3c47e4fd69256
Link: http://lkml.kernel.org/r/20200302193630.68771-4-minchan@kernel.org


Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Christian Brauner <christian@brauner.io>
Cc: Daniel Colascione <dancol@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Dias <joaodias@google.com>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oleksandr Natalenko <oleksandr@redhat.com>
Cc: Sandeep Patil <sspatil@google.com>
Cc: SeongJae Park <sj38.park@gmail.com>
Cc: SeongJae Park <sjpark@amazon.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Sonny Rao <sonnyrao@google.com>
Cc: Tim Murray <timmurray@google.com>
Cc: Christian Brauner <christian.brauner@ubuntu.com>
Cc: <linux-man@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Git-Commit: d6d0112994a95b14016cc7b2825289999fc1b78c
Git-Repo: git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git


Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
parent f4ed7311
Loading
Loading
Loading
Loading
+21 −8
Original line number Original line Diff line number Diff line
@@ -39,6 +39,7 @@
struct madvise_walk_private {
struct madvise_walk_private {
	struct mmu_gather *tlb;
	struct mmu_gather *tlb;
	bool pageout;
	bool pageout;
	struct task_struct *target_task;
};
};


/*
/*
@@ -319,6 +320,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
	if (fatal_signal_pending(current))
	if (fatal_signal_pending(current))
		return -EINTR;
		return -EINTR;


	if (private->target_task &&
			fatal_signal_pending(private->target_task))
		return -EINTR;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		pmd_t orig_pmd;
@@ -480,12 +485,14 @@ static const struct mm_walk_ops cold_walk_ops = {
};
};


static void madvise_cold_page_range(struct mmu_gather *tlb,
static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct task_struct *task,
			     struct vm_area_struct *vma,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
			     unsigned long addr, unsigned long end)
{
{
	struct madvise_walk_private walk_private = {
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.pageout = false,
		.tlb = tlb,
		.tlb = tlb,
		.target_task = task,
	};
	};


	tlb_start_vma(tlb, vma);
	tlb_start_vma(tlb, vma);
@@ -493,7 +500,8 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
	tlb_end_vma(tlb, vma);
	tlb_end_vma(tlb, vma);
}
}


static long madvise_cold(struct vm_area_struct *vma,
static long madvise_cold(struct task_struct *task,
			struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
			unsigned long start_addr, unsigned long end_addr)
{
{
@@ -506,19 +514,21 @@ static long madvise_cold(struct vm_area_struct *vma,


	lru_add_drain();
	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	madvise_cold_page_range(&tlb, task, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);


	return 0;
	return 0;
}
}


static void madvise_pageout_page_range(struct mmu_gather *tlb,
static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct task_struct *task,
			     struct vm_area_struct *vma,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
			     unsigned long addr, unsigned long end)
{
{
	struct madvise_walk_private walk_private = {
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.pageout = true,
		.tlb = tlb,
		.tlb = tlb,
		.target_task = task,
	};
	};


	tlb_start_vma(tlb, vma);
	tlb_start_vma(tlb, vma);
@@ -542,7 +552,8 @@ static inline bool can_do_pageout(struct vm_area_struct *vma)
		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
}
}


static long madvise_pageout(struct vm_area_struct *vma,
static long madvise_pageout(struct task_struct *task,
			struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
			unsigned long start_addr, unsigned long end_addr)
{
{
@@ -558,7 +569,7 @@ static long madvise_pageout(struct vm_area_struct *vma,


	lru_add_drain();
	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	madvise_pageout_page_range(&tlb, task, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);


	return 0;
	return 0;
@@ -938,7 +949,8 @@ static int madvise_inject_error(int behavior,
#endif
#endif


static long
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
madvise_vma(struct task_struct *task, struct vm_area_struct *vma,
		struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
		unsigned long start, unsigned long end, int behavior)
{
{
	switch (behavior) {
	switch (behavior) {
@@ -947,9 +959,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
	case MADV_WILLNEED:
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
		return madvise_cold(task, vma, prev, start, end);
	case MADV_PAGEOUT:
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
		return madvise_pageout(task, vma, prev, start, end);
	case MADV_FREE:
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
		return madvise_dontneed_free(vma, prev, start, end, behavior);
@@ -1166,7 +1178,8 @@ int do_madvise(struct task_struct *target_task, struct mm_struct *mm,
			tmp = end;
			tmp = end;


		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		error = madvise_vma(target_task, vma, &prev,
					start, tmp, behavior);
		if (error)
		if (error)
			goto out;
			goto out;
		start = tmp;
		start = tmp;