Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ac46d4f3 authored by Jérôme Glisse, committed by Linus Torvalds
Browse files

mm/mmu_notifier: use structure for invalidate_range_start/end calls v2

To avoid having to change many call sites every time we want to add a
parameter use a structure to group all parameters for the mmu_notifier
invalidate_range_start/end calls.  No functional changes with this patch.

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com


Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
From: Jérôme Glisse <jglisse@redhat.com>
Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3

fix build warning in migrate.c when CONFIG_MMU_NOTIFIER=n

Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com


Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5d6527a7
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
@@ -779,7 +779,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

@@ -793,7 +794,8 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
		if (follow_pte_pmd(vma->vm_mm, address, &range,
				   &ptep, &pmdp, &ptl))
			continue;

		/*
@@ -835,7 +837,7 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}
+5 −2
Original line number Diff line number Diff line
@@ -1096,6 +1096,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mmu_notifier_range range;
		struct clear_refs_private cp = {
			.type = type,
		};
@@ -1139,11 +1140,13 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);

			mmu_notifier_range_init(&range, mm, 0, -1UL);
			mmu_notifier_invalidate_range_start(&range);
		}
		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
			mmu_notifier_invalidate_range_end(&range);
		tlb_finish_mmu(&tlb, 0, -1);
		up_read(&mm->mmap_sem);
out_mm:
+4 −2
Original line number Diff line number Diff line
@@ -1451,6 +1451,8 @@ struct mm_walk {
	void *private;
};

struct mmu_notifier_range;

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
@@ -1459,7 +1461,7 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
			     unsigned long *start, unsigned long *end,
		   struct mmu_notifier_range *range,
		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
+58 −29
Original line number Diff line number Diff line
@@ -220,11 +220,8 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  bool blockable);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
				  bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
@@ -268,33 +265,37 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end, true);
	if (mm_has_notifiers(range->mm)) {
		range->blockable = true;
		__mmu_notifier_invalidate_range_start(range);
	}
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_invalidate_range_start(mm, start, end, false);
	if (mm_has_notifiers(range->mm)) {
		range->blockable = false;
		return __mmu_notifier_invalidate_range_start(range);
	}
	return 0;
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, false);
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -315,6 +316,17 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
		__mmu_notifier_mm_destroy(mm);
}


static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->mm = mm;
	range->start = start;
	range->end = end;
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
@@ -427,6 +439,23 @@ extern void mmu_notifier_call_srcu(struct rcu_head *rcu,

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, mm, start, end) \
	_mmu_notifier_range_init(range, start, end)


static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
@@ -454,24 +483,24 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
static inline void
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
{
}

+5 −5
Original line number Diff line number Diff line
@@ -171,11 +171,11 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
		.address = addr,
	};
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end   = addr + PAGE_SIZE;
	struct mmu_notifier_range range;
	struct mem_cgroup *memcg;

	mmu_notifier_range_init(&range, mm, addr, addr + PAGE_SIZE);

	VM_BUG_ON_PAGE(PageTransHuge(old_page), old_page);

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
@@ -186,7 +186,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw)) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
@@ -220,7 +220,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	mmu_notifier_invalidate_range_end(&range);
	unlock_page(old_page);
	return err;
}
Loading