Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5a87e37e authored by Linus Torvalds
Browse files

Merge branch 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull get_user_pages_fast updates from Al Viro:
 "A bit more get_user_pages work"

* 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  kvm: switch get_user_page_nowait() to get_user_pages_unlocked()
  __get_user_pages_locked(): get rid of notify_drop argument
  get_user_pages_unlocked(): pass true to __get_user_pages_locked() notify_drop
  cris: switch to get_user_pages_fast()
  fold __get_user_pages_unlocked() into its sole remaining caller
parents 19e7b5f9 ce53053c
Loading
Loading
Loading
Loading
+7 −16
Original line number | Diff line number | Diff line
@@ -2717,37 +2717,28 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
		}
	}

	/* Acquire the mm page semaphore. */
	down_read(&current->mm->mmap_sem);

	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
	err = get_user_pages_fast((unsigned long)(oper.indata + prev_ix),
			     noinpages,
			     0,  /* read access only for in data */
			     inpages,
			     NULL);
			     false,  /* read access only for in data */
			     inpages);

	if (err < 0) {
		up_read(&current->mm->mmap_sem);
		nooutpages = noinpages = 0;
		DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
		goto error_cleanup;
	}
	noinpages = err;
	if (oper.do_cipher) {
		err = get_user_pages((unsigned long int)oper.cipher_outdata,
		err = get_user_pages_fast((unsigned long)oper.cipher_outdata,
				     nooutpages,
				     FOLL_WRITE, /* write access for out data */
				     outpages,
				     NULL);
		up_read(&current->mm->mmap_sem);
				     true, /* write access for out data */
				     outpages);
		if (err < 0) {
			nooutpages = 0;
			DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
			goto error_cleanup;
		}
		nooutpages = err;
	} else {
		up_read(&current->mm->mmap_sem);
	}

	/* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
+15 −31
Original line number | Diff line number | Diff line
@@ -848,7 +848,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
@@ -922,7 +922,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
@@ -959,35 +959,11 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked, true,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows for
 * tsk, mm to be specified.
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET"
 * is set implicitly if "pages" is non-NULL.
 */
static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		unsigned int gup_flags)
{
	long ret;
	int locked = 1;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
				      &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
@@ -1006,8 +982,16 @@ static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags | FOLL_TOUCH);
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
				      &locked, gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);

@@ -1073,7 +1057,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		struct vm_area_struct **vmas, int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked, true,
				       locked,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
@@ -1090,7 +1074,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL, false,
				       pages, vmas, NULL,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
+12 −31
Original line number | Diff line number | Diff line
@@ -1322,17 +1322,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static int get_user_page_nowait(unsigned long start, int write,
		struct page **page)
{
	int flags = FOLL_NOWAIT | FOLL_HWPOISON;

	if (write)
		flags |= FOLL_WRITE;

	return get_user_pages(start, 1, flags, page, NULL);
}

static inline int check_user_page_hwpoison(unsigned long addr)
{
	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1381,7 +1370,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, kvm_pfn_t *pfn)
{
	struct page *page[1];
	unsigned int flags = FOLL_HWPOISON;
	struct page *page;
	int npages = 0;

	might_sleep();
@@ -1389,35 +1379,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else {
		unsigned int flags = FOLL_HWPOISON;

	if (write_fault)
		flags |= FOLL_WRITE;
	if (async)
		flags |= FOLL_NOWAIT;

		npages = get_user_pages_unlocked(addr, 1, page, flags);
	}
	npages = get_user_pages_unlocked(addr, 1, &page, flags);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];
		struct page *wpage;

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
			put_page(page);
			page = wpage;
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	*pfn = page_to_pfn(page);
	return npages;
}