
Commit 06cf6f2e authored by Chuck Lever, committed by Trond Myklebust

NFS: Eliminate nfs_get_user_pages()



Neil Brown observed that the kmalloc() in nfs_get_user_pages() is more
likely to fail if the I/O is large enough to require the allocation of more
than a single page to keep track of all the pinned pages in the user's
buffer.
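
A worked example (not part of the commit message): with a 4 KB PAGE_SIZE and 8-byte page pointers, a 2 MB direct I/O needs a 4 KB pointer array, which still fits in a single page, but a 4 MB I/O needs an 8 KB array, and kmalloc() must then find physically contiguous multi-page memory. A minimal userspace sketch of the arithmetic:

	/* Illustrative only: size of the pinned-page array that the old
	 * nfs_get_user_pages() had to kmalloc() in one piece.  The 4 KB
	 * page size and 8-byte pointer width are assumptions for a
	 * typical 64-bit configuration. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;
		unsigned long io_size = 4UL << 20;          /* one 4 MB direct I/O */
		unsigned long npages = io_size / page_size; /* 1024 pinned pages */
		unsigned long array_size = npages * sizeof(void *);

		/* 8192 bytes here: larger than one page, so the allocation
		 * needs contiguous multi-page memory, which is more likely
		 * to fail under memory pressure. */
		printf("%lu page pointers need a %lu byte array\n",
			npages, array_size);
		return 0;
	}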

Instead of tracking one large page array per dreq/iocb, track pages per
nfs_read/write_data, just like the cached I/O path does.  An array for
pages is already allocated for us by nfs_readdata_alloc() (and the write
and commit equivalents).
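
In outline, the new scheme pins only the pages backing the current rsize- or wsize-sized chunk, directly into the pagevec that each nfs_read_data already owns. A condensed sketch of the read-side loop this patch introduces (abbreviated from the diff below; error handling and RPC setup elided):

	do {
		size_t bytes = min(count, rsize);

		/* pin just this chunk's pages into the preallocated pagevec */
		data->npages = nfs_direct_count_pages(user_addr, bytes);
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					data->npages, 1, 0, data->pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < data->npages)
			break;		/* stop scheduling further READs */

		data->args.pages = data->pagevec;	/* no shared dreq array */
		/* ... set up and transmit one NFS READ for this chunk ... */

		user_addr += bytes;
		count -= bytes;
	} while (count != 0);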

This is also required for adding support for vectored I/O to the NFS direct
I/O path.

The original reason to pin the user buffer and allocate all the NFS data
structures before trying to schedule I/O was to ensure all needed resources
are allocated on the client before starting to send requests.  This reduces
the chance that resource exhaustion on the client will cause a short read
or write.

On the other hand, for an application making very large I/O requests, this
means that it will be nearly impossible for the application to make forward
progress on a resource-limited client.

Thus, moving the buffer pinning functionality into the I/O scheduling
loops should be good for scalability.  The next patch will do the same for
NFS data structure allocation.
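
For reference, the per-chunk page count used by the pinning loop comes from the nfs_direct_count_pages() helper added below: it takes the page index just past the end of the chunk and subtracts the index of the chunk's first page, so a chunk that starts mid-page is counted correctly. A userspace rendering with a worked case (the fixed PAGE_SHIFT is an assumption for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assume 4 KB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	static int count_pages(unsigned long user_addr, unsigned long size)
	{
		int page_count;

		/* index of the first page past the buffer's end ... */
		page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		/* ... minus the index of the buffer's first page */
		page_count -= user_addr >> PAGE_SHIFT;
		return page_count;
	}

	int main(void)
	{
		/* an 8192-byte chunk starting 100 bytes into a page
		 * straddles three pages, not two */
		printf("%d\n", count_pages(4196, 8192));	/* prints 3 */
		return 0;
	}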

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 9c93ab7d
fs/nfs/direct.c  +111 −94
@@ -73,8 +73,6 @@ struct nfs_direct_req {
 	struct nfs_open_context	*ctx;		/* file open context info */
 	struct kiocb *		iocb;		/* controlling i/o request */
 	struct inode *		inode;		/* target file of i/o */
-	struct page **		pages;		/* pages in our buffer */
-	unsigned int		npages;		/* count of pages */
 
 	/* completion state */
 	atomic_t		io_count;	/* i/os we're waiting for */
@@ -104,6 +102,20 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 	return atomic_dec_and_test(&dreq->io_count);
 }
 
+/*
+ * "size" is never larger than rsize or wsize.
+ */
+static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
+{
+	int page_count;
+
+	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	page_count -= user_addr >> PAGE_SHIFT;
+	BUG_ON(page_count < 0);
+
+	return page_count;
+}
+
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
@@ -143,40 +155,6 @@ static void nfs_direct_release_pages(struct page **pages, int npages)
 		page_cache_release(pages[i]);
 }
 
-static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
-{
-	int result = -ENOMEM;
-	unsigned long page_count;
-	size_t array_size;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-
-	array_size = (page_count * sizeof(struct page *));
-	*pages = kmalloc(array_size, GFP_KERNEL);
-	if (*pages) {
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					page_count, (rw == READ), 0,
-					*pages, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result != page_count) {
-			/*
-			 * If we got fewer pages than expected from
-			 * get_user_pages(), the user buffer runs off the
-			 * end of a mapping; return EFAULT.
-			 */
-			if (result >= 0) {
-				nfs_direct_release_pages(*pages, result);
-				result = -EFAULT;
-			} else
-				kfree(*pages);
-			*pages = NULL;
-		}
-	}
-	return result;
-}
-
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
 	struct nfs_direct_req *dreq;
@@ -233,13 +211,8 @@ out:
 }
 
 /*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request.  If the waiter is woken prematurely, the iocb is long gone.
+ * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
+ * the iocb is still valid here if this is a synchronous request.
  */
 static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
@@ -297,6 +270,11 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 	return dreq;
 }
 
+/*
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete.  This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
+ */
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -305,6 +283,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 
+	nfs_direct_dirty_pages(data->pagevec, data->npages);
+	nfs_direct_release_pages(data->pagevec, data->npages);
+
 	spin_lock(&dreq->lock);
 
 	if (likely(task->tk_status >= 0))
@@ -314,12 +295,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)

 	spin_unlock(&dreq->lock);
 
-	if (put_dreq(dreq)) {
-		nfs_direct_dirty_pages(dreq->pages, dreq->npages);
-		nfs_direct_release_pages(dreq->pages, dreq->npages);
+	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
-	}
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
 	.rpc_call_done = nfs_direct_read_result,
@@ -328,21 +306,23 @@ static const struct rpc_call_ops nfs_read_direct_ops = {

 /*
  * For each nfs_read_data struct that was allocated on the list, dispatch
- * an NFS READ operation
+ * an NFS READ operation.  If get_user_pages() fails, we stop sending reads.
+ * Read length accounting is handled by nfs_direct_read_result().
+ * Otherwise, if no requests have been sent, just return an error.
  */
-static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int curpage, pgbase;
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+	struct nfs_read_data *data;
 
-	curpage = 0;
 	pgbase = user_addr & ~PAGE_MASK;
 	do {
-		struct nfs_read_data *data;
 		size_t bytes;
 
 		bytes = rsize;
@@ -353,13 +333,21 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
 		data = list_entry(list->next, struct nfs_read_data, pages);
 		list_del_init(&data->pages);
 
+		data->npages = nfs_direct_count_pages(user_addr, bytes);
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 1, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages))
+			goto out_err;
+
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.eof = 0;
@@ -382,17 +370,36 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
 				bytes,
 				(unsigned long long)data->args.offset);
 
+		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
+	BUG_ON(!list_empty(list));
+	return 0;
+
+out_err:
+	if (result > 0)
+		nfs_direct_release_pages(data->pagevec, result);
+
+	list_add(&data->pages, list);
+	while (!list_empty(list)) {
+		data = list_entry(list->next, struct nfs_read_data, pages);
+		list_del(&data->pages);
+		nfs_readdata_free(data);
+		if (put_dreq(dreq))
+			nfs_direct_complete(dreq);
+	}
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
 	ssize_t result;
 	sigset_t oldset;
@@ -404,8 +411,6 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	if (!dreq)
 		return -ENOMEM;
 
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -413,7 +418,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size

 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_read_schedule(dreq, user_addr, count, pos);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
@@ -426,9 +432,9 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 	while (!list_empty(&dreq->list)) {
 		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
+		nfs_direct_release_pages(data->pagevec, data->npages);
 		nfs_writedata_release(data);
 	}
-	nfs_direct_release_pages(dreq->pages, dreq->npages);
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -672,21 +678,23 @@ static const struct rpc_call_ops nfs_write_direct_ops = {

 /*
  * For each nfs_write_data struct that was allocated on the list, dispatch
- * an NFS WRITE operation
+ * an NFS WRITE operation.  If get_user_pages() fails, we stop sending writes.
+ * Write length accounting is handled by nfs_direct_write_result().
+ * Otherwise, if no requests have been sent, just return an error.
  */
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int curpage, pgbase;
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+	struct nfs_write_data *data;
 
-	curpage = 0;
 	pgbase = user_addr & ~PAGE_MASK;
 	do {
-		struct nfs_write_data *data;
 		size_t bytes;
 
 		bytes = wsize;
@@ -695,6 +703,15 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long

 		BUG_ON(list_empty(list));
 		data = list_entry(list->next, struct nfs_write_data, pages);
+
+		data->npages = nfs_direct_count_pages(user_addr, bytes);
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 0, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages))
+			goto out_err;
+
 		list_move_tail(&data->pages, &dreq->rewrite_list);
 
 		data->inode = inode;
@@ -703,7 +720,7 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.count = bytes;
@@ -727,17 +744,36 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long
 				bytes,
 				(unsigned long long)data->args.offset);
 
+		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
+	BUG_ON(!list_empty(list));
+	return 0;
+
+out_err:
+	if (result > 0)
+		nfs_direct_release_pages(data->pagevec, result);
+
+	list_add(&data->pages, list);
+	while (!list_empty(list)) {
+		data = list_entry(list->next, struct nfs_write_data, pages);
+		list_del(&data->pages);
+		nfs_writedata_free(data);
+		if (put_dreq(dreq))
+			nfs_direct_write_complete(dreq, inode);
+	}
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
 	ssize_t result;
 	sigset_t oldset;
@@ -753,8 +789,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	if (dreq->commit_data == NULL || count < wsize)
 		sync = FLUSH_STABLE;
 
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -765,7 +799,8 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	nfs_begin_data_update(inode);
 
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
@@ -796,8 +831,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval = -EINVAL;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -819,14 +852,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(READ, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
-						pages, page_count);
+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
 
@@ -862,8 +888,6 @@ out:
 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -891,14 +915,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
-					pos, pages, page_count);
+	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
 
 	/*
 	 * XXX: nfs_end_data_update() already ensures this file's
include/linux/nfs_xdr.h  +2 −0
@@ -729,6 +729,7 @@ struct nfs_read_data {
 	struct list_head	pages;	/* Coalesced read requests */
 	struct nfs_page		*req;	/* multi ops per nfs_page */
 	struct page		**pagevec;
+	unsigned int		npages;	/* active pages in pagevec */
 	struct nfs_readargs args;
 	struct nfs_readres  res;
 #ifdef CONFIG_NFS_V4
@@ -747,6 +748,7 @@ struct nfs_write_data {
 	struct list_head	pages;		/* Coalesced requests we wish to flush */
 	struct nfs_page		*req;		/* multi ops per nfs_page */
 	struct page		**pagevec;
+	unsigned int		npages;		/* active pages in pagevec */
 	struct nfs_writeargs	args;		/* argument struct */
 	struct nfs_writeres	res;		/* result struct */
 #ifdef CONFIG_NFS_V4