
Commit 5b36c7dc authored by Boaz Harrosh

NFSv4.1: define nfs_generic_pg_test



By default, unless pnfs is in use, coalesce pages until pg_bsize
(rsize or wsize) is reached.

pnfs layout drivers define their own pg_test methods that call
pnfs_generic_pg_test and then apply their own I/O size limits
(e.g. based on the file stripe size), as in the sketch below.

[Move a check from nfs_pageio_do_add_request to nfs_generic_pg_test]
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
parent 89a58e32
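
For context, a layout driver's pg_test typically chains to pnfs_generic_pg_test and then imposes its own I/O size limit. Below is a minimal sketch of such a method; the my_layout_pg_test name, the MY_STRIPE_UNIT constant, and the exact stripe arithmetic are illustrative assumptions and are not part of this patch.

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <asm/div64.h>		/* do_div() */
#include "pnfs.h"		/* pnfs_generic_pg_test(); internal to fs/nfs */

/* Illustrative sketch only; not taken from this commit. */
static bool my_layout_pg_test(struct nfs_pageio_descriptor *pgio,
			      struct nfs_page *prev, struct nfs_page *req)
{
	u32 stripe_unit = MY_STRIPE_UNIT;	/* hypothetical; a real driver
						 * derives this from its layout */
	u64 p_stripe, r_stripe;

	/* First apply the generic pnfs checks. */
	if (!pnfs_generic_pg_test(pgio, prev, req))
		return false;

	/* Then refuse to coalesce requests that cross a stripe boundary. */
	p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
	r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
	do_div(p_stripe, stripe_unit);
	do_div(r_stripe, stripe_unit);

	return p_stripe == r_stripe;
}
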
+20 −24
@@ -204,6 +204,21 @@ nfs_wait_on_request(struct nfs_page *req)
 			TASK_UNINTERRUPTIBLE);
 }
 
+static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
+{
+	/*
+	 * FIXME: ideally we should be able to coalesce all requests
+	 * that are not block boundary aligned, but currently this
+	 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
+	 * since nfs_flush_multi and nfs_pagein_multi assume you
+	 * can have only one struct nfs_page.
+	 */
+	if (desc->pg_bsize < PAGE_SIZE)
+		return 0;
+
+	return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
+}
+
 /**
  * nfs_pageio_init - initialise a page io descriptor
  * @desc: pointer to descriptor
@@ -229,7 +244,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 	desc->pg_ioflags = io_flags;
 	desc->pg_error = 0;
 	desc->pg_lseg = NULL;
-	desc->pg_test = NULL;
+	desc->pg_test = nfs_generic_pg_test;
 	pnfs_pageio_init(desc, inode);
 }
 
@@ -260,13 +275,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 		return false;
 	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
 		return false;
-	/*
-	 * Non-whole file layouts need to check that req is inside of
-	 * pgio->pg_lseg.
-	 */
-	if (pgio->pg_test && !pgio->pg_test(pgio, prev, req))
-		return false;
-	return true;
+	return pgio->pg_test(pgio, prev, req);
 }
 
 /**
@@ -280,31 +289,18 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
 static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
 				     struct nfs_page *req)
 {
-	size_t newlen = req->wb_bytes;
-
 	if (desc->pg_count != 0) {
 		struct nfs_page *prev;
 
-		/*
-		 * FIXME: ideally we should be able to coalesce all requests
-		 * that are not block boundary aligned, but currently this
-		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
-		 * since nfs_flush_multi and nfs_pagein_multi assume you
-		 * can have only one struct nfs_page.
-		 */
-		if (desc->pg_bsize < PAGE_SIZE)
-			return 0;
-		newlen += desc->pg_count;
-		if (newlen > desc->pg_bsize)
-			return 0;
 		prev = nfs_list_entry(desc->pg_list.prev);
 		if (!nfs_can_coalesce_requests(prev, req, desc))
 			return 0;
-	} else
+	} else {
 		desc->pg_base = req->wb_pgbase;
+	}
 	nfs_list_remove_request(req);
 	nfs_list_add_request(req, &desc->pg_list);
-	desc->pg_count = newlen;
+	desc->pg_count += req->wb_bytes;
 	return 1;
 }
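
Worked example (the numbers are assumptions, not taken from this patch): with PAGE_SIZE = 4096 and wsize = 65536, nfs_generic_pg_test keeps admitting requests while desc->pg_count + req->wb_bytes <= desc->pg_bsize, so up to 65536 / 4096 = 16 whole pages are coalesced into a single write RPC; if pg_bsize were smaller than PAGE_SIZE, the test refuses to coalesce at all, per the FIXME about nfs_flush_multi and nfs_pagein_multi.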