Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d45f60c6 authored by Weston Andros Adamson, committed by Trond Myklebust
Browse files

nfs: merge nfs_pgio_data into _header



struct nfs_pgio_data only exists as a member of nfs_pgio_header, but is
passed around everywhere, because there used to be multiple _data structs
per _header. Many of these functions then use the _data to find a pointer
to the _header.  This patch cleans this up by merging the nfs_pgio_data
structure into nfs_pgio_header and passing nfs_pgio_header around instead.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Weston Andros Adamson <dros@primarydata.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent 823b0c9d
Loading
Loading
Loading
Loading
+47 −51
Original line number Diff line number Diff line
@@ -210,8 +210,7 @@ static void bl_end_io_read(struct bio *bio, int err)
			SetPageUptodate(bvec->bv_page);

	if (err) {
		struct nfs_pgio_data *rdata = par->data;
		struct nfs_pgio_header *header = rdata->header;
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
@@ -224,44 +223,44 @@ static void bl_end_io_read(struct bio *bio, int err)
static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_data *rdata;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	rdata = container_of(task, struct nfs_pgio_data, task);
	pnfs_ld_read_done(rdata);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_pgio_data *rdata = data;
	struct nfs_pgio_header *hdr = data;

	rdata->task.tk_status = rdata->header->pnfs_error;
	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_data *rdata)
bl_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *header = rdata->header;
	struct nfs_pgio_header *header = hdr;
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t bytes_left = rdata->args.count;
	loff_t f_offset = hdr->args.offset;
	size_t bytes_left = hdr->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
	struct page **pages = hdr->args.pages;
	int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		rdata->page_array.npages, f_offset,
		(unsigned int)rdata->args.count);
		hdr->page_array.npages, f_offset,
		(unsigned int)hdr->args.count);

	par = alloc_parallel(rdata);
	par = alloc_parallel(hdr);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
@@ -269,7 +268,7 @@ bl_read_pagelist(struct nfs_pgio_data *rdata)

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < rdata->page_array.npages; i++) {
	for (i = pg_index; i < hdr->page_array.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
@@ -319,7 +318,7 @@ bl_read_pagelist(struct nfs_pgio_data *rdata)

			be_read = (hole && cow_read) ? cow_read : be;
			bio = do_add_page_to_bio(bio,
						 rdata->page_array.npages - i,
						 hdr->page_array.npages - i,
						 READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par,
@@ -334,10 +333,10 @@ bl_read_pagelist(struct nfs_pgio_data *rdata)
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = header->inode->i_size - rdata->args.offset;
		hdr->res.eof = 1;
		hdr->res.count = header->inode->i_size - hdr->args.offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
		hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset;
	}
out:
	bl_put_extent(be);
@@ -392,8 +391,7 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
	}

	if (unlikely(err)) {
		struct nfs_pgio_data *data = par->data;
		struct nfs_pgio_header *header = data->header;
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
@@ -407,8 +405,7 @@ static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_pgio_data *data = par->data;
	struct nfs_pgio_header *header = data->header;
	struct nfs_pgio_header *header = par->data;

	if (!uptodate) {
		if (!header->pnfs_error)
@@ -425,32 +422,32 @@ static void bl_end_io_write(struct bio *bio, int err)
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_data *wdata;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	wdata = container_of(task, struct nfs_pgio_data, task);
	if (likely(!wdata->header->pnfs_error)) {
	hdr = container_of(task, struct nfs_pgio_header, task);
	if (likely(!hdr->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
				     wdata->args.offset, wdata->args.count);
		mark_extents_written(BLK_LSEG2EXT(hdr->lseg),
				     hdr->args.offset, hdr->args.count);
	}
	pnfs_ld_write_done(wdata);
	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_pgio_data *wdata = data;
	struct nfs_pgio_header *hdr = data;

	if (unlikely(wdata->header->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
	if (unlikely(hdr->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval,
					num_se);
	}

	wdata->task.tk_status = wdata->header->pnfs_error;
	wdata->writeverf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
	hdr->task.tk_status = hdr->pnfs_error;
	hdr->writeverf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so is not
@@ -675,18 +672,17 @@ check_page:
}

static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_data *wdata, int sync)
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct nfs_pgio_header *header = wdata->header;
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	unsigned int pg_offset, pg_len, saved_len;
	struct page **pages = wdata->args.pages;
	struct page **pages = header->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
@@ -701,11 +697,11 @@ bl_write_pagelist(struct nfs_pgio_data *wdata, int sync)
		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
		goto out_mds;
	}
	/* At this point, wdata->page_aray is a (sequential) list of nfs_pages.
	/* At this point, header->page_aray is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(wdata);
	par = alloc_parallel(header);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
@@ -792,8 +788,8 @@ next_page:
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->page_array.npages; i++) {
	pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
@@ -864,7 +860,7 @@ next_page:
		}


		bio = do_add_page_to_bio(bio, wdata->page_array.npages - i,
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par,
@@ -893,7 +889,7 @@ next_page:
	}

write_done:
	wdata->res.count = wdata->args.count;
	header->res.count = header->args.count;
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
+4 −4
Original line number Diff line number Diff line
@@ -148,8 +148,8 @@ static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->data.ds_clp,
				      hdr->data.ds_idx);
	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
				      hdr->ds_idx);
	WARN_ON_ONCE(verfp->committed >= 0);
	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
	WARN_ON_ONCE(verfp->committed < 0);
@@ -169,8 +169,8 @@ static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->data.ds_clp,
					 hdr->data.ds_idx);
	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp,
					 hdr->ds_idx);
	if (verfp->committed < 0) {
		nfs_direct_set_hdr_verf(dreq, hdr);
		return 0;
+81 −89
Original line number Diff line number Diff line
@@ -84,19 +84,18 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
	BUG();
}

static void filelayout_reset_write(struct nfs_pgio_data *data)
static void filelayout_reset_write(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *hdr = data->header;
	struct rpc_task *task = &data->task;
	struct rpc_task *task = &hdr->task;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			data->task.tk_pid,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			data->args.count,
			(unsigned long long)data->args.offset);
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
							&hdr->pages,
@@ -105,19 +104,18 @@ static void filelayout_reset_write(struct nfs_pgio_data *data)
	}
}

static void filelayout_reset_read(struct nfs_pgio_data *data)
static void filelayout_reset_read(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *hdr = data->header;
	struct rpc_task *task = &data->task;
	struct rpc_task *task = &hdr->task;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			data->task.tk_pid,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			data->args.count,
			(unsigned long long)data->args.offset);
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
@@ -243,18 +241,17 @@ wait_on_recovery:
/* NFS_PROTO call done callback routines */

static int filelayout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_data *data)
				struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *hdr = data->header;
	int err;

	trace_nfs4_pnfs_read(data, task->tk_status);
	err = filelayout_async_handle_error(task, data->args.context->state,
					    data->ds_clp, hdr->lseg);
	trace_nfs4_pnfs_read(hdr, task->tk_status);
	err = filelayout_async_handle_error(task, hdr->args.context->state,
					    hdr->ds_clp, hdr->lseg);

	switch (err) {
	case -NFS4ERR_RESET_TO_MDS:
		filelayout_reset_read(data);
		filelayout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
@@ -270,15 +267,14 @@ static int filelayout_read_done_cb(struct rpc_task *task,
 * rfc5661 is not clear about which credential should be used.
 */
static void
filelayout_set_layoutcommit(struct nfs_pgio_data *wdata)
filelayout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *hdr = wdata->header;

	if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
	    wdata->res.verf->committed == NFS_FILE_SYNC)
	    hdr->res.verf->committed == NFS_FILE_SYNC)
		return;

	pnfs_set_layoutcommit(wdata);
	pnfs_set_layoutcommit(hdr);
	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
@@ -305,83 +301,82 @@ filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
 */
static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
	struct nfs_pgio_data *rdata = data;
	struct nfs_pgio_header *hdr = data;

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &rdata->args.context->flags))) {
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return;
	}
	if (filelayout_reset_to_mds(rdata->header->lseg)) {
	if (filelayout_reset_to_mds(hdr->lseg)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		filelayout_reset_read(rdata);
		filelayout_reset_read(hdr);
		rpc_exit(task, 0);
		return;
	}
	rdata->pgio_done_cb = filelayout_read_done_cb;
	hdr->pgio_done_cb = filelayout_read_done_cb;

	if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
			&rdata->args.seq_args,
			&rdata->res.seq_res,
	if (nfs41_setup_sequence(hdr->ds_clp->cl_session,
			&hdr->args.seq_args,
			&hdr->res.seq_res,
			task))
		return;
	if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
			rdata->args.lock_context, FMODE_READ) == -EIO)
	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void filelayout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_data *rdata = data;
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs41_sequence_done(task, &rdata->res.seq_res);
		nfs41_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	rdata->header->mds_ops->rpc_call_done(task, data);
	hdr->mds_ops->rpc_call_done(task, data);
}

static void filelayout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_data *rdata = data;
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
	rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}

static void filelayout_read_release(void *data)
{
	struct nfs_pgio_data *rdata = data;
	struct pnfs_layout_hdr *lo = rdata->header->lseg->pls_layout;
	struct nfs_pgio_header *hdr = data;
	struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout;

	filelayout_fenceme(lo->plh_inode, lo);
	nfs_put_client(rdata->ds_clp);
	rdata->header->mds_ops->rpc_release(data);
	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}

static int filelayout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_data *data)
				struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *hdr = data->header;
	int err;

	trace_nfs4_pnfs_write(data, task->tk_status);
	err = filelayout_async_handle_error(task, data->args.context->state,
					    data->ds_clp, hdr->lseg);
	trace_nfs4_pnfs_write(hdr, task->tk_status);
	err = filelayout_async_handle_error(task, hdr->args.context->state,
					    hdr->ds_clp, hdr->lseg);

	switch (err) {
	case -NFS4ERR_RESET_TO_MDS:
		filelayout_reset_write(data);
		filelayout_reset_write(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	filelayout_set_layoutcommit(data);
	filelayout_set_layoutcommit(hdr);
	return 0;
}

@@ -419,57 +414,57 @@ static int filelayout_commit_done_cb(struct rpc_task *task,

static void filelayout_write_prepare(struct rpc_task *task, void *data)
{
	struct nfs_pgio_data *wdata = data;
	struct nfs_pgio_header *hdr = data;

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &wdata->args.context->flags))) {
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return;
	}
	if (filelayout_reset_to_mds(wdata->header->lseg)) {
	if (filelayout_reset_to_mds(hdr->lseg)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		filelayout_reset_write(wdata);
		filelayout_reset_write(hdr);
		rpc_exit(task, 0);
		return;
	}
	if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
			&wdata->args.seq_args,
			&wdata->res.seq_res,
	if (nfs41_setup_sequence(hdr->ds_clp->cl_session,
			&hdr->args.seq_args,
			&hdr->res.seq_res,
			task))
		return;
	if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
			wdata->args.lock_context, FMODE_WRITE) == -EIO)
	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void filelayout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_data *wdata = data;
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs41_sequence_done(task, &wdata->res.seq_res);
		nfs41_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	wdata->header->mds_ops->rpc_call_done(task, data);
	hdr->mds_ops->rpc_call_done(task, data);
}

static void filelayout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_data *wdata = data;
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
	rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}

static void filelayout_write_release(void *data)
{
	struct nfs_pgio_data *wdata = data;
	struct pnfs_layout_hdr *lo = wdata->header->lseg->pls_layout;
	struct nfs_pgio_header *hdr = data;
	struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout;

	filelayout_fenceme(lo->plh_inode, lo);
	nfs_put_client(wdata->ds_clp);
	wdata->header->mds_ops->rpc_release(data);
	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}

static void filelayout_commit_prepare(struct rpc_task *task, void *data)
@@ -529,19 +524,18 @@ static const struct rpc_call_ops filelayout_commit_call_ops = {
};

static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_pgio_data *data)
filelayout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *hdr = data->header;
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	loff_t offset = data->args.offset;
	loff_t offset = hdr->args.offset;
	u32 j, idx;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
		__func__, hdr->inode->i_ino,
		data->args.pgbase, (size_t)data->args.count, offset);
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	/* Retrieve the correct rpc_client for the byte range */
	j = nfs4_fl_calc_j_index(lseg, offset);
@@ -559,30 +553,29 @@ filelayout_read_pagelist(struct nfs_pgio_data *data)

	/* No multipath support. Use first DS */
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	data->ds_idx = idx;
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_idx = idx;
	fh = nfs4_fl_select_ds_fh(lseg, j);
	if (fh)
		data->args.fh = fh;
		hdr->args.fh = fh;

	data->args.offset = filelayout_get_dserver_offset(lseg, offset);
	data->mds_offset = offset;
	hdr->args.offset = filelayout_get_dserver_offset(lseg, offset);
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, data,
	nfs_initiate_pgio(ds_clnt, hdr,
			    &filelayout_read_call_ops, 0, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
filelayout_write_pagelist(struct nfs_pgio_data *data, int sync)
filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct nfs_pgio_header *hdr = data->header;
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	loff_t offset = data->args.offset;
	loff_t offset = hdr->args.offset;
	u32 j, idx;
	struct nfs_fh *fh;

@@ -598,21 +591,20 @@ filelayout_write_pagelist(struct nfs_pgio_data *data, int sync)
		return PNFS_NOT_ATTEMPTED;

	dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));

	data->pgio_done_cb = filelayout_write_done_cb;
	hdr->pgio_done_cb = filelayout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	data->ds_idx = idx;
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_idx = idx;
	fh = nfs4_fl_select_ds_fh(lseg, j);
	if (fh)
		data->args.fh = fh;

	data->args.offset = filelayout_get_dserver_offset(lseg, offset);
		hdr->args.fh = fh;
	hdr->args.offset = filelayout_get_dserver_offset(lseg, offset);

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, data,
	nfs_initiate_pgio(ds_clnt, hdr,
				    &filelayout_write_call_ops, sync,
				    RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
+3 −3
Original line number Diff line number Diff line
@@ -240,9 +240,9 @@ int nfs_iocounter_wait(struct nfs_io_counter *c);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
void nfs_pgio_data_destroy(struct nfs_pgio_data *);
void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_header *,
		      const struct rpc_call_ops *, int, int);

static inline void nfs_iocounter_init(struct nfs_io_counter *c)
@@ -481,7 +481,7 @@ static inline void nfs_inode_dio_wait(struct inode *inode)
extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);

/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_pgio_data *);
extern void __nfs4_read_done_cb(struct nfs_pgio_header *);
extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
			    const struct rpc_timeout *timeparms,
			    const char *ip_addr);
+12 −9
Original line number Diff line number Diff line
@@ -795,41 +795,44 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
	return status;
}

static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_data *data)
static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
	struct inode *inode = data->header->inode;
	struct inode *inode = hdr->inode;

	if (nfs3_async_handle_jukebox(task, inode))
		return -EAGAIN;

	nfs_invalidate_atime(inode);
	nfs_refresh_inode(inode, &data->fattr);
	nfs_refresh_inode(inode, &hdr->fattr);
	return 0;
}

static void nfs3_proc_read_setup(struct nfs_pgio_data *data, struct rpc_message *msg)
static void nfs3_proc_read_setup(struct nfs_pgio_header *hdr,
				 struct rpc_message *msg)
{
	msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ];
}

static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task, struct nfs_pgio_data *data)
static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task,
				      struct nfs_pgio_header *hdr)
{
	rpc_call_start(task);
	return 0;
}

static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_data *data)
static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
	struct inode *inode = data->header->inode;
	struct inode *inode = hdr->inode;

	if (nfs3_async_handle_jukebox(task, inode))
		return -EAGAIN;
	if (task->tk_status >= 0)
		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
		nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr);
	return 0;
}

static void nfs3_proc_write_setup(struct nfs_pgio_data *data, struct rpc_message *msg)
static void nfs3_proc_write_setup(struct nfs_pgio_header *hdr,
				  struct rpc_message *msg)
{
	msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE];
}
Loading