Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dc602dd7 authored by Trond Myklebust
Browse files

NFS/pNFS: Fix up pNFS write reschedule layering violations and bugs



The flexfiles layout in particular, seems to want to poke around in the
O_DIRECT flags when retransmitting.
This patch sets up an interface to allow it to call back into O_DIRECT
to handle retransmission correctly. It also fixes a potential bug whereby
we could change the behaviour of O_DIRECT if an error is already pending.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent e07db907
Loading
Loading
Loading
Loading
+15 −6
Original line number Diff line number Diff line
@@ -117,12 +117,6 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
	return atomic_dec_and_test(&dreq->io_count);
}

/*
 * Mark an O_DIRECT request so that its outstanding writes get
 * rescheduled (resent) instead of completing normally.
 * NOTE: this is a plain assignment, so any flags already set on
 * @dreq are clobbered unconditionally (the behaviour this commit
 * fixes by moving the logic under dreq->lock with an error check).
 */
void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq)
{
	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
}
EXPORT_SYMBOL_GPL(nfs_direct_set_resched_writes);

static void
nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
{
@@ -839,10 +833,25 @@ static void nfs_write_sync_pgio_error(struct list_head *head)
	}
}

/*
 * ->reschedule_io callback for O_DIRECT writes: request that the pages
 * described by @hdr be resent.  Everything happens under dreq->lock,
 * and only when no error has been recorded yet — so a pending error is
 * never silently converted into a reschedule.
 */
static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.count;
	}
	spin_unlock(&dreq->lock);
}

/* Completion callbacks wired up for O_DIRECT write requests. */
static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};


+1 −12
Original line number Diff line number Diff line
@@ -912,18 +912,7 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		if (!hdr->dreq) {
			struct nfs_open_context *ctx;

			ctx = nfs_list_entry(hdr->pages.next)->wb_context;
			set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
			hdr->completion_ops->error_cleanup(&hdr->pages);
		} else {
			nfs_direct_set_resched_writes(hdr->dreq);
			/* fake unstable write to let common nfs resend pages */
			hdr->verf.committed = NFS_UNSTABLE;
			hdr->good_bytes = hdr->args.count;
		}
		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

+0 −1
Original line number Diff line number Diff line
@@ -519,7 +519,6 @@ static inline void nfs_inode_dio_wait(struct inode *inode)
	inode_dio_wait(inode);
}
extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
extern void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq);

/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_pgio_header *);
+6 −0
Original line number Diff line number Diff line
@@ -1326,9 +1326,15 @@ static void nfs_async_write_error(struct list_head *head)
	}
}

/*
 * ->reschedule_io callback for buffered (async) writes: funnel the
 * pages in @hdr through the normal async write error path, which
 * handles requeuing them for retransmission.
 */
static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages);
}

/* Completion callbacks wired up for ordinary buffered async writes. */
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+1 −0
Original line number Diff line number Diff line
@@ -1460,6 +1460,7 @@ struct nfs_pgio_completion_ops {
	void	(*error_cleanup)(struct list_head *head);
	void	(*init_hdr)(struct nfs_pgio_header *hdr);
	void	(*completion)(struct nfs_pgio_header *hdr);
	void	(*reschedule_io)(struct nfs_pgio_header *hdr);
};

struct nfs_unlinkdata {