Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b1c5921c authored by Chuck Lever, committed by Trond Myklebust
Browse files

NFS: Separate functions for counting outstanding NFS direct I/Os



Factor out the logic that increments and decrements the outstanding I/O
count.  This will be a commonly used bit of code in upcoming patches.
Also make this an atomic_t again, since it will be very often manipulated
outside the dreq->lock spinlock.

Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 816724e6
Loading
Loading
Loading
Loading
+20 −19
Original line number Diff line number Diff line
@@ -80,8 +80,8 @@ struct nfs_direct_req {
	unsigned int		npages;		/* count of pages */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	int			outstanding;	/* i/os we're waiting for */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */
@@ -97,6 +97,16 @@ struct nfs_direct_req {
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);

/*
 * Take a reference on the outstanding-I/O count of a direct I/O
 * request.  Called once per child read/write RPC scheduled, so the
 * request stays alive until every child completes.  Uses atomic_t
 * so no lock (e.g. dreq->lock) is needed around the increment.
 */
static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

/*
 * Drop one reference from the outstanding-I/O count.
 *
 * Returns non-zero when this was the final outstanding I/O (count
 * reached zero), signalling the caller to run request completion.
 * atomic_dec_and_test makes the decrement-and-check a single atomic
 * operation, so callers need not hold dreq->lock.
 */
static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
@@ -180,7 +190,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
	dreq->iocb = NULL;
	dreq->ctx = NULL;
	spin_lock_init(&dreq->lock);
	dreq->outstanding = 0;
	atomic_set(&dreq->io_count, 0);
	dreq->count = 0;
	dreq->error = 0;
	dreq->flags = 0;
@@ -278,7 +288,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		get_dreq(dreq);
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
@@ -302,12 +312,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
	else
		dreq->error = task->tk_status;

	if (--dreq->outstanding) {
	spin_unlock(&dreq->lock);
		return;
	}

	spin_unlock(&dreq->lock);
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
}

@@ -432,7 +439,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)

	list_splice_init(&dreq->rewrite_list, &dreq->list);
	list_for_each(pos, &dreq->list)
		dreq->outstanding++;
		get_dreq(dreq);
	dreq->count = 0;

	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
@@ -564,7 +571,7 @@ static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize
		list_add(&data->pages, list);

		data->req = (struct nfs_page *) dreq;
		dreq->outstanding++;
		get_dreq(dreq);
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
@@ -620,13 +627,7 @@ static void nfs_direct_write_release(void *calldata)
	struct nfs_write_data *data = calldata;
	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

	spin_lock(&dreq->lock);
	if (--dreq->outstanding) {
		spin_unlock(&dreq->lock);
		return;
	}
	spin_unlock(&dreq->lock);

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, data->inode);
}