Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d0a8a9d authored by Trond Myklebust
Browse files

[PATCH] NFSv4: Clean up nfs4 lock state accounting



 Ensure that lock owner structures are not released prematurely.

 Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent ecdbf769
Loading
Loading
Loading
Loading
+3 −6
Original line number Diff line number Diff line
@@ -128,6 +128,7 @@ struct nfs4_state_owner {

struct nfs4_lock_state {
	struct list_head	ls_locks;	/* Other lock stateids */
	struct nfs4_state *	ls_state;	/* Pointer to open state */
	fl_owner_t		ls_owner;	/* POSIX lock owner */
#define NFS_LOCK_INITIALIZED 1
	int			ls_flags;
@@ -153,7 +154,7 @@ struct nfs4_state {

	unsigned long flags;		/* Do we hold any locks? */
	struct semaphore lock_sema;	/* Serializes file locking operations */
	rwlock_t state_lock;		/* Protects the lock_states list */
	spinlock_t state_lock;		/* Protects the lock_states list */

	nfs4_stateid stateid;

@@ -225,12 +226,8 @@ extern void nfs4_close_state(struct nfs4_state *, mode_t);
extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
extern void nfs4_schedule_state_recovery(struct nfs4_client *);
extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t);
extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t);
extern void nfs4_put_lock_state(struct nfs4_lock_state *state);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);

extern const nfs4_stateid zero_stateid;
+30 −39
Original line number Diff line number Diff line
@@ -2626,14 +2626,11 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
	down_read(&clp->cl_sem);
	nlo.clientid = clp->cl_clientid;
	down(&state->lock_sema);
	lsp = nfs4_find_lock_state(state, request->fl_owner);
	if (lsp)
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	nlo.id = lsp->ls_id; 
	else {
		spin_lock(&clp->cl_lock);
		nlo.id = nfs4_alloc_lockowner_id(clp);
		spin_unlock(&clp->cl_lock);
	}
	arg.u.lockt = &nlo;
	status = rpc_call_sync(server->client, &msg, 0);
	if (!status) {
@@ -2654,8 +2651,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
		request->fl_pid = 0;
		status = 0;
	}
	if (lsp)
		nfs4_put_lock_state(lsp);
out:
	up(&state->lock_sema);
	up_read(&clp->cl_sem);
	return status;
@@ -2715,28 +2711,26 @@ static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock
	};
	struct nfs4_lock_state *lsp;
	struct nfs_locku_opargs luargs;
	int status = 0;
	int status;
			
	down_read(&clp->cl_sem);
	down(&state->lock_sema);
	lsp = nfs4_find_lock_state(state, request->fl_owner);
	if (!lsp)
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	/* We might have lost the locks! */
	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
		goto out;
	luargs.seqid = lsp->ls_seqid;
	memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
	arg.u.locku = &luargs;
	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
	nfs4_increment_lock_seqid(status, lsp);
	}

	if (status == 0) {
	if (status == 0)
		memcpy(&lsp->ls_stateid,  &res.u.stateid, 
				sizeof(lsp->ls_stateid));
		nfs4_notify_unlck(state, request, lsp);
	}
	nfs4_put_lock_state(lsp);
out:
	up(&state->lock_sema);
	if (status == 0)
@@ -2762,7 +2756,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_lock_state *lsp;
	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
	struct nfs_lockargs arg = {
		.fh = NFS_FH(inode),
		.type = nfs4_lck_type(cmd, request),
@@ -2784,9 +2778,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
	};
	int status;

	lsp = nfs4_get_lock_state(state, request->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
		struct nfs4_state_owner *owner = state->owner;
		struct nfs_open_to_lock otl = {
@@ -2808,27 +2799,26 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
		* seqid mutating errors */
		nfs4_increment_seqid(status, owner);
		up(&owner->so_sema);
		if (status == 0) {
			lsp->ls_flags |= NFS_LOCK_INITIALIZED;
			lsp->ls_seqid++;
		}
	} else {
		struct nfs_exist_lock el = {
			.seqid = lsp->ls_seqid,
		};
		memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
		largs.u.exist_lock = &el;
		largs.new_lock_owner = 0;
		arg.u.lock = &largs;
		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
	}
		/* increment seqid on success, and * seqid mutating errors*/
		nfs4_increment_lock_seqid(status, lsp);
	}
	/* save the returned stateid. */
	if (status == 0) {
	if (status == 0)
		memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
		lsp->ls_flags |= NFS_LOCK_INITIALIZED;
		if (!reclaim)
			nfs4_notify_setlk(state, request, lsp);
	} else if (status == -NFS4ERR_DENIED)
	else if (status == -NFS4ERR_DENIED)
		status = -EAGAIN;
	nfs4_put_lock_state(lsp);
	return status;
}

@@ -2869,6 +2859,8 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock

	down_read(&clp->cl_sem);
	down(&state->lock_sema);
	status = nfs4_set_lock_state(state, request);
	if (status == 0)
		status = _nfs4_do_setlk(state, cmd, request, 0);
	up(&state->lock_sema);
	if (status == 0) {
@@ -2927,7 +2919,6 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
		if (signalled())
			break;
	} while(status < 0);

	return status;
}

+79 −99
Original line number Diff line number Diff line
@@ -360,7 +360,7 @@ nfs4_alloc_open_state(void)
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	init_MUTEX(&state->lock_sema);
	rwlock_init(&state->state_lock);
	spin_lock_init(&state->state_lock);
	return state;
}

@@ -542,16 +542,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
	return NULL;
}

struct nfs4_lock_state *
nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	read_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	read_unlock(&state->state_lock);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
@@ -568,14 +558,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
		return NULL;
	lsp->ls_flags = 0;
	lsp->ls_seqid = 0;	/* arbitrary */
	lsp->ls_id = -1; 
	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	INIT_LIST_HEAD(&lsp->ls_locks);
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

@@ -585,121 +574,112 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 *
 * The caller must be holding state->lock_sema and clp->cl_sem
 */
struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state * lsp;
	struct nfs4_lock_state *lsp, *new = NULL;
	
	lsp = nfs4_find_lock_state(state, owner);
	if (lsp == NULL)
		lsp = nfs4_alloc_lock_state(state, owner);
	for(;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void
nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
		struct nfs4_lock_state *lsp;
	struct nfs4_state *state;

		lsp = nfs4_find_lock_state(state, fl_owner);
		if (lsp) {
			memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
			nfs4_put_lock_state(lsp);
	if (lsp == NULL)
		return;
		}
	}
	memcpy(dst, &state->stateid, sizeof(*dst));
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

/*
* Called with state->lock_sema and clp->cl_sem held.
*/
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	if (status == NFS_OK || seqid_mutating_err(-status))
		lsp->ls_seqid++;
}
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

/* 
* Check to see if the request lock (type FL_UNLK) effects the fl lock.
*
* fl and request must have the same posix owner
*
* return: 
* 0 -> fl not effected by request
* 1 -> fl consumed by request
*/
	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static int
nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
static void nfs4_fl_release_lock(struct file_lock *fl)
{
	if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
		return 1;
	return 0;
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

/*
 * Post an initialized lock_state on the state->lock_states list.
 */
void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	if (!list_empty(&lsp->ls_locks))
		return;
	atomic_inc(&lsp->ls_count);
	write_lock(&state->state_lock);
	list_add(&lsp->ls_locks, &state->lock_states);
	set_bit(LK_STATE_IN_USE, &state->flags);
	write_unlock(&state->state_lock);
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * to decide to 'reap' lock state:
 * 1) search i_flock for file_locks with fl.lock_state = to ls.
 * 2) determine if unlock will consume found lock. 
 * 	if so, reap
 *
 * 	else, don't reap.
 *
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void
nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	struct nfs4_lock_state *lsp;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (fl->fl_owner != lsp->ls_owner)
			continue;
		/* Exit if we find at least one lock which is not consumed */
		if (nfs4_check_unlock(fl,request) == 0)
	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;
	}

	write_lock(&state->state_lock);
	list_del_init(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	write_unlock(&state->state_lock);
	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
* Called with state->lock_sema and clp->cl_sem held.
*/
void
nfs4_put_lock_state(struct nfs4_lock_state *lsp)
void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
{
	if (!atomic_dec_and_test(&lsp->ls_count))
		return;
	BUG_ON (!list_empty(&lsp->ls_locks));
	kfree(lsp);
	if (status == NFS_OK || seqid_mutating_err(-status))
		lsp->ls_seqid++;
}

/*
+1 −0
Original line number Diff line number Diff line
@@ -674,6 +674,7 @@ struct file_lock {
	struct lock_manager_operations *fl_lmops;	/* Callbacks for lockmanagers */
	union {
		struct nfs_lock_info	nfs_fl;
		struct nfs4_lock_info	nfs4_fl;
	} fl_u;
};

+5 −0
Original line number Diff line number Diff line
@@ -16,6 +16,11 @@ struct nfs_lock_info {
	struct nlm_lockowner *owner;
};

struct nfs4_lock_state;
struct nfs4_lock_info {
	struct nfs4_lock_state *owner;
};

/*
 * Lock flag values
 */