Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 05a50a5b authored by Al Viro, committed by Mike Marshall
Browse files

orangefs: have ..._clean_interrupted_...() wait for copy to/from daemon



* turn all those list_del(&op->list) into list_del_init()
* don't pick ops that are already given up in control device
  ->read()/->write_iter().
* have orangefs_clean_interrupted_operation() notice if op is currently
  being copied to/from daemon (by said ->read()/->write_iter()) and
  wait for that to finish.
* when we are done copying to/from daemon and find that it had been
  given up while we were doing that, wake the waiting ..._clean_interrupted_...

As the result, we are guaranteed that orangefs_clean_interrupted_operation(op)
doesn't return until nobody else can see op.  Moreover, we don't need to play
with op refcounts anymore.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Mike Marshall <hubcap@omnibond.com>
parent 5964c1b8
Loading
Loading
Loading
Loading
+10 −10
Original line number Diff line number Diff line
@@ -58,9 +58,9 @@ static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
				 next,
				 &htable_ops_in_progress[index],
				 list) {
		if (op->tag == tag && !op_state_purged(op)) {
		if (op->tag == tag && !op_state_purged(op) &&
		    !op_state_given_up(op)) {
			list_del_init(&op->list);
			get_op(op); /* increase ref count. */
			spin_unlock(&htable_ops_in_progress_lock);
			return op;
		}
@@ -133,7 +133,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);
		if (unlikely(op_state_purged(op))) {
		if (unlikely(op_state_purged(op) || op_state_given_up(op))) {
			spin_unlock(&op->lock);
			continue;
		}
@@ -199,13 +199,12 @@ static ssize_t orangefs_devreq_read(struct file *file,
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del(&cur_op->list);
		list_del_init(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}
	list_del_init(&cur_op->list);
	get_op(op);
	spin_unlock(&orangefs_request_list_lock);

	spin_unlock(&cur_op->lock);
@@ -230,7 +229,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
	if (unlikely(op_state_given_up(cur_op))) {
		spin_unlock(&cur_op->lock);
		spin_unlock(&htable_ops_in_progress_lock);
		op_release(cur_op);
		complete(&cur_op->waitq);
		goto restart;
	}

@@ -242,7 +241,6 @@ static ssize_t orangefs_devreq_read(struct file *file,
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);
	spin_unlock(&htable_ops_in_progress_lock);
	op_release(cur_op);

	/* The client only asks to read one size buffer. */
	return MAX_DEV_REQ_UPSIZE;
@@ -258,10 +256,12 @@ static ssize_t orangefs_devreq_read(struct file *file,
	if (likely(!op_state_given_up(cur_op))) {
		set_op_state_waiting(cur_op);
		list_add(&cur_op->list, &orangefs_request_list);
	}
		spin_unlock(&cur_op->lock);
	} else {
		spin_unlock(&cur_op->lock);
		complete(&cur_op->waitq);
	}
	spin_unlock(&orangefs_request_list_lock);
	op_release(cur_op);
	return -EFAULT;
}

@@ -405,11 +405,11 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
		put_cancel(op);
	} else if (unlikely(op_state_given_up(op))) {
		spin_unlock(&op->lock);
		complete(&op->waitq);
	} else {
		set_op_state_serviced(op);
		spin_unlock(&op->lock);
	}
	op_release(op);
	return ret;

Efault:
+1 −1
Original line number Diff line number Diff line
@@ -259,7 +259,7 @@ static inline void set_op_state_purged(struct orangefs_kernel_op_s *op)
{
	spin_lock(&op->lock);
	if (unlikely(op_is_cancel(op))) {
		list_del(&op->list);
		list_del_init(&op->list);
		spin_unlock(&op->lock);
		put_cancel(op);
	} else {
+10 −12
Original line number Diff line number Diff line
@@ -208,15 +208,20 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
	 * Called with op->lock held.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (op_state_waiting(op)) {
	/* from that point on it can't be moved by anybody else */
	if (list_empty(&op->list)) {
		/* caught copying to/from daemon */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del(&op->list);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
@@ -225,23 +230,16 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
		/* op must be removed from the in progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del(&op->list);
		list_del_init(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p"
			     " from htable_ops_in_progress\n",
			     op);
	} else if (!op_state_serviced(op)) {
	} else {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	} else {
		/*
		 * It is not intended for execution to flow here,
		 * but having this unlock here makes sparse happy.
		 */
		gossip_err("%s: can't get here.\n", __func__);
		spin_unlock(&op->lock);
	}
	reinit_completion(&op->waitq);
}