Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a311c480 authored by Linus Torvalds
Browse files
Pull aio fix and cleanups from Ben LaHaise:
 "This consists of a couple of code cleanups plus a minor bug fix"

* git://git.kvack.org/~bcrl/aio-next:
  aio: cleanup: flatten kill_ioctx()
  aio: report error from io_destroy() when threads race in io_destroy()
  fs/aio.c: Remove ctx parameter in kiocb_cancel
parents 05064084 fa88b6f8
Loading
Loading
Loading
Loading
+36 −34
Original line number Diff line number Diff line
@@ -477,7 +477,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
static int kiocb_cancel(struct kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

@@ -538,7 +538,7 @@ static void free_ioctx_users(struct percpu_ref *ref)
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(ctx, req);
		kiocb_cancel(req);
	}

	spin_unlock_irq(&ctx->ctx_lock);
@@ -727,12 +727,15 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		struct completion *requests_done)
{
	if (!atomic_xchg(&ctx->dead, 1)) {
	struct kioctx_table *table;

	if (atomic_xchg(&ctx->dead, 1))
		return -EINVAL;


	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
@@ -759,10 +762,7 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,

	ctx->requests_done = requests_done;
	percpu_ref_kill(&ctx->users);
	} else {
		if (requests_done)
			complete(requests_done);
	}
	return 0;
}

/* wait_on_sync_kiocb:
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
	if (likely(NULL != ioctx)) {
		struct completion requests_done =
			COMPLETION_INITIALIZER_ONSTACK(requests_done);
		int ret;

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we try to set it here then we have
		 * a race condition if two io_destroy() called simultaneously.
		 */
		kill_ioctx(current->mm, ioctx, &requests_done);
		ret = kill_ioctx(current->mm, ioctx, &requests_done);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise kernel
		 * keep using user-space buffers even if user thinks the context
		 * is destroyed.
		 */
		if (!ret)
			wait_for_completion(&requests_done);

		return 0;
		return ret;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
@@ -1595,7 +1597,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(ctx, kiocb);
		ret = kiocb_cancel(kiocb);
	else
		ret = -EINVAL;