
Commit 98794f93 authored by Linus Torvalds
Pull aio fixes from Ben LaHaise:
 "The first change from Anatol fixes a regression where io_destroy() no
  longer waits for outstanding aios to complete.  The second corrects a
  memory leak in an error path for vectored aio operations.

  Both of these bug fixes should be queued up for stable as well"

* git://git.kvack.org/~bcrl/aio-fixes:
  aio: fix potential leak in aio_run_iocb().
  aio: block io_destroy() until all context requests are completed
parents 8aa9e85a 754320d6
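
The first fix is easiest to motivate from userspace: once io_destroy() returns, a program is entitled to reuse or free the buffers its outstanding requests pointed at, so the syscall has to wait for those requests. A minimal sketch of that caller pattern with the raw AIO syscalls follows; the file name, sizes, and bare-bones error handling are illustrative, not taken from the commit.

/* Sketch: a caller that frees its buffer right after io_destroy().
 * With the fix, io_destroy() blocks until the write below completes,
 * so the free() is safe; before it, the kernel could still be reading
 * buf.  (Writes to buffered files may complete at submit time; real
 * AIO workloads usually pair this with O_DIRECT.) */
#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	char *buf = malloc(4096);
	int fd = open("scratch.bin", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || !buf || syscall(SYS_io_setup, 128, &ctx) < 0)
		return 1;
	memset(buf, 'x', 4096);

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PWRITE;
	cb.aio_fildes = fd;
	cb.aio_buf = (unsigned long)buf;	/* kernel reads from here */
	cb.aio_nbytes = 4096;
	cb.aio_offset = 0;

	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
		return 1;

	syscall(SYS_io_destroy, ctx);	/* must wait for the PWRITE */
	free(buf);			/* safe only because it waited */

	close(fd);
	return 0;
}

Before the fix, that final free(buf) could race with the kernel's write-out of buf; the requests_done completion added in the diff below restores the blocking behaviour.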
fs/aio.c +34 −8
@@ -112,6 +112,11 @@ struct kioctx {
 
 	struct work_struct	free_work;
 
+	/*
+	 * signals when all in-flight requests are done
+	 */
+	struct completion *requests_done;
+
 	struct {
 		/*
 		 * This counts the number of available slots in the ringbuffer,
@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 {
 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
 
+	/* At this point we know that there are no in-flight requests */
+	if (ctx->requests_done)
+		complete(ctx->requests_done);
+
 	INIT_WORK(&ctx->free_work, free_ioctx);
 	schedule_work(&ctx->free_work);
 }
@@ -718,7 +727,8 @@ err:
  *	when the processes owning a context have all exited to encourage
  *	the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+		struct completion *requests_done)
 {
 	if (!atomic_xchg(&ctx->dead, 1)) {
 		struct kioctx_table *table;
@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
 		if (ctx->mmap_size)
 			vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
+		ctx->requests_done = requests_done;
 		percpu_ref_kill(&ctx->users);
+	} else {
+		if (requests_done)
+			complete(requests_done);
 	}
 }
 
@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
 		 */
 		ctx->mmap_size = 0;
 
-		kill_ioctx(mm, ctx);
+		kill_ioctx(mm, ctx, NULL);
 	}
 }
 
@@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
 		if (ret)
-			kill_ioctx(current->mm, ioctx);
+			kill_ioctx(current->mm, ioctx, NULL);
 		percpu_ref_put(&ioctx->users);
 	}
 
@@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
-		kill_ioctx(current->mm, ioctx);
+		struct completion requests_done =
+			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+		/* Pass requests_done to kill_ioctx() where it can be set
+		 * in a thread-safe way.  Setting it here instead would race
+		 * if two io_destroy() calls ran simultaneously.
+		 */
+		kill_ioctx(current->mm, ioctx, &requests_done);
 		percpu_ref_put(&ioctx->users);
+
+		/* Wait until all IO for the context is done.  Otherwise the
+		 * kernel keeps using user-space buffers even though the user
+		 * thinks the context is destroyed.
+		 */
+		wait_for_completion(&requests_done);
+
 		return 0;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1299,9 +1327,7 @@ rw_common:
 						&iovec, compat)
 			: aio_setup_single_vector(req, rw, buf, &nr_segs,
 						  iovec);
-		if (ret)
-			return ret;
-
-		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+		if (!ret)
+			ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
 		if (ret < 0) {
 			if (iovec != &inline_vec)
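
The last hunk is the leak fix: rw_copy_check_uvector(), called via aio_setup_vectored_rw(), can allocate iovec and still return an error, and the caller must free the allocation even then; the old early return skipped the one cleanup site that does the kfree(). The same shape reduced to a self-contained sketch, where setup_vector() and verify() are illustrative stand-ins rather than kernel functions:

/* Sketch of the leak pattern the last hunk fixes: a helper that can
 * allocate and still fail, paired with an early return that skips the
 * caller's shared cleanup. */
#include <errno.h>
#include <stdlib.h>

#define SEG_LIMIT 8

/* May allocate *vecp and fail afterwards; as with
 * rw_copy_check_uvector(), the caller must free *vecp even on error. */
static int setup_vector(char **vecp, size_t nsegs)
{
	*vecp = malloc(nsegs);
	if (!*vecp)
		return -ENOMEM;
	return nsegs > SEG_LIMIT ? -EINVAL : 0;	/* leaves *vecp live */
}

static int verify(const char *vec, size_t nsegs)
{
	return vec && nsegs ? 0 : -EINVAL;
}

static int run_buggy(size_t nsegs)
{
	char *vec = NULL;
	int ret = setup_vector(&vec, nsegs);

	if (ret)
		return ret;	/* BUG: leaks vec if setup_vector()
				 * failed after allocating (old code) */
	ret = verify(vec, nsegs);
	if (ret < 0) {
		free(vec);	/* the shared cleanup site */
		return ret;
	}
	free(vec);
	return 0;
}

static int run_fixed(size_t nsegs)
{
	char *vec = NULL;
	int ret = setup_vector(&vec, nsegs);

	if (!ret)		/* the patch: fall through instead */
		ret = verify(vec, nsegs);
	if (ret < 0) {
		free(vec);	/* now reached on setup failure too */
		return ret;
	}
	free(vec);
	return 0;
}

int main(void)
{
	run_buggy(SEG_LIMIT + 1);	/* valgrind shows the lost block */
	return run_fixed(SEG_LIMIT + 1) == -EINVAL ? 0 : 1;
}

run_fixed() mirrors the patch by funnelling every failure through the single cleanup path, exactly what the "if (!ret) ret = rw_verify_area(...)" rewrite achieves in aio_run_iocb().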