
Commit c20202c5 authored by Al Viro, committed by Greg Kroah-Hartman

aio: keep io_event in aio_kiocb



commit a9339b7855094ba11a97e8822ae038135e879e79 upstream.

We want to separate forming the resulting io_event from putting it
into the ring buffer.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 592ea630
fs/aio.c: +13 −18
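For context, the struct io_event that this patch embeds into struct aio_kiocb as ki_res is the userspace-visible completion record from include/uapi/linux/aio_abi.h. Its layout is roughly the following (comments paraphrased, shown only for reference):

/* Userspace-visible AIO completion record, as declared in
 * include/uapi/linux/aio_abi.h (comments paraphrased). */
struct io_event {
	__u64	data;	/* copied from iocb->aio_data at submission */
	__u64	obj;	/* user-space address of the submitted iocb */
	__s64	res;	/* primary result of the operation */
	__s64	res2;	/* secondary result */
};

Keeping this whole record in the kiocb lets the submit path fill obj/data up front and the completion path fill res/res2 later, which is exactly the separation the commit message describes.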
@@ -198,8 +198,7 @@ struct aio_kiocb {
 	struct kioctx		*ki_ctx;
 	kiocb_cancel_fn		*ki_cancel;
 
-	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
-	__u64			ki_user_data;	/* user's data for completion */
+	struct io_event		ki_res;
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
@@ -1078,15 +1077,6 @@ static inline void iocb_put(struct aio_kiocb *iocb)
 		iocb_destroy(iocb);
 }
 
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-			   long res, long res2)
-{
-	ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-	ev->data = iocb->ki_user_data;
-	ev->res = res;
-	ev->res2 = res2;
-}
-
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
  */
@@ -1098,6 +1088,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	unsigned tail, pos, head;
 	unsigned long	flags;
 
+	iocb->ki_res.res = res;
+	iocb->ki_res.res2 = res2;
 	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
@@ -1114,14 +1106,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	aio_fill_event(event, iocb, res, res2);
+	*event = iocb->ki_res;
 
 	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-		 res, res2);
+	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+		 (void __user *)(unsigned long)iocb->ki_res.obj,
+		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -1838,8 +1830,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		goto out_put_req;
 	}
 
-	req->ki_user_iocb = user_iocb;
-	req->ki_user_data = iocb->aio_data;
+	req->ki_res.obj = (u64)(unsigned long)user_iocb;
+	req->ki_res.data = iocb->aio_data;
+	req->ki_res.res = 0;
+	req->ki_res.res2 = 0;
 
 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
@@ -2009,6 +2003,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	struct aio_kiocb *kiocb;
 	int ret = -EINVAL;
 	u32 key;
+	u64 obj = (u64)(unsigned long)iocb;
 
 	if (unlikely(get_user(key, &iocb->aio_key)))
 		return -EFAULT;
@@ -2022,7 +2017,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	spin_lock_irq(&ctx->ctx_lock);
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-		if (kiocb->ki_user_iocb == iocb) {
+		if (kiocb->ki_res.obj == obj) {
 			ret = kiocb->ki_cancel(&kiocb->rw);
 			list_del_init(&kiocb->ki_list);
 			break;
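
Taken together, the submit and cancel paths now key everything off ki_res, which is the same io_event eventually handed back to userspace. A minimal userspace sketch of that round trip (not part of this patch; raw syscalls, error handling omitted, and /etc/hostname is only a placeholder path):

/* Illustrative only: submit one read and observe that the returned
 * io_event carries back obj (the iocb's user-space address) and data
 * (aio_data), the fields this commit now stores in ki_res at submission. */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);	/* placeholder path */

	syscall(SYS_io_setup, 128, &ctx);

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (__u64)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_data = 0x1234;				/* echoed back as ev.data */

	syscall(SYS_io_submit, ctx, 1, cbs);
	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);

	/* ev.obj is the user-space address of &cb, ev.data is aio_data */
	printf("obj=%llx data=%llx res=%lld\n",
	       (unsigned long long)ev.obj, (unsigned long long)ev.data,
	       (long long)ev.res);

	syscall(SYS_io_destroy, ctx);
	close(fd);
	return 0;
}

With this patch, the kernel fills obj and data in __io_submit_one() and res/res2 in aio_complete(), so copying the event into the ring buffer reduces to the single struct assignment shown in the diff above.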