Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cf15900e authored by Jens Axboe
Browse files

aio: remove request submission batching



This should be useless now that we have on-stack plugging. So let's just
kill it.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 9f5b9425
Loading
Loading
Loading
Loading
+3 −72
Original line number Diff line number Diff line
@@ -34,8 +34,6 @@
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/hash.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
@@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
struct aio_batch_entry {
	struct hlist_node list;
	struct address_space *mapping;
};
mempool_t *abe_pool;

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

@@ -86,8 +76,7 @@ static int __init aio_setup(void)
	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	aio_wq = create_workqueue("aio");
	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
	BUG_ON(!aio_wq || !abe_pool);
	BUG_ON(!aio_wq);

	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

@@ -1512,59 +1501,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
	return 0;
}

/*
 * Record @mapping (once per batch) in the caller's on-stack hash so the
 * whole io_submit() batch can be post-processed together — per the commit
 * message, this batching is presumably tied to block-device plugging and
 * is made redundant by on-stack plugging (TODO: confirm against caller).
 * Takes an extra reference on the backing inode; aio_batch_free() drops it.
 */
static void aio_batch_add(struct address_space *mapping,
			  struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos;
	unsigned bucket;

	/* Hash the mapping pointer into one of the AIO_BATCH_HASH_SIZE chains. */
	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
	/* Each mapping is recorded at most once per batch. */
	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
		if (abe->mapping == mapping)
			return;
	}

	/*
	 * NOTE(review): no NULL check — presumably relies on GFP_KERNEL
	 * mempool allocations sleeping until they succeed; confirm.
	 */
	abe = mempool_alloc(abe_pool, GFP_KERNEL);

	/*
	 * we should be using igrab here, but
	 * we don't want to hammer on the global
	 * inode spinlock just to take an extra
	 * reference on a file that we must already
	 * have a reference to.
	 *
	 * When we're called, we always have a reference
	 * on the file, so we must always have a reference
	 * on the inode, so ihold() is safe here.
	 */
	ihold(mapping->host);
	abe->mapping = mapping;
	hlist_add_head(&abe->list, &batch_hash[bucket]);
	return;
}

/*
 * Tear down a batch hash populated by aio_batch_add(): walk every chain,
 * drop the inode reference taken via ihold(), unlink the entry, and
 * return it to the global abe_pool mempool.
 */
static void aio_batch_free(struct hlist_head *batch_hash)
{
	struct aio_batch_entry *abe;
	struct hlist_node *pos, *n;
	int i;

	/*
	 * TODO: kill this
	 */
	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
		/* _safe variant: entries are deleted while iterating. */
		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
			iput(abe->mapping->host);
			hlist_del(&abe->list);
			mempool_free(abe, abe_pool);
		}
	}
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct hlist_head *batch_hash,
			 bool compat)
			 struct iocb *iocb, bool compat)
{
	struct kiocb *req;
	struct file *file;
@@ -1638,11 +1576,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			;
	}
	spin_unlock_irq(&ctx->ctx_lock);
	if (req->ki_opcode == IOCB_CMD_PREAD ||
	    req->ki_opcode == IOCB_CMD_PREADV ||
	    req->ki_opcode == IOCB_CMD_PWRITE ||
	    req->ki_opcode == IOCB_CMD_PWRITEV)
		aio_batch_add(file->f_mapping, batch_hash);

	aio_put_req(req);	/* drop extra ref to req */
	return 0;
@@ -1659,7 +1592,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
	struct kioctx *ctx;
	long ret = 0;
	int i;
	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
	struct blk_plug plug;

	if (unlikely(nr < 0))
@@ -1697,12 +1629,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);
	aio_batch_free(batch_hash);

	put_ioctx(ctx);
	return i ? i : ret;