Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0fb662e2 authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge branch 'for-3.16/core' into for-3.16/drivers



Pull in core changes (again), since we got rid of the alloc/free
hctx mq_ops hooks and mtip32xx then needed updating again.

Signed-off-by: Jens Axboe <axboe@fb.com>
parents 61789765 cdef54dd
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
/*
 * CPU notifier helper code for blk-mq
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+5 −0
Original line number Diff line number Diff line
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
+12 −0
Original line number Diff line number Diff line
/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
+11 −21
Original line number Diff line number Diff line
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
@@ -1329,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
}
EXPORT_SYMBOL(blk_mq_map_queue);

/*
 * Default single-queue hardware-context allocator: returns one
 * zero-initialized blk_mq_hw_ctx allocated on the requested NUMA node.
 * @set and @hctx_index are unused here; the signature exists to match
 * the alloc_hctx hook in struct blk_mq_ops.
 * Returns NULL on allocation failure.
 */
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
						   unsigned int hctx_index,
						   int node)
{
	struct blk_mq_hw_ctx *hctx;

	/* sizeof(*hctx) ties the allocation size to the pointer's type */
	hctx = kzalloc_node(sizeof(*hctx), GFP_KERNEL, node);
	return hctx;
}
EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);

/*
 * Counterpart to blk_mq_alloc_single_hw_queue(): frees a hardware
 * context allocated by it.  @hctx_index is unused; the signature
 * exists to match the free_hctx hook in struct blk_mq_ops.
 */
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
				 unsigned int hctx_index)
{
	kfree(hctx);
}
EXPORT_SYMBOL(blk_mq_free_single_hw_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
@@ -1584,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,

	queue_for_each_hw_ctx(q, hctx, i) {
		free_cpumask_var(hctx->cpumask);
		set->ops->free_hctx(hctx, i);
		kfree(hctx);
	}
}

@@ -1805,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node = blk_mq_hw_queue_to_node(map, i);

		hctxs[i] = set->ops->alloc_hctx(set, i, node);
		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
					GFP_KERNEL, node);
		if (!hctxs[i])
			goto err_hctxs;

@@ -1892,7 +1884,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
		if (!hctxs[i])
			break;
		free_cpumask_var(hctxs[i]->cpumask);
		set->ops->free_hctx(hctxs[i], i);
		kfree(hctxs[i]);
	}
err_map:
	kfree(hctxs);
@@ -1977,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->nr_hw_queues ||
	    !set->ops->queue_rq || !set->ops->map_queue ||
	    !set->ops->alloc_hctx || !set->ops->free_hctx)
	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
		return -EINVAL;


+0 −2
Original line number Diff line number Diff line
@@ -3832,8 +3832,6 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
/* blk-mq operations table for the mtip32xx driver. */
static struct blk_mq_ops mtip_mq_ops = {
	.queue_rq	= mtip_queue_rq,	/* submit a request to hardware */
	.map_queue	= blk_mq_map_queue,	/* default CPU -> hw queue mapping */
	/* NOTE(review): the commit shown removes these two hooks; the
	 * core now allocates/frees hctx itself — confirm against tree. */
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
	.free_hctx	= blk_mq_free_single_hw_queue,
	.init_request	= mtip_init_cmd,	/* per-request setup at map time */
	.exit_request	= mtip_free_cmd,	/* per-request teardown */
};
Loading