
Commit 851c9f38 authored by Mike Snitzer

Merge remote-tracking branch 'jens/for-4.1/core' into dm/for-next

parents e9637415 c76cbbcf
block/blk-core.c +16 −3
@@ -557,6 +557,18 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);

+/* Allocate memory local to the request queue */
+static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+{
+	int nid = (int)(long)data;
+	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+}
+
+static void free_request_struct(void *element, void *unused)
+{
+	kmem_cache_free(request_cachep, element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask)
 {
@@ -569,9 +581,10 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-					  mempool_free_slab, request_cachep,
-					  gfp_mask, q->node);
+	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
+					  free_request_struct,
+					  (void *)(long)q->node, gfp_mask,
+					  q->node);
 	if (!rl->rq_pool)
 		return -ENOMEM;

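Note on the pattern above: mempool_create_node() takes an alloc/free callback pair plus an opaque pool_data argument, and the patch packs the NUMA node id into pool_data so reserved elements come from the queue's own node. A minimal sketch of the same technique for an arbitrary slab cache; my_cachep, my_alloc, my_free and create_node_local_pool() are hypothetical names, not part of this patch:

/* Sketch: a mempool whose elements are allocated on a fixed NUMA node,
 * mirroring alloc_request_struct()/free_request_struct() above. */
static struct kmem_cache *my_cachep;	/* assumed created elsewhere */

static void *my_alloc(gfp_t gfp_mask, void *data)
{
	int nid = (int)(long)data;	/* node id packed into pool_data */

	return kmem_cache_alloc_node(my_cachep, gfp_mask, nid);
}

static void my_free(void *element, void *unused)
{
	kmem_cache_free(my_cachep, element);
}

static mempool_t *create_node_local_pool(int nid)
{
	return mempool_create_node(4, my_alloc, my_free,
				   (void *)(long)nid, GFP_KERNEL, nid);
}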
block/blk-mq-sysfs.c +1 −0
@@ -436,6 +436,7 @@ int blk_mq_register_disk(struct gendisk *disk)

 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_mq_register_disk);

 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
block/blk-mq.c +32 −22
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);

 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);

 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }

-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;

+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)

 	if (freeze) {
 		percpu_ref_kill(&q->mq_usage_counter);
-		blk_mq_run_queues(q);
+		blk_mq_run_hw_queues(q, false);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
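For context: blk_mq_freeze_queue_start() is the entry half of the freeze protocol, and the blk_mq_run_hw_queues() call above flushes work that was queued before the percpu ref was killed. Drivers normally use the higher-level pair, roughly as sketched here (the reconfiguration step is a placeholder, not part of this patch):

	blk_mq_freeze_queue(q);		/* kill ref, run queues, wait for drain */
	/* ... modify queue state while no requests are in flight ... */
	blk_mq_unfreeze_queue(q);	/* resurrect the ref and wake waiters */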
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;

-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);

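With gfp plumbed into blk_mq_queue_enter(), a request allocation that may not sleep now fails fast with -EBUSY while the queue is frozen instead of blocking. A sketch of both caller flavors against an already-initialized queue q (assumed, not from this patch):

	/* Sleeping caller: waits out a freeze (GFP_KERNEL includes __GFP_WAIT). */
	struct request *rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);

	/* Atomic caller: gets ERR_PTR(-EBUSY) while the queue is frozen. */
	struct request *arq = blk_mq_alloc_request(q, WRITE, GFP_ATOMIC, false);
	if (IS_ERR(arq))
		return PTR_ERR(arq);	/* back off; retry from process context */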
@@ -904,7 +906,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 			&hctx->run_work, 0);
 }

-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
@@ -915,9 +917,10 @@ static void blk_mq_run_queues(struct request_queue *q)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;

-		blk_mq_run_hw_queue(hctx, false);
+		blk_mq_run_hw_queue(hctx, async);
 	}
 }
+EXPORT_SYMBOL(blk_mq_run_hw_queues);

 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
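Since blk_mq_run_hw_queues() is now exported, a driver can restart dispatch on every hardware queue itself, e.g. once an internal resource shortage clears; the async flag chooses between inline dispatch and deferral to the kblockd workqueue. Sketch:

	/* Resources became available again: kick all hardware queues.
	 * true = run from the workqueue rather than in the caller's context. */
	blk_mq_run_hw_queues(q, true);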
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;

-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 		bio_endio(bio, -EIO);
 		return NULL;
 	}
@@ -1890,10 +1893,26 @@ void blk_mq_release(struct request_queue *q)
 }

 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+	if (!uninit_q)
+		return ERR_PTR(-ENOMEM);
+
+	q = blk_mq_init_allocated_queue(set, uninit_q);
+	if (IS_ERR(q))
+		blk_cleanup_queue(uninit_q);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q)
 {
 	struct blk_mq_hw_ctx **hctxs;
 	struct blk_mq_ctx __percpu *ctx;
-	struct request_queue *q;
 	unsigned int *map;
 	int i;

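The split above lets a driver that must prepare the request_queue itself (device-mapper, the target of this merge, is the motivating user) allocate the queue first, apply its own setup, and only then hand it to blk-mq. A hedged sketch of that two-step flow; the driver-setup step is a placeholder:

	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	if (!uninit_q)
		return -ENOMEM;

	/* ... driver-specific setup of uninit_q can happen here ... */

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q)) {
		blk_cleanup_queue(uninit_q);	/* free the half-built queue */
		return PTR_ERR(q);
	}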
@@ -1928,20 +1947,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 		hctxs[i]->queue_num = i;
 	}

-	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-	if (!q)
-		goto err_hctxs;
-
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
 	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_mq_usage;
+		goto err_hctxs;

 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-	blk_queue_rq_timeout(q, 30000);
+	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);

 	q->nr_queues = nr_cpu_ids;
 	q->nr_hw_queues = set->nr_hw_queues;
@@ -1967,9 +1982,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	else
 		blk_queue_make_request(q, blk_sq_make_request);

-	if (set->timeout)
-		blk_queue_rq_timeout(q, set->timeout);
-
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
 	 */
@@ -1981,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);

 	if (blk_mq_init_hw_queues(q, set))
-		goto err_mq_usage;
+		goto err_hctxs;

 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2005,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)

 	return q;

-err_mq_usage:
-	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2019,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	free_percpu(ctx);
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);

 void blk_mq_free_queue(struct request_queue *q)
 {
@@ -2161,7 +2171,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;

-	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+	if (!set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;

 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
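For reference, the checks above validate the driver-supplied blk_mq_tag_set. A minimal, hypothetical set that would pass them; my_mq_ops stands in for an ops table providing at least .queue_rq and .map_queue, and struct my_cmd is a made-up per-request payload:

	struct blk_mq_tag_set set = {
		.ops		= &my_mq_ops,
		.nr_hw_queues	= 1,
		.queue_depth	= 64,	/* above reserved_tags + BLK_MQ_TAG_MIN,
					 * within BLK_MQ_MAX_DEPTH */
		.numa_node	= NUMA_NO_NODE,
		.cmd_size	= sizeof(struct my_cmd),
	};
	int ret = blk_mq_alloc_tag_set(&set);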
include/linux/blk-mq.h +3 −0
@@ -164,6 +164,8 @@ enum {
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q);
 void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
@@ -218,6 +220,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);