Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7c24d9f3 authored by Linus Torvalds
Browse files

Merge branch 'for-4.5/core' of git://git.kernel.dk/linux-block

Pull core block updates from Jens Axboe:
 "We don't have a lot of core changes this time around, it's mostly in
  drivers, which will come in a subsequent pull.

  The core changes include:

   - blk-mq
        - Prep patch from Christoph, changing blk_mq_alloc_request() to
          take flags instead of just using gfp_t for sleep/nosleep.
        - Doc patch from me, clarifying the difference between legacy
          and blk-mq for timer usage.
        - Fixes from Raghavendra for memory-less numa nodes, and a reuse
          of CPU masks.

   - Cleanup from Geliang Tang, using offset_in_page() instead of open
     coding it.

   - From Ilya, rename request_queue slab so it reflects what it holds,
     and a fix for proper use of bdgrab/put.

   - A real fix for the split across stripe boundaries from Keith.  We
     yanked a broken version of this from 4.4-rc final, this one works.

   - From Mike Krinkin, emit a trace message when we split.

   - From Wei Tang, two small cleanups, not explicitly clearing memory
     that is already cleared"

* 'for-4.5/core' of git://git.kernel.dk/linux-block:
  block: use bd{grab,put}() instead of open-coding
  block: split bios to max possible length
  block: add call to split trace point
  blk-mq: Avoid memoryless numa node encoded in hctx numa_node
  blk-mq: Reuse hardware context cpumask for tags
  blk-mq: add a flags parameter to blk_mq_alloc_request
  Revert "blk-flush: Queue through IO scheduler when flush not required"
  block: clarify blk_add_timer() use case for blk-mq
  bio: use offset_in_page macro
  block: do not initialise statics to 0 or NULL
  block: do not initialise globals to 0 or NULL
  block: rename request_queue slab cache
parents 99e38df8 ed8a9d2c
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -1125,7 +1125,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
@@ -1304,7 +1304,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
			goto out_unmap;
		}

		offset = uaddr & ~PAGE_MASK;
		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

+8 −7
Original line number Diff line number Diff line
@@ -51,7 +51,7 @@ DEFINE_IDA(blk_queue_ida);
/*
 * For the allocated request tables
 */
struct kmem_cache *request_cachep = NULL;
struct kmem_cache *request_cachep;

/*
 * For queue allocation
@@ -646,7 +646,7 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_alloc_queue);

int blk_queue_enter(struct request_queue *q, gfp_t gfp)
int blk_queue_enter(struct request_queue *q, bool nowait)
{
	while (true) {
		int ret;
@@ -654,7 +654,7 @@ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
		if (percpu_ref_tryget_live(&q->q_usage_counter))
			return 0;

		if (!gfpflags_allow_blocking(gfp))
		if (nowait)
			return -EBUSY;

		ret = wait_event_interruptible(q->mq_freeze_wq,
@@ -1292,7 +1292,9 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
/*
 * blk_get_request - allocate a request from queue @q.
 *
 * NOTE(review): this span is a rendered unified diff with the +/- markers
 * stripped, so the removed and added lines appear interleaved.  The single
 * four-argument blk_mq_alloc_request() call immediately below is the
 * pre-patch (gfp_t-based) version that the commit removes; the three-line
 * BLK_MQ_REQ_NOWAIT form after it is its replacement.  Only one of the two
 * exists in the real file -- as plain text the 'else' would otherwise be
 * orphaned and this would not compile.
 */
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	if (q->mq_ops)
		/* removed line (old API: gfp_t + reserved bool): */
		return blk_mq_alloc_request(q, rw, gfp_mask, false);
		/* added lines (new API: BLK_MQ_REQ_* flags derived from gfp): */
		return blk_mq_alloc_request(q, rw,
			(gfp_mask & __GFP_DIRECT_RECLAIM) ?
				0 : BLK_MQ_REQ_NOWAIT);
	else
		/* legacy (non-blk-mq) request allocation path is unchanged */
		return blk_old_get_request(q, rw, gfp_mask);
}
@@ -2060,8 +2062,7 @@ blk_qc_t generic_make_request(struct bio *bio)
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {

		if (likely(blk_queue_enter(q, false) == 0)) {
			ret = q->make_request_fn(q, bio);

			blk_queue_exit(q);
@@ -3534,7 +3535,7 @@ int __init blk_dev_init(void)
	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	return 0;
+19 −3
Original line number Diff line number Diff line
@@ -7,6 +7,8 @@
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static struct bio *blk_bio_discard_split(struct request_queue *q,
@@ -81,9 +83,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
	struct bio *new = NULL;

	bio_for_each_segment(bv, bio, iter) {
		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
@@ -91,6 +90,22 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) >
				blk_max_size_offset(q, bio->bi_iter.bi_sector)) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < blk_max_size_offset(q,
						bio->bi_iter.bi_sector)) {
				nsegs++;
				sectors = blk_max_size_offset(q,
						bio->bi_iter.bi_sector);
			}
			goto split;
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
@@ -162,6 +177,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
		split->bi_rw |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
+1 −1
Original line number Diff line number Diff line
@@ -113,7 +113,7 @@ int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)

	for_each_possible_cpu(i) {
		if (index == mq_map[i])
			return cpu_to_node(i);
			return local_memory_node(cpu_to_node(i));
	}

	return NUMA_NO_NODE;
+5 −6
Original line number Diff line number Diff line
@@ -268,7 +268,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
	if (tag != -1)
		return tag;

	if (!gfpflags_allow_blocking(data->gfp))
	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	bs = bt_wait_ptr(bt, hctx);
@@ -303,7 +303,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
@@ -349,10 +349,9 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)

/*
 * blk_mq_get_tag - allocate a tag, from the reserved pool when requested.
 *
 * NOTE(review): diff rendering with +/- markers stripped; both the removed
 * and the added versions of the body appear below.  The '!data->reserved'
 * check is the pre-patch code (bool field); the 'data->flags &
 * BLK_MQ_REQ_RESERVED' check is its replacement from the flags-parameter
 * conversion.  The real post-patch function contains only the latter two
 * statements.
 */
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	/* removed lines (old bool-based dispatch): */
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	/* added lines (new flags-based dispatch): */
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
Loading