
Commit 9035a896 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "It's been a few weeks, so here's a small collection of fixes that
  should go into the current series.

  This contains:

   - NVMe pull request from Christoph, with a few important fixes.

   - kyber hang fix from Omar.

   - A blk-throttl fix from Shaohua, fixing a case where we double
     charge a bio.

   - Two call_single_data alignment fixes from me, fixing up some
     unfortunate changes that went into 4.14 without being properly
     reviewed on the block side (since nobody was CC'ed on the
     patch...).

   - A bounce buffer fix in two parts, one from me and one from Ming.

   - Revert bdi debug error handling patch. It's causing boot issues for
     some folks, and a week down the line, we're still no closer to a
     fix. Revert this patch for now until it's figured out, then we can
     retry for 4.16"

* 'for-linus' of git://git.kernel.dk/linux-block:
  Revert "bdi: add error handle for bdi_debug_register"
  null_blk: unalign call_single_data
  block: unalign call_single_data in struct request
  block-throttle: avoid double charge
  block: fix blk_rq_append_bio
  block: don't let passthrough IO go into .make_request_fn()
  nvme: setup streams after initializing namespace head
  nvme: check hw sectors before setting chunk sectors
  nvme: call blk_integrity_unregister after queue is cleaned up
  nvme-fc: remove double put reference if admin connect fails
  nvme: set discard_alignment to zero
  kyber: fix another domain token wait queue hang
parents 409232a4 6d0e4827
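
The two "unalign call_single_data" entries in the list above are layout fixes rather than behavioural ones, and their diffs are not shown on this page: per their subject lines, they stop forcing alignment on the call_single_data member embedded in struct request and in null_blk, because the aligned typedef introduced in 4.14 pads out the containing structure. The stand-alone sketch below only illustrates that padding effect; the structure and type names are invented for the example and are not kernel code.

#include <stdio.h>

/* Illustrative stand-in for an IPI descriptor, roughly the size of the
 * kernel's struct __call_single_data on 64-bit (four machine words). */
struct ipi_data {
	void *node, *func, *info;
	unsigned long flags;
};

/* An aligned typedef (analogous to call_single_data_t) rounds the type up to
 * a power-of-two boundary so a single instance never spans two cache lines. */
typedef struct ipi_data ipi_data_aligned_t __attribute__((aligned(32)));

/* Embedding the raw struct costs only its natural size... */
struct fake_request_unaligned {
	char before;
	struct ipi_data csd;
	char after;
};

/* ...while embedding the aligned typedef forces 32-byte padding around the
 * member and bloats the containing structure. */
struct fake_request_aligned {
	char before;
	ipi_data_aligned_t csd;
	char after;
};

int main(void)
{
	printf("unaligned member: %zu bytes\n", sizeof(struct fake_request_unaligned));
	printf("aligned member:   %zu bytes\n", sizeof(struct fake_request_aligned));
	return 0;
}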
block/bio.c  +2 −0

@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	bio->bi_disk = bio_src->bi_disk;
 	bio->bi_partno = bio_src->bi_partno;
 	bio_set_flag(bio, BIO_CLONED);
+	if (bio_flagged(bio_src, BIO_THROTTLED))
+		bio_set_flag(bio, BIO_THROTTLED);
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
block/blk-map.c  +22 −16

@@ -12,22 +12,29 @@
 #include "blk.h"
 
 /*
- * Append a bio to a passthrough request.  Only works can be merged into
- * the request based on the driver constraints.
+ * Append a bio to a passthrough request.  Only works if the bio can be merged
+ * into the request based on the driver constraints.
  */
-int blk_rq_append_bio(struct request *rq, struct bio *bio)
+int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
-	blk_queue_bounce(rq->q, &bio);
+	struct bio *orig_bio = *bio;
+
+	blk_queue_bounce(rq->q, bio);
 
 	if (!rq->bio) {
-		blk_rq_bio_prep(rq->q, rq, bio);
+		blk_rq_bio_prep(rq->q, rq, *bio);
 	} else {
-		if (!ll_back_merge_fn(rq->q, rq, bio))
+		if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+			if (orig_bio != *bio) {
+				bio_put(*bio);
+				*bio = orig_bio;
+			}
 			return -EINVAL;
+		}
 
-		rq->biotail->bi_next = bio;
-		rq->biotail = bio;
-		rq->__data_len += bio->bi_iter.bi_size;
+		rq->biotail->bi_next = *bio;
+		rq->biotail = *bio;
+		rq->__data_len += (*bio)->bi_iter.bi_size;
 	}
 
 	return 0;
@@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq,
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
-	ret = blk_rq_append_bio(rq, bio);
-	bio_get(bio);
+	ret = blk_rq_append_bio(rq, &bio);
 	if (ret) {
-		bio_endio(bio);
 		__blk_rq_unmap_user(orig_bio);
-		bio_put(bio);
 		return ret;
 	}
+	bio_get(bio);
 
 	return 0;
 }
@@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	unsigned long addr = (unsigned long) kbuf;
 	int do_copy = 0;
-	struct bio *bio;
+	struct bio *bio, *orig_bio;
 	int ret;
 
 	if (len > (queue_max_hw_sectors(q) << 9))
@@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->rq_flags |= RQF_COPY_USER;
 
-	ret = blk_rq_append_bio(rq, bio);
+	orig_bio = bio;
+	ret = blk_rq_append_bio(rq, &bio);
 	if (unlikely(ret)) {
 		/* request is too big */
-		bio_put(bio);
+		bio_put(orig_bio);
 		return ret;
 	}
 
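Seen from a caller, the net effect of the block/blk-map.c changes above is that blk_rq_append_bio() now takes a struct bio ** and may replace the caller's bio with a bounce clone, restoring the original on failure. A minimal calling pattern, modelled on the blk_rq_map_kern() hunk above; the helper and its error handling are hypothetical, only the two block layer calls are real kernel APIs:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical helper showing only the new calling convention. */
static int example_append(struct request *rq, struct bio *bio)
{
	struct bio *orig_bio = bio;	/* the bio this caller owns */
	int ret;

	/* May swap 'bio' for a bounce clone.  On the -EINVAL path the
	 * function drops the clone and restores the original, so 'bio'
	 * again equals 'orig_bio'. */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		bio_put(orig_bio);	/* release the caller's reference */
		return ret;
	}

	/* On success 'bio' may now point at the clone linked into 'rq';
	 * later references must use 'bio', not 'orig_bio'. */
	return 0;
}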
block/blk-throttle.c  +1 −7

@@ -2226,13 +2226,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
 out:
-	/*
-	 * As multiple blk-throtls may stack in the same issue path, we
-	 * don't want bios to leave with the flag set.  Clear the flag if
-	 * being issued.
-	 */
-	if (!throttled)
-		bio_clear_flag(bio, BIO_THROTTLED);
+	bio_set_flag(bio, BIO_THROTTLED);
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 	if (throttled || !td->track_bio_latency)
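
The block/blk-throttle.c hunk above is one half of the double-charge fix; the other half is the __bio_clone_fast() hunk at the top of this page. Together they make BIO_THROTTLED sticky: blk_throtl_bio() now always marks a bio on the way out, and clones inherit the flag, so a throttled bio that is split or cloned and resubmitted is not charged against the group's budget a second time. A hedged paraphrase of the resulting flow (the early-return check is recalled from blk_throtl_bio(), not shown in the hunk):

#include <linux/bio.h>

/* Sketch only: how the sticky BIO_THROTTLED flag prevents double charging.
 * Not kernel code; the charge/queue step is elided. */
static bool throttle_once_sketch(struct bio *bio)
{
	bool queued = false;

	/* A bio that already passed through throttling, or a clone of one
	 * (see the __bio_clone_fast() hunk above), is not charged again. */
	if (bio_flagged(bio, BIO_THROTTLED))
		return false;			/* dispatch immediately */

	/* ... charge bio->bi_iter.bi_size against the group's bps/iops
	 * budget; set 'queued' if the bio had to be held back ... */

	/* Always leave the flag set on the way out; this replaces the
	 * removed "clear the flag if being issued" logic. */
	bio_set_flag(bio, BIO_THROTTLED);
	return queued;
}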
block/bounce.c  +4 −2

@@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	unsigned i = 0;
 	bool bounce = false;
 	int sectors = 0;
+	bool passthrough = bio_is_passthrough(*bio_orig);
 
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_PAGES)
@@ -210,13 +211,14 @@
 	if (!bounce)
 		return;
 
-	if (sectors < bio_sectors(*bio_orig)) {
+	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
 		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
 		bio_chain(bio, *bio_orig);
 		generic_make_request(*bio_orig);
 		*bio_orig = bio;
 	}
-	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+			bounce_bio_set);
 
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
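
The block/bounce.c change above corresponds to the "block: don't let passthrough IO go into .make_request_fn()" entry in the shortlog. The condensed view below simply restates the second hunk with the editor's reading of why passthrough bios are treated specially; it is annotation, not additional kernel code:

	bool passthrough = bio_is_passthrough(*bio_orig);

	/* Splitting hands the remainder back to generic_make_request(), i.e.
	 * to the queue's ->make_request_fn().  That is fine for filesystem
	 * bios, but a passthrough bio already belongs to a fully formed
	 * request, so it must be bounced in one piece instead. */
	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
		bio_chain(bio, *bio_orig);
		generic_make_request(*bio_orig);
		*bio_orig = bio;
	}

	/* Passthrough bios are also cloned without the bounce-local bio_set;
	 * passing NULL makes bio_clone_bioset() use a plain allocation. */
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
			bounce_bio_set);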
block/kyber-iosched.c  +24 −13

@@ -100,9 +100,13 @@ struct kyber_hctx_data {
 	unsigned int cur_domain;
 	unsigned int batching;
 	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+			     void *key);
+
 static int rq_sched_domain(const struct request *rq)
 {
 	unsigned int op = rq->cmd_flags;
@@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 		INIT_LIST_HEAD(&khd->rqs[i]);
+		init_waitqueue_func_entry(&khd->domain_wait[i],
+					  kyber_domain_wake);
+		khd->domain_wait[i].private = hctx;
 		INIT_LIST_HEAD(&khd->domain_wait[i].entry);
 		atomic_set(&khd->wait_index[i], 0);
 	}
@@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 	int nr;
 
 	nr = __sbitmap_queue_get(domain_tokens);
-	if (nr >= 0)
-		return nr;
 
 	/*
 	 * If we failed to get a domain token, make sure the hardware queue is
 	 * run when one becomes available. Note that this is serialized on
 	 * khd->lock, but we still need to be careful about the waker.
 	 */
-	if (list_empty_careful(&wait->entry)) {
-		init_waitqueue_func_entry(wait, kyber_domain_wake);
-		wait->private = hctx;
+	if (nr < 0 && list_empty_careful(&wait->entry)) {
 		ws = sbq_wait_ptr(domain_tokens,
 				  &khd->wait_index[sched_domain]);
+		khd->domain_ws[sched_domain] = ws;
 		add_wait_queue(&ws->wait, wait);
 
 		/*
 		 * Try again in case a token was freed before we got on the wait
-		 * queue. The waker may have already removed the entry from the
-		 * wait queue, but list_del_init() is okay with that.
+		 * queue.
 		 */
 		nr = __sbitmap_queue_get(domain_tokens);
-		if (nr >= 0) {
-			unsigned long flags;
+	}
 
-			spin_lock_irqsave(&ws->wait.lock, flags);
-			list_del_init(&wait->entry);
-			spin_unlock_irqrestore(&ws->wait.lock, flags);
-		}
+	/*
+	 * If we got a token while we were on the wait queue, remove ourselves
+	 * from the wait queue to ensure that all wake ups make forward
+	 * progress. It's possible that the waker already deleted the entry
+	 * between the !list_empty_careful() check and us grabbing the lock, but
+	 * list_del_init() is okay with that.
+	 */
+	if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+		ws = khd->domain_ws[sched_domain];
+		spin_lock_irq(&ws->wait.lock);
+		list_del_init(&wait->entry);
+		spin_unlock_irq(&ws->wait.lock);
 	}
 
 	return nr;
 }
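
The kyber change above has two parts: the per-domain wait queue entry is now initialised once in kyber_init_hctx(), and kyber_get_domain_token() records which sbq_wait_state it parked on (khd->domain_ws) so a later call can dequeue from exactly that wait queue. The sketch below restates the resulting try / park / retry / dequeue pattern with the editor's comments; names follow the diff, but it is an illustration rather than a further patch:

/* Simplified restatement of kyber_get_domain_token() after the fix. */
static int domain_token_sketch(struct kyber_queue_data *kqd,
			       struct kyber_hctx_data *khd,
			       unsigned int sched_domain)
{
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	if (nr < 0 && list_empty_careful(&wait->entry)) {
		/* Park on a wait queue and remember which one, so the
		 * dequeue below takes the matching ws->wait.lock. */
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		add_wait_queue(&ws->wait, wait);

		/* A token may have been freed between the failed get and
		 * add_wait_queue(); retry so we don't sleep on a free token. */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/* If the retry (or an earlier call) got a token while the entry is
	 * still queued, remove it so future wake-ups reach a waiter that
	 * actually needs one instead of being swallowed. */
	if (nr >= 0 && !list_empty_careful(&wait->entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		list_del_init(&wait->entry);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}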