
Commit 8714f8f5 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A small collection of fixes for the current series.  This contains:

   - Two fixes for xen-blkfront, from Bob Liu.

   - A bug fix for NVMe, releasing only the specific resources we
     requested.

   - Fix for a debugfs flags entry for nbd, from Josef.

   - Plug fix from Omar, fixing up a case of code being switched between
     two functions.

   - A missing bio_put() for the new discard callers of
     submit_bio_wait(), fixing a regression causing a leak of the bio.
     From Shaun.

   - Improve dirty limit calculation precision in the writeback code,
     fixing a case where setting a limit lower than 1% of memory would
     end up being zero.  From Tejun"

* 'for-linus' of git://git.kernel.dk/linux-block:
  NVMe: Only release requested regions
  xen-blkfront: fix resume issues after a migration
  xen-blkfront: don't call talk_to_blkback when already connected to blkback
  nbd: pass the nbd pointer for flags debugfs
  block: missing bio_put following submit_bio_wait
  blk-mq: really fix plug list flushing for nomerge queues
  writeback: use higher precision calculation in domain_dirty_limits()
parents 3a7c114d edb50a54
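
One fix in the shortlog, the writeback precision change, has no hunk shown below. Its failure mode is ordinary integer truncation: converting a byte-based limit into a whole-number percentage first rounds anything under 1% of memory down to zero. A minimal userspace illustration of the arithmetic (this is not the kernel's domain_dirty_limits() code, only the rounding behaviour the fix guards against):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t avail_pages = 4ULL << 20;	/* ~16 GB worth of 4 KiB pages */
	uint64_t limit_pages = 20000;		/* a dirty limit well under 1% of that */

	/* Percent-first: the ratio rounds down to 0, and so does the threshold. */
	uint64_t pct = limit_pages * 100 / avail_pages;		/* 0 */
	uint64_t thresh_coarse = avail_pages * pct / 100;	/* 0 */

	/* Higher precision: scale before dividing and the limit survives. */
	uint64_t per_10k = limit_pages * 10000 / avail_pages;	/* 47 */
	uint64_t thresh_fine = avail_pages * per_10k / 10000;	/* ~19700 pages */

	printf("coarse=%llu fine=%llu\n",
	       (unsigned long long)thresh_coarse,
	       (unsigned long long)thresh_fine);
	return 0;
}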
block/blk-lib.c  +9 −3

@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		ret = submit_bio_wait(type, bio);
 		if (ret == -EOPNOTSUPP)
 			ret = 0;
+		bio_put(bio);
 	}
 	blk_finish_plug(&plug);
 
@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		}
 	}
 
-	if (bio)
+	if (bio) {
 		ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+		bio_put(bio);
+	}
 	return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		}
 	}
 
-	if (bio)
-		return submit_bio_wait(WRITE, bio);
+	if (bio) {
+		ret = submit_bio_wait(WRITE, bio);
+		bio_put(bio);
+		return ret;
+	}
 	return 0;
 }
 
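The blk-lib.c hunks above all enforce the same rule: submit_bio_wait() waits for the bio to complete but does not drop the caller's reference, so a caller that allocated the bio must bio_put() it afterwards or it leaks. A hedged sketch of the pattern against the API of this era, where submit_bio_wait() still took the rw flags (the helper name and its simplifications are invented; the block-layer calls are the real ones):

/*
 * Hypothetical synchronous-discard helper, shown only to illustrate the
 * ownership rule: the reference taken by bio_alloc() still belongs to us
 * after submit_bio_wait() returns, so we must drop it ourselves.
 */
static int sync_discard_example(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(gfp_mask, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = nr_sects << 9;

	ret = submit_bio_wait(REQ_WRITE | REQ_DISCARD, bio);
	bio_put(bio);	/* completion does not free the bio for us */
	return ret;
}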
block/blk-mq.c  +8 −9

@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q)) {
-		if (blk_attempt_plug_merge(q, bio, &request_count,
-					   &same_queue_rq))
-			return BLK_QC_T_NONE;
-	} else
-		request_count = blk_plug_queued_count(q);
+	if (!is_flush_fua && !blk_queue_nomerges(q) &&
+	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return BLK_QC_T_NONE;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+			return BLK_QC_T_NONE;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
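This is the "code being switched between two functions" item from the merge message: the nomerge fallback, which takes request_count from blk_plug_queued_count() so a long plug list still gets flushed, had been applied to blk_mq_make_request(), while blk_sq_make_request() is the path that actually consults request_count for that decision. The two hunks above move the logic to the right place. A condensed, hypothetical wrapper around the single-queue behaviour (the wrapper itself is invented; the functions it calls are the ones in the hunk):

/*
 * Invented helper condensing blk_sq_make_request()'s plug handling after
 * the fix: attempt a merge only when merging is allowed, but always keep
 * request_count up to date so the plug list is flushed once it grows.
 */
static bool sq_plug_merge_example(struct request_queue *q, struct bio *bio,
				  bool is_flush_fua,
				  unsigned int *request_count)
{
	if (!is_flush_fua && !blk_queue_nomerges(q)) {
		/* A successful merge means the bio is already taken care of. */
		if (blk_attempt_plug_merge(q, bio, request_count, NULL))
			return true;
	} else {
		/* No merging, but the plug depth still matters. */
		*request_count = blk_plug_queued_count(q);
	}
	return false;
}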
drivers/block/nbd.c  +1 −1

@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
 	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
 	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
-	debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
 
 	return 0;
 }
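The one-character nbd change matters because the data argument of debugfs_create_file() is stored in the new inode's i_private and later handed back to the file's callbacks. An illustration of the difference (not the nbd sources; the helper is invented):

/*
 * Illustration of the bug pattern fixed above.
 */
static void nbd_dbg_example(struct nbd_device *nbd, struct dentry *dir)
{
	/*
	 * Wrong: &nbd is the address of this function's local pointer
	 * variable, which is gone as soon as the function returns, so the
	 * read callback would later dereference a dangling stack address:
	 *
	 *     debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
	 *
	 * Right: hand over the device itself, which is what the callback
	 * expects to find in i_private.
	 */
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
}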
drivers/block/xen-blkfront.c  +22 −13

@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *qd)
 {
 	unsigned long flags;
-	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+	int qid = hctx->queue_num;
+	struct blkfront_info *info = hctx->queue->queuedata;
+	struct blkfront_ring_info *rinfo = NULL;
 
+	BUG_ON(info->nr_rings <= qid);
+	rinfo = &info->rinfo[qid];
 	blk_mq_start_request(qd->rq);
 	spin_lock_irqsave(&rinfo->ring_lock, flags);
 	if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			    unsigned int index)
-{
-	struct blkfront_info *info = (struct blkfront_info *)data;
-
-	BUG_ON(info->nr_rings <= index);
-	hctx->driver_data = &info->rinfo[index];
-	return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
 	.queue_rq = blkif_queue_rq,
 	.map_queue = blk_mq_map_queue,
-	.init_hctx = blk_mq_init_hctx,
 };
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 		return PTR_ERR(rq);
 	}
 
+	rq->queuedata = info;
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
 	if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
 		return err;
 
 	err = talk_to_blkback(dev, info);
+	if (!err)
+		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
 
 	/*
 	 * We have to wait for the backend to switch to
@@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateConnected:
-		if (dev->state != XenbusStateInitialised) {
+		/*
+		 * talk_to_blkback sets state to XenbusStateInitialised
+		 * and blkfront_connect sets it to XenbusStateConnected
+		 * (if connection went OK).
+		 *
+		 * If the backend (or toolstack) decides to poke at backend
+		 * state (and re-trigger the watch by setting the state repeatedly
+		 * to XenbusStateConnected (4)) we need to deal with this.
+		 * This is allowed as this is used to communicate to the guest
+		 * that the size of disk has changed!
+		 */
+		if ((dev->state != XenbusStateInitialised) &&
+		    (dev->state != XenbusStateConnected)) {
 			if (talk_to_blkback(dev, info))
 				break;
 		}
+
 		blkfront_connect(info);
 		break;
 
drivers/nvme/host/pci.c  +7 −2

@@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	int bars;
+
 	if (dev->bar)
 		iounmap(dev->bar);
-	pci_release_regions(to_pci_dev(dev->dev));
+
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	pci_release_selected_regions(pdev, bars);
 }
 
 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
        return 0;
  release:
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, bars);
        return -ENODEV;
 }
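nvme_dev_map() (the second hunk) only ever requests the memory BARs it picks with pci_select_bars(), while pci_release_regions() releases every BAR, including ones this driver never requested; the fix makes the teardown paths release exactly the selection that was requested. A sketch of the pairing, with invented helper names around the real PCI calls:

/*
 * Sketch of the request/release pairing the fix restores: only the BARs
 * selected and requested at map time are released again at unmap time.
 */
static int example_map(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Request only the memory BARs we are actually going to use. */
	if (pci_request_selected_regions(pdev, bars, "example"))
		return -ENODEV;
	return 0;
}

static void example_unmap(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* Release exactly the selection requested above, nothing more. */
	pci_release_selected_regions(pdev, bars);
}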
