
Commit 0fa8dc42 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A small collection of fixes that should go into this series. This
  contains:

   - NVMe pull request from Christoph, with various fixes for nvme
     proper and nvme-fc.

   - disable runtime PM for blk-mq for now.

     With scsi now defaulting to using blk-mq, this reared its head as
     an issue. Longer term we'll fix up runtime PM for blk-mq, for now
     just disable it to prevent a hang on laptop resume for some folks.

   - blk-mq CPU <-> hw queue map fix from Christoph.

   - xen/blkfront pull request from Konrad, with two small fixes for the
     blkfront driver.

   - a few fixups for nbd from Joseph.

   - a stable fix for pblk from Javier"

* 'for-linus' of git://git.kernel.dk/linux-block:
  lightnvm: pblk: advance bio according to lba index
  nvme: validate admin queue before unquiesce
  nbd: clear disconnected on reconnect
  nvme-pci: fix HMB size calculation
  nvme-fc: revise TRADDR parsing
  nvme-fc: address target disconnect race conditions in fcp io submit
  nvme: fabrics commands should use the fctype field for data direction
  nvme: also provide a UUID in the WWID sysfs attribute
  xen/blkfront: always allocate grants first from per-queue persistent grants
  xen-blkfront: fix mq start/stop race
  blk-mq: map queues to all present CPUs
  block: disable runtime-pm for blk-mq
  xen-blkfront: Fix handling of non-supported operations
  nbd: only set sndtimeo if we have a timeout set
  nbd: take tx_lock before disconnecting
  nbd: allow multiple disconnects to be sent
parents a2d48756 75cb8e93
block/blk-core.c (+4 −0)

@@ -3421,6 +3421,10 @@ EXPORT_SYMBOL(blk_finish_plug);
  */
 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
 {
+	/* not support for RQF_PM and ->rpm_status in blk-mq yet */
+	if (q->mq_ops)
+		return;
+
 	q->dev = dev;
 	q->rpm_status = RPM_ACTIVE;
 	pm_runtime_set_autosuspend_delay(q->dev, -1);
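With the guard above, blk_pm_runtime_init() returns before q->dev is assigned on a blk-mq queue, and the blk_pre/post_runtime_suspend/resume helpers bail out when q->dev is NULL, so such queues simply never runtime-suspend. A toy userspace model of that effect (all names invented for the sketch; only the early return mirrors the hunk):

#include <stdio.h>
#include <stdbool.h>

/* Toy model of the blk_pm_runtime_init() guard: a blk-mq queue is left
 * without runtime-PM state, so it can never be runtime-suspended and the
 * laptop-resume hang cannot trigger. */
struct toy_queue {
	bool mq_ops;		/* stands in for q->mq_ops */
	bool pm_managed;	/* stands in for q->dev being set */
};

static void toy_pm_runtime_init(struct toy_queue *q)
{
	if (q->mq_ops)		/* blk-mq: no RQF_PM/->rpm_status support yet */
		return;
	q->pm_managed = true;	/* legacy request path keeps runtime PM */
}

int main(void)
{
	struct toy_queue legacy = { .mq_ops = false };
	struct toy_queue mq     = { .mq_ops = true };

	toy_pm_runtime_init(&legacy);
	toy_pm_runtime_init(&mq);
	printf("legacy: pm_managed=%d, blk-mq: pm_managed=%d\n",
	       legacy.pm_managed, mq.pm_managed);	/* prints 1 and 0 */
	return 0;
}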
block/blk-mq-cpumap.c (+2 −2)

@@ -17,9 +17,9 @@
 static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
 {
 	/*
-	 * Non online CPU will be mapped to queue index 0.
+	 * Non present CPU will be mapped to queue index 0.
 	 */
-	if (!cpu_online(cpu))
+	if (!cpu_present(cpu))
 		return 0;
 	return cpu % nr_queues;
 }
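The queue map is built once, so basing it on cpu_online() pinned every CPU that was offline at map-build time to queue 0, even after it came online. Mapping by cpu_present() gives each such CPU a stable queue up front. A toy model of the before/after mapping for an assumed 4-CPU, 2-queue box (plain C, masks invented):

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS   4
#define NR_QUEUES 2

/* Made-up hotplug state: CPUs 2 and 3 are present but still offline
 * when the queue map is built. */
static const bool online[NR_CPUS]  = { true, true, false, false };
static const bool present[NR_CPUS] = { true, true, true,  true  };

/* Old mapping: offline CPUs all funnel into queue 0. */
static int map_by_online(int cpu)
{
	if (!online[cpu])
		return 0;
	return cpu % NR_QUEUES;
}

/* Fixed mapping: every present CPU gets a stable queue up front. */
static int map_by_present(int cpu)
{
	if (!present[cpu])
		return 0;
	return cpu % NR_QUEUES;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: old=%d new=%d\n", cpu,
		       map_by_online(cpu), map_by_present(cpu));
	/* cpu3 moves from queue 0 to queue 1 under the fix */
	return 0;
}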
drivers/block/nbd.c (+13 −5)

@@ -908,6 +908,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 			continue;
 		}
 		sk_set_memalloc(sock->sk);
-		sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+		if (nbd->tag_set.timeout)
+			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
 		atomic_inc(&config->recv_threads);
 		refcount_inc(&nbd->config_refs);
@@ -922,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 		mutex_unlock(&nsock->tx_lock);
 		sockfd_put(old);
 
+		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+
 		/* We take the tx_mutex in an error path in the recv_work, so we
 		 * need to queue_work outside of the tx_mutex.
 		 */
@@ -978,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd)
 	int i, ret;
 
 	for (i = 0; i < config->num_connections; i++) {
+		struct nbd_sock *nsock = config->socks[i];
+
 		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+		mutex_lock(&nsock->tx_lock);
 		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
 		if (ret <= 0)
 			dev_err(disk_to_dev(nbd->disk),
 				"Send disconnect failed %d\n", ret);
+		mutex_unlock(&nsock->tx_lock);
 	}
 }
@@ -991,8 +998,7 @@ static int nbd_disconnect(struct nbd_device *nbd)
 	struct nbd_config *config = nbd->config;
 
 	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
-	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
-			      &config->runtime_flags))
-		send_disconnects(nbd);
+	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+	send_disconnects(nbd);
 	return 0;
 }
@@ -1074,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd)
 			return -ENOMEM;
 		}
 		sk_set_memalloc(config->socks[i]->sock->sk);
-		config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+		if (nbd->tag_set.timeout)
+			config->socks[i]->sock->sk->sk_sndtimeo =
+				nbd->tag_set.timeout;
 		atomic_inc(&config->recv_threads);
 		refcount_inc(&nbd->config_refs);
 		INIT_WORK(&args->work, recv_work);
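Three nbd fixes land in this file, all visible directly in the hunks: the socket send timeout is only overridden when the user actually configured one, a reconnect clears NBD_DISCONNECTED again, and disconnects are both serialized against normal sends via tx_lock and made repeatable, since set_bit() no longer swallows a second NBD_DISCONNECT the way test_and_set_bit() did. A toy C11 model of that last change (names invented for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the nbd_disconnect() change.  With test_and_set_bit()
 * only the first ioctl ever reached send_disconnects(); if that packet
 * was lost, userspace had no way to retry.  Plain set_bit() keeps the
 * flag but still transmits on every call. */
static atomic_bool disconnect_requested;

static void send_disconnects_sim(void)
{
	puts("disconnect packet sent");
}

static void disconnect_old(void)
{
	/* atomic_exchange() models test_and_set_bit(): returns the old value */
	if (!atomic_exchange(&disconnect_requested, true))
		send_disconnects_sim();
}

static void disconnect_new(void)
{
	atomic_store(&disconnect_requested, true);	/* models set_bit() */
	send_disconnects_sim();
}

int main(void)
{
	disconnect_old();	/* sends */
	disconnect_old();	/* silently dropped: the old bug */
	disconnect_new();	/* sends */
	disconnect_new();	/* sends again: retry now works */
	return 0;
}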
drivers/block/xen-blkfront.c (+14 −11)

@@ -111,7 +111,7 @@ struct blk_shadow {
 };
 
 struct blkif_req {
-	int	error;
+	blk_status_t	error;
 };
 
 static inline struct blkif_req *blkif_req(struct request *rq)
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	 * existing persistent grants, or if we have to get new grants,
 	 * as there are not sufficiently many free.
 	 */
+	bool new_persistent_gnts = false;
 	struct scatterlist *sg;
 	int num_sg, max_grefs, num_grant;
@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 		 */
 		max_grefs += INDIRECT_GREFS(max_grefs);
 
-	/*
-	 * We have to reserve 'max_grefs' grants because persistent
-	 * grants are shared by all rings.
-	 */
-	if (max_grefs > 0)
-		if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
+	/* Check if we have enough persistent grants to allocate a requests */
+	if (rinfo->persistent_gnts_c < max_grefs) {
+		new_persistent_gnts = true;
+
+		if (gnttab_alloc_grant_references(
+		    max_grefs - rinfo->persistent_gnts_c,
+		    &setup.gref_head) < 0) {
 			gnttab_request_free_callback(
 				&rinfo->callback,
 				blkif_restart_queue_callback,
 				rinfo,
-				max_grefs);
+				max_grefs - rinfo->persistent_gnts_c);
 			return 1;
 		}
+	}
 
 	/* Fill out a communications ring structure. */
 	id = blkif_ring_get_request(rinfo, req, &ring_req);
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	if (unlikely(require_extra_req))
 		rinfo->shadow[extra_id].req = *extra_ring_req;
 
-	if (max_grefs > 0)
+	if (new_persistent_gnts)
 		gnttab_free_grant_references(setup.gref_head);
 
 	return 0;
@@ -906,8 +909,8 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_IOERR;
 
 out_busy:
-	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	blk_mq_stop_hw_queue(hctx);
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	return BLK_STS_RESOURCE;
 }
@@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				blkif_req(req)->error = -EOPNOTSUPP;
+				blkif_req(req)->error = BLK_STS_NOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
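Most of the churn above is grant accounting: instead of reserving max_grefs fresh grant references for every request, blkfront now reserves only what the per-queue persistent-grant cache cannot cover, and frees the reservation only when one was actually made (new_persistent_gnts). A toy model of the arithmetic (invented names, not the Xen grant API):

#include <stdio.h>

/* Toy model of the blkif_queue_rw_req() change: reserve only the
 * shortfall between the grants a request needs (max_grefs) and the
 * per-queue persistent-grant cache (persistent_gnts_c). */
static int grants_to_reserve(int max_grefs, int persistent_gnts_c)
{
	if (persistent_gnts_c >= max_grefs)
		return 0;			/* cache fully covers the request */
	return max_grefs - persistent_gnts_c;	/* reserve only the shortfall */
}

int main(void)
{
	/* the old code reserved all 11 grants in every case */
	printf("%d\n", grants_to_reserve(11, 0));	/* 11 */
	printf("%d\n", grants_to_reserve(11, 8));	/* 3  */
	printf("%d\n", grants_to_reserve(11, 16));	/* 0  */
	return 0;
}

The out_busy reorder is an independent fix: stopping the hardware queue while still holding ring_lock orders the stop against blkif_interrupt()'s restart, so the interrupt handler can no longer restart the queue just before blkif_queue_rq() stops it and leaves it stopped for good.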
drivers/lightnvm/pblk-rb.c (+2 −2)

@@ -657,7 +657,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
  * be directed to disk.
  */
 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-			struct ppa_addr ppa, int bio_iter)
+			struct ppa_addr ppa, int bio_iter, bool advanced_bio)
 {
 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
 	struct pblk_rb_entry *entry;
@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
 	 * filled with data from the cache). If part of the data resides on the
 	 * media, we will read later on
 	 */
-	if (unlikely(!bio->bi_iter.bi_idx))
+	if (unlikely(!advanced_bio))
 		bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
 
 	data = bio_data(bio);
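The pblk fix threads an explicit advanced_bio flag in from the caller instead of inferring from bio->bi_iter.bi_idx whether the bio was already advanced, since bi_idx does not reliably track how many lbas have been consumed. A toy model of the flag-threading pattern, assuming a caller loop that advances the bio one exposed page per processed lba (all names invented):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SZ 4096

struct toy_bio {
	int offset;		/* stands in for bio->bi_iter */
};

static void toy_advance(struct toy_bio *bio, int bytes)
{
	bio->offset += bytes;
}

/* Mirrors the fixed pblk_rb_copy_to_bio(): jump to the entry's position
 * only if the caller has not advanced the bio yet. */
static void toy_copy_to_bio(struct toy_bio *bio, int lba_idx,
			    bool advanced_bio)
{
	if (!advanced_bio)
		toy_advance(bio, lba_idx * PAGE_SZ);
	printf("lba %d copied at offset %d\n", lba_idx, bio->offset);
}

int main(void)
{
	struct toy_bio bio = { 0 };
	bool advanced_bio = false;
	bool in_cache[4] = { false, false, true, true };

	for (int i = 0; i < 4; i++) {
		if (in_cache[i]) {
			toy_copy_to_bio(&bio, i, advanced_bio);
			advanced_bio = true;
		}
		if (advanced_bio)	/* caller-side advance per lba */
			toy_advance(&bio, PAGE_SZ);
	}
	return 0;
}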