Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit be597e97 authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge branch 'nvme-4.15' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"A couple nvme fixes for 4.15:

 - expand the queue ready fix that we only had for RDMA to also cover FC and
   loop by moving it to common code (Sagi)
 - fix an array out of bounds in the PCIe HMB code (Minwoo Im)
 - two new device quirks (Jeff Lien and Kai-Heng Feng)
 - static checker fixes (Keith Busch)
 - FC target refcount fix (James Smart)
 - A trivial spelling fix in new code (Colin Ian King)"
parents f341a4d3 8c97eecc
Loading
Loading
Loading
Loading
+8 −11
Original line number Diff line number Diff line
@@ -1449,18 +1449,18 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
	int srcu_idx, ret;
	u8 data[16] = { 0, };

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(head->ns_id);
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		ret = -EWOULDBLOCK;
	else
	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
@@ -2961,8 +2961,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)

static void nvme_ns_remove(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

@@ -2980,7 +2978,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)

	mutex_lock(&ns->ctrl->subsys->lock);
	nvme_mpath_clear_current_path(ns);
	if (head)
	list_del_rcu(&ns->siblings);
	mutex_unlock(&ns->ctrl->subsys->lock);

@@ -2988,7 +2985,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	synchronize_srcu(&head->srcu);
	synchronize_srcu(&ns->head->srcu);
	nvme_put_ns(ns);
}

+30 −0
Original line number Diff line number Diff line
@@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);

static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
		struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;

	/*
	 * Until the connect command has completed, the connect command is the
	 * only command the controller may accept, so let it pass immediately.
	 */
	if (blk_rq_is_passthrough(rq) &&
	    cmd->common.opcode == nvme_fabrics_command &&
	    cmd->fabrics.fctype == nvme_fabrics_type_connect)
		return BLK_STS_OK;

	/*
	 * Reconnecting state means transport disruption, which can take a
	 * long time and even might fail permanently; deleting state means
	 * the controller will never accept commands again.  In both cases
	 * fail fast/permanently so upper layers get a chance to fail over.
	 */
	if (ctrl->state == NVME_CTRL_RECONNECTING ||
	    ctrl->state == NVME_CTRL_DELETING) {
		nvme_req(rq)->status = NVME_SC_ABORT_REQ;
		return BLK_STS_IOERR;
	}

	/* Controller is still initializing: ask the caller to retry later. */
	return BLK_STS_RESOURCE;
}

#endif /* _NVME_FABRICS_H */
+18 −1
Original line number Diff line number Diff line
@@ -32,6 +32,7 @@

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = (1 << 0),
	NVME_FC_Q_LIVE = (1 << 1),
};

#define NVMEFC_QUEUE_DELAY	3		/* ms units */
@@ -1927,6 +1928,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
	 */

	queue->connection_id = 0;
	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}

static void
@@ -2013,6 +2014,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
@@ -2320,6 +2323,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	return BLK_STS_RESOURCE;
}

/*
 * Fast-path gate: a live queue accepts any request; a queue that is not
 * yet live only accepts what the common fabrics init check permits.
 */
static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
		struct request *rq)
{
	if (likely(test_bit(NVME_FC_Q_LIVE, &queue->flags)))
		return BLK_STS_OK;
	return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
}

static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
	u32 data_len;
	blk_status_t ret;

	ret = nvme_fc_is_ready(queue, rq);
	if (unlikely(ret))
		return ret;

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;
@@ -2727,6 +2742,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
+1 −1
Original line number Diff line number Diff line
@@ -131,7 +131,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		bio->bi_opf |= REQ_NVME_MPATH;
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
+1 −1
Original line number Diff line number Diff line
@@ -114,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2000
#define NVME_QUIRK_DELAY_AMOUNT		2300

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
Loading