Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 88058417 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-linus-20181201' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:

 - Single range elevator discard merge fix, that caused crashes (Ming)

 - Fix for a regression in O_DIRECT, where we could potentially lose the
   error value (Maximilian Heyne)

 - NVMe pull request from Christoph, with little fixes all over the map
   for NVMe.

* tag 'for-linus-20181201' of git://git.kernel.dk/linux-block:
  block: fix single range discard merge
  nvme-rdma: fix double freeing of async event data
  nvme: flush namespace scanning work just before removing namespaces
  nvme: warn when finding multi-port subsystems without multipathing enabled
  fs: fix lost error code in dio_complete
  nvme-pci: fix surprise removal
  nvme-fc: initialize nvme_req(rq)->ctrl after calling __nvme_fc_init_request()
  nvme: Free ctrl device name on init failure
parents c734b425 1c9b357c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -820,7 +820,7 @@ static struct request *attempt_merge(struct request_queue *q,

	req->__data_len += blk_rq_bytes(next);

	if (req_op(req) != REQ_OP_DISCARD)
	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
+5 −3
Original line number Diff line number Diff line
@@ -3314,6 +3314,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead states indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
@@ -3476,7 +3479,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
	nvme_mpath_stop(ctrl);
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	cancel_work_sync(&ctrl->fw_act_work);
	if (ctrl->ops->stop_ctrl)
		ctrl->ops->stop_ctrl(ctrl);
@@ -3585,7 +3587,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,

	return 0;
out_free_name:
	kfree_const(dev->kobj.name);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
@@ -3607,7 +3609,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q)
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list)
+1 −1
Original line number Diff line number Diff line
@@ -1752,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	nvme_req(rq)->ctrl = &ctrl->ctrl;
	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = &op->sgl[0];
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	return res;
}

+3 −0
Original line number Diff line number Diff line
@@ -531,6 +531,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	/*
	 * Stub used when CONFIG_NVME_MULTIPATH is disabled: there is no
	 * multipath state to initialize, so always succeed.  Warn if the
	 * subsystem sets CMIC bit 3 — per the message text this marks a
	 * multi-port subsystem (NOTE(review): bit 3 of CMIC is ANA support
	 * in NVMe 1.4; confirm the intended bit against the spec) — since
	 * this build cannot use its additional paths.
	 */
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+2 −0
Original line number Diff line number Diff line
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		qe->data = NULL;
		return -ENOMEM;
	}

@@ -823,6 +824,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
out_free_async_qe:
	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
		sizeof(struct nvme_command), DMA_TO_DEVICE);
	ctrl->async_event_sqe.data = NULL;
out_free_queue:
	nvme_rdma_free_queue(&ctrl->queues[0]);
	return error;
Loading