Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 69d3b8ac authored by Christoph Hellwig, committed by Jens Axboe
Browse files

nvme: synchronize access to ctrl->namespaces



Currently traversal and modification of ctrl->namespaces happens completely
unsynchronized, which can be fixed by the addition of a simple mutex.

Note: nvme_dev_ioctl will be handled in the next patch.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Acked-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 363c9aac
Loading
Loading
Loading
Loading
+17 −0
Original line number Original line Diff line number Diff line
@@ -1034,6 +1034,8 @@ static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
{
	struct nvme_ns *ns;
	struct nvme_ns *ns;


	lockdep_assert_held(&ctrl->namespaces_mutex);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->ns_id == nsid)
		if (ns->ns_id == nsid)
			return ns;
			return ns;
@@ -1049,6 +1051,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
	struct gendisk *disk;
	struct gendisk *disk;
	int node = dev_to_node(ctrl->dev);
	int node = dev_to_node(ctrl->dev);


	lockdep_assert_held(&ctrl->namespaces_mutex);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
	if (!ns)
		return;
		return;
@@ -1118,6 +1122,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
	bool kill = nvme_io_incapable(ns->ctrl) &&
	bool kill = nvme_io_incapable(ns->ctrl) &&
			!blk_queue_dying(ns->queue);
			!blk_queue_dying(ns->queue);


	lockdep_assert_held(&ns->ctrl->namespaces_mutex);

	if (kill)
	if (kill)
		blk_set_queue_dying(ns->queue);
		blk_set_queue_dying(ns->queue);
	if (ns->disk->flags & GENHD_FL_UP) {
	if (ns->disk->flags & GENHD_FL_UP) {
@@ -1188,6 +1194,8 @@ static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
	struct nvme_ns *ns, *next;
	struct nvme_ns *ns, *next;
	unsigned i;
	unsigned i;


	lockdep_assert_held(&ctrl->namespaces_mutex);

	for (i = 1; i <= nn; i++)
	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);
		nvme_validate_ns(ctrl, i);


@@ -1205,6 +1213,7 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
	if (nvme_identify_ctrl(ctrl, &id))
	if (nvme_identify_ctrl(ctrl, &id))
		return;
		return;


	mutex_lock(&ctrl->namespaces_mutex);
	nn = le32_to_cpu(id->nn);
	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1) &&
	if (ctrl->vs >= NVME_VS(1, 1) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -1214,6 +1223,7 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
	__nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
	__nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
 done:
 done:
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);
	kfree(id);
}
}


@@ -1221,8 +1231,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
{
	struct nvme_ns *ns, *next;
	struct nvme_ns *ns, *next;


	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
		nvme_ns_remove(ns);
	mutex_unlock(&ctrl->namespaces_mutex);
}
}


static DEFINE_IDA(nvme_instance_ida);
static DEFINE_IDA(nvme_instance_ida);
@@ -1290,6 +1302,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
	int ret;
	int ret;


	INIT_LIST_HEAD(&ctrl->namespaces);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->ops = ops;
@@ -1332,6 +1345,7 @@ void nvme_freeze_queues(struct nvme_ctrl *ctrl)
{
{
	struct nvme_ns *ns;
	struct nvme_ns *ns;


	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		blk_mq_freeze_queue_start(ns->queue);
		blk_mq_freeze_queue_start(ns->queue);


@@ -1342,18 +1356,21 @@ void nvme_freeze_queues(struct nvme_ctrl *ctrl)
		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_cancel_requeue_work(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
		blk_mq_stop_hw_queues(ns->queue);
	}
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
}


void nvme_unfreeze_queues(struct nvme_ctrl *ctrl)
void nvme_unfreeze_queues(struct nvme_ctrl *ctrl)
{
{
	struct nvme_ns *ns;
	struct nvme_ns *ns;


	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
		blk_mq_unfreeze_queue(ns->queue);
		blk_mq_unfreeze_queue(ns->queue);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_start_stopped_hw_queues(ns->queue, true);
		blk_mq_kick_requeue_list(ns->queue);
		blk_mq_kick_requeue_list(ns->queue);
	}
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
}


int __init nvme_core_init(void)
int __init nvme_core_init(void)
+1 −0
Original line number Original line Diff line number Diff line
@@ -69,6 +69,7 @@ struct nvme_ctrl {
	int instance;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct device *device;	/* char device */
	struct list_head node;
	struct list_head node;