
Commit 4884f8bf authored by Jens Axboe

Merge branch 'nvme-4.19' of git://git.infradead.org/nvme into for-4.19/block

Pull NVMe updates from Christoph:

"This should be the last round of NVMe updates before the 4.19 merge
 window opens.  It contains support for write protected (aka read-only)
 namespaces from Chaitanya, two ANA fixes from Hannes and a fabrics
 fix from Tal Shorer."

* 'nvme-4.19' of git://git.infradead.org/nvme:
  nvme-fabrics: fix ctrl_loss_tmo < 0 to reconnect forever
  nvmet: add ns write protect support
  nvme: set gendisk read only based on nsattr
  nvme.h: add support for ns write protect definitions
  nvme.h: fixup ANA group descriptor format
  nvme: fixup crash on failed discovery
parents cbb751c0 66414e80
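
For orientation before the diffs: the new namespace write protect support is driven over the wire with plain Set/Get Features admin commands (feature ID 0x84, Namespace Write Protection Config, for which the nvme.h patch adds the NVME_FEAT_WRITE_PROTECT definitions). Below is a minimal host-side sketch of setting it through the admin passthrough ioctl; the device path, nsid, and the locally defined SKETCH_* constants are illustrative assumptions, not part of this merge.

/*
 * Hedged sketch, not part of this merge: write-protect namespace 1 via
 * the Set Features admin command. Feature ID 0x84 and the CDW11 value
 * follow the NVMe Namespace Write Protection Config feature; /dev/nvme0
 * and nsid 1 are illustrative assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

#define SKETCH_ADMIN_SET_FEATURES	0x09	/* Set Features opcode */
#define SKETCH_FEAT_WRITE_PROTECT	0x84	/* Namespace Write Protection Config */
#define SKETCH_NS_WRITE_PROTECT		1	/* CDW11: write protect */

int main(void)
{
	struct nvme_admin_cmd cmd;
	int fd = open("/dev/nvme0", O_RDWR);	/* controller admin character device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SKETCH_ADMIN_SET_FEATURES;
	cmd.nsid = 1;				/* namespace to protect */
	cmd.cdw10 = SKETCH_FEAT_WRITE_PROTECT;	/* CDW10: feature identifier */
	cmd.cdw11 = SKETCH_NS_WRITE_PROTECT;	/* CDW11: requested state */

	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0) {
		perror("NVME_IOCTL_ADMIN_CMD");
		return 1;
	}
	puts("namespace write protect requested");
	return 0;
}

With a recent nvme-cli this should correspond to something like "nvme set-feature /dev/nvme0 -n 1 -f 0x84 -v 1".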
drivers/nvme/host/core.c  +6 −0
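
The hunk below is what surfaces nsattr bit 0 to the block layer: set_disk_ro() marks the gendisk read-only, and userspace can observe that through the standard BLKROGET ioctl (the same call blockdev --getro makes). A small sketch, assuming an illustrative /dev/nvme0n1 path:

/*
 * Hedged sketch: check whether the block layer now reports the namespace
 * read-only. /dev/nvme0n1 is an illustrative path.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int ro = 0;
	int fd = open("/dev/nvme0n1", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKROGET, &ro) < 0) {	/* what blockdev --getro uses */
		perror("BLKROGET");
		return 1;
	}
	printf("read-only: %d\n", ro);		/* 1 once nsattr bit 0 is set */
	return 0;
}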
@@ -1484,6 +1484,12 @@ static void nvme_update_disk_info(struct gendisk *disk,
 
 	set_capacity(disk, capacity);
 	nvme_config_discard(ns);
+
+	if (id->nsattr & (1 << 0))
+		set_disk_ro(disk, true);
+	else
+		set_disk_ro(disk, false);
+
 	blk_mq_unfreeze_queue(disk->queue);
 }
 

drivers/nvme/host/fabrics.c  +1 −1
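
The one-line change below restores the documented meaning of a negative ctrl_loss_tmo: fabrics option parsing turns it into max_reconnects == -1, i.e. reconnect forever, but the old conjunction made -1 fail the test so reconnect attempts stopped immediately. A standalone sketch of the corrected predicate (plain C with illustrative values, not kernel code):

/*
 * Standalone sketch of the fixed predicate, not kernel code: a
 * max_reconnects of -1 encodes "retry forever" and must short-circuit
 * the test before the attempt counter is compared.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_reconnect(int max_reconnects, int nr_reconnects)
{
	return max_reconnects == -1 ||		/* ctrl_loss_tmo < 0: never give up */
	       nr_reconnects < max_reconnects;
}

int main(void)
{
	/* The old "!= -1 &&" form returned false here, ending reconnects. */
	printf("%d\n", should_reconnect(-1, 100));	/* 1: keep trying */
	printf("%d\n", should_reconnect(3, 3));		/* 0: budget spent */
	return 0;
}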
@@ -474,7 +474,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
 
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
 {
-	if (ctrl->opts->max_reconnects != -1 &&
+	if (ctrl->opts->max_reconnects == -1 ||
 	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
 		return true;
 

drivers/nvme/host/multipath.c  +1 −1
@@ -22,7 +22,7 @@ MODULE_PARM_DESC(multipath,
 
 inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 {
-	return multipath && (ctrl->subsys->cmic & (1 << 3));
+	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
 }
 
 /*

drivers/nvme/target/admin-cmd.c  +76 −0
@@ -372,6 +372,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->psd[0].entry_lat = cpu_to_le32(0x10);
 	id->psd[0].exit_lat = cpu_to_le32(0x4);
 
+	id->nwpc = 1 << 0; /* write protect and no write protect */
+
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 	kfree(id);
@@ -433,6 +435,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
 	id->lbaf[0].ds = ns->blksize_shift;
 
+	if (ns->readonly)
+		id->nsattr |= (1 << 0);
 	nvmet_put_namespace(ns);
 done:
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -545,6 +549,52 @@ static void nvmet_execute_abort(struct nvmet_req *req)
 	nvmet_req_complete(req, 0);
 }
 
+static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
+{
+	u16 status;
+
+	if (req->ns->file)
+		status = nvmet_file_flush(req);
+	else
+		status = nvmet_bdev_flush(req);
+
+	if (status)
+		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
+	return status;
+}
+
+static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
+{
+	u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+
+	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
+	if (unlikely(!req->ns))
+		return status;
+
+	mutex_lock(&subsys->lock);
+	switch (write_protect) {
+	case NVME_NS_WRITE_PROTECT:
+		req->ns->readonly = true;
+		status = nvmet_write_protect_flush_sync(req);
+		if (status)
+			req->ns->readonly = false;
+		break;
+	case NVME_NS_NO_WRITE_PROTECT:
+		req->ns->readonly = false;
+		status = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (!status)
+		nvmet_ns_changed(subsys, req->ns->nsid);
+	mutex_unlock(&subsys->lock);
+	return status;
+}
+
 static void nvmet_execute_set_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -575,6 +625,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 	case NVME_FEAT_HOST_ID:
 		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 		break;
+	case NVME_FEAT_WRITE_PROTECT:
+		status = nvmet_set_feat_write_protect(req);
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
@@ -583,6 +636,26 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }
 
+static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
+{
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u32 result;
+
+	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
+	if (!req->ns)
+		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+	mutex_lock(&subsys->lock);
+	if (req->ns->readonly == true)
+		result = NVME_NS_WRITE_PROTECT;
+	else
+		result = NVME_NS_NO_WRITE_PROTECT;
+	nvmet_set_result(req, result);
+	mutex_unlock(&subsys->lock);
+
+	return 0;
+}
+
 static void nvmet_execute_get_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -634,6 +707,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
 				sizeof(req->sq->ctrl->hostid));
 		break;
+	case NVME_FEAT_WRITE_PROTECT:
+		status = nvmet_get_feat_write_protect(req);
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
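
As a companion to the Get Features handler above: the target reports the current state in completion dword 0 via nvmet_set_result(), and the host-side admin passthrough ioctl surfaces that dword in its result field. A hedged readback sketch, reusing the illustrative conventions of the earlier Set Features example:

/*
 * Hedged sketch: read back the write protection state with Get Features.
 * Constants are defined locally as in the Set Features sketch above;
 * device path and nsid remain illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

#define SKETCH_ADMIN_GET_FEATURES	0x0a	/* Get Features opcode */
#define SKETCH_FEAT_WRITE_PROTECT	0x84	/* Namespace Write Protection Config */

int main(void)
{
	struct nvme_admin_cmd cmd;
	int fd = open("/dev/nvme0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SKETCH_ADMIN_GET_FEATURES;
	cmd.nsid = 1;
	cmd.cdw10 = SKETCH_FEAT_WRITE_PROTECT;

	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0) {
		perror("NVME_IOCTL_ADMIN_CMD");
		return 1;
	}
	/* 0 = no write protect, 1 = write protect; see the nvme.h additions */
	printf("write protect state: %u\n", cmd.result);
	return 0;
}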
drivers/nvme/target/core.c  +19 −1
@@ -180,7 +180,7 @@ static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
 	mutex_unlock(&ctrl->lock);
 }
 
-static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
 {
 	struct nvmet_ctrl *ctrl;
 
@@ -609,6 +609,21 @@ static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
 	return 0;
 }
 
+static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
+{
+	if (unlikely(req->ns->readonly)) {
+		switch (req->cmd->common.opcode) {
+		case nvme_cmd_read:
+		case nvme_cmd_flush:
+			break;
+		default:
+			return NVME_SC_NS_WRITE_PROTECTED;
+		}
+	}
+
+	return 0;
+}
+
 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 {
 	struct nvme_command *cmd = req->cmd;
@@ -622,6 +637,9 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 	if (unlikely(!req->ns))
 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
 	ret = nvmet_check_ana_state(req->port, req->ns);
+	if (unlikely(ret))
+		return ret;
+	ret = nvmet_io_cmd_check_access(req);
 	if (unlikely(ret))
 		return ret;
 