Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c7d4945 authored by Matthew Wilcox
Browse files

NVMe: Change the definition of nvme_user_io



The read and write commands don't define a 'result', so there's no need
to copy it back to userspace.

Remove the ability of the ioctl to submit commands to a different
namespace; it's just asking for trouble, and the use case I have in mind
will be addressed through a different ioctl in the future.  That removes
the need for both the block_shift and nsid arguments.

Check that the opcode is one of 'read' or 'write'.  Other opcodes may
be added in the future, but they will need a different structure
definition.

The nblocks field is redefined to be 0-based.  This allows the user to
request the full 65536 blocks.

Don't byteswap the reftag, apptag and appmask.  Martin Petersen tells
me these are calculated in big-endian and are transmitted to the device
in big-endian.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
parent 9d4af1b7
Loading
Loading
Loading
Loading
+17 −10
Original line number Diff line number Diff line
@@ -1035,29 +1035,37 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length;
	u32 result;
	int nents, status;
	struct scatterlist *sg;
	struct nvme_prps *prps;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	length = io.nblocks << io.block_shift;
	nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length, &sg);
	length = (io.nblocks + 1) << ns->lba_shift;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
		nents = nvme_map_user_pages(dev, io.opcode & 1, io.addr,
								length, &sg);
	default:
		return -EFAULT;
	}

	if (nents < 0)
		return nents;

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(io.nsid);
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks - 1);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);	/* XXX: endian? */
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);
	c.rw.reftag = io.reftag;
	c.rw.apptag = io.apptag;
	c.rw.appmask = io.appmask;
	/* XXX: metadata */
	prps = nvme_setup_prps(dev, &c.common, sg, length);

@@ -1069,11 +1077,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
	 * additional races since q_lock already protects against other CPUs.
	 */
	put_nvmeq(nvmeq);
	status = nvme_submit_sync_cmd(nvmeq, &c, &result, IO_TIMEOUT);
	status = nvme_submit_sync_cmd(nvmeq, &c, NULL, IO_TIMEOUT);

	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, sg, nents);
	nvme_free_prps(dev, prps);
	put_user(result, &uio->result);
	return status;
}

+3 −5
Original line number Diff line number Diff line
@@ -373,17 +373,15 @@ struct nvme_user_io {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u32	nsid;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u16	nblocks;
	__u16	block_shift;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
	__u32	result;
};

struct nvme_dlfw {
@@ -395,7 +393,7 @@ struct nvme_dlfw {
#define NVME_IOCTL_IDENTIFY_NS	_IOW('N', 0x40, struct nvme_id_ns)
#define NVME_IOCTL_IDENTIFY_CTRL _IOW('N', 0x41, struct nvme_id_ctrl)
#define NVME_IOCTL_GET_RANGE_TYPE _IOW('N', 0x42, struct nvme_lba_range_type)
#define NVME_IOCTL_SUBMIT_IO	_IOWR('N', 0x43, struct nvme_user_io)
#define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x43, struct nvme_user_io)
#define NVME_IOCTL_DOWNLOAD_FW	_IOW('N', 0x44, struct nvme_dlfw)
#define NVME_IOCTL_ACTIVATE_FW	_IO('N', 0x45)