Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9b03c1ea authored by Zhen Kong's avatar Zhen Kong
Browse files

msm: crypto: Add support for map and unmap ioctls



Expose map and unmap ioctls to userspace. These
ioctls can be used by userspace clients to map
ion memory and get virtual address to be consumed
by the Crypto hardware.

CRs-Fixed: 1072414
Change-Id: I141b984a65aefcfc676685dffe19e6c8d022f2e1
Signed-off-by: default avatarSonal Gupta <sonalg@codeaurora.org>
Signed-off-by: default avatarZhen Kong <zkong@codeaurora.org>
parent c603184d
Loading
Loading
Loading
Loading
+113 −0
Original line number Diff line number Diff line
@@ -254,6 +254,75 @@ static int compat_put_qcedev_cipher_op_req(
	return err;
}

/*
 * compat_xfer_qcedev_map_buf_req() - copy a map request between the
 * 32-bit compat layout and the native layout, field by field.
 * @data32:  userspace pointer to the compat struct.
 * @data:    userspace pointer to the native struct.
 * @to_get:  true  = import compat -> native (before the ioctl runs),
 *           false = export native -> compat (after the ioctl runs).
 *
 * Returns 0 on success, non-zero if any get_user/put_user faulted
 * (errors are OR-accumulated, so only zero/non-zero is meaningful).
 */
static int compat_xfer_qcedev_map_buf_req(
			struct compat_qcedev_map_buf_req __user *data32,
			struct qcedev_map_buf_req __user *data, bool to_get)
{
	int rc = 0, i = 0, fd = -1;
	uint32_t fd_size, fd_offset, num_fds;
	/*
	 * buf_vaddr is compat_u64 in the compat struct (see the header);
	 * staging it through a 32-bit local would truncate the returned
	 * device address, so use a 64-bit local.
	 */
	uint64_t buf_vaddr;

	if (to_get) {
		/* copy from compat struct */
		for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
			rc |= get_user(fd, &data32->fd[i]);
			rc |= put_user(fd, &data->fd[i]);
			rc |= get_user(fd_size, &data32->fd_size[i]);
			rc |= put_user(fd_size, &data->fd_size[i]);
			rc |= get_user(fd_offset, &data32->fd_offset[i]);
			rc |= put_user(fd_offset, &data->fd_offset[i]);
			rc |= get_user(buf_vaddr, &data32->buf_vaddr[i]);
			rc |= put_user(buf_vaddr, &data->buf_vaddr[i]);
		}

		rc |= get_user(num_fds, &data32->num_fds);
		rc |= put_user(num_fds, &data->num_fds);
	} else {
		/* copy to compat struct */
		for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
			rc |= get_user(fd, &data->fd[i]);
			rc |= put_user(fd, &data32->fd[i]);
			rc |= get_user(fd_size, &data->fd_size[i]);
			rc |= put_user(fd_size, &data32->fd_size[i]);
			rc |= get_user(fd_offset, &data->fd_offset[i]);
			rc |= put_user(fd_offset, &data32->fd_offset[i]);
			rc |= get_user(buf_vaddr, &data->buf_vaddr[i]);
			rc |= put_user(buf_vaddr, &data32->buf_vaddr[i]);
		}
		rc |= get_user(num_fds, &data->num_fds);
		rc |= put_user(num_fds, &data32->num_fds);
	}

	return rc;
}

/*
 * compat_xfer_qcedev_unmap_buf_req() - copy an unmap request between
 * the 32-bit compat layout and the native layout.
 * @data32:  userspace pointer to the compat struct.
 * @data:    userspace pointer to the native struct.
 * @to_get:  true = compat -> native, false = native -> compat.
 *
 * Returns 0 on success, non-zero if any user-copy faulted.
 */
static int compat_xfer_qcedev_unmap_buf_req(
			struct compat_qcedev_unmap_buf_req __user *data32,
			struct qcedev_unmap_buf_req __user *data, bool to_get)
{
	uint32_t num_fds;
	int ret = 0, idx, fd = -1;

	if (!to_get) {
		/* export results back to the 32-bit caller */
		for (idx = 0; idx < QCEDEV_MAX_BUFFERS; idx++) {
			ret |= get_user(fd, &data->fd[idx]);
			ret |= put_user(fd, &data32->fd[idx]);
		}
		ret |= get_user(num_fds, &data->num_fds);
		ret |= put_user(num_fds, &data32->num_fds);
	} else {
		/* import the 32-bit caller's request */
		for (idx = 0; idx < QCEDEV_MAX_BUFFERS; idx++) {
			ret |= get_user(fd, &data32->fd[idx]);
			ret |= put_user(fd, &data->fd[idx]);
		}
		ret |= get_user(num_fds, &data32->num_fds);
		ret |= put_user(num_fds, &data->num_fds);
	}

	return ret;
}


static int compat_get_qcedev_sha_op_req(
		struct compat_qcedev_sha_op_req __user *data32,
		struct qcedev_sha_op_req __user *data)
@@ -359,6 +428,10 @@ static unsigned int convert_cmd(unsigned int cmd)
		return QCEDEV_IOCTL_GET_SHA_REQ;
	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
		return QCEDEV_IOCTL_GET_CMAC_REQ;
	case COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ:
		return QCEDEV_IOCTL_MAP_BUF_REQ;
	case COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ:
		return QCEDEV_IOCTL_UNMAP_BUF_REQ;
	default:
		return cmd;
	}
@@ -412,6 +485,46 @@ long compat_qcedev_ioctl(struct file *file,
		err = compat_put_qcedev_sha_op_req(data32, data);
		return ret ? ret : err;
	}
	case COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ: {
		struct compat_qcedev_map_buf_req __user *data32;
		struct qcedev_map_buf_req __user *data;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data)
			return -EINVAL;

		err = compat_xfer_qcedev_map_buf_req(data32, data, true);
		if (err)
			return err;

		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
		err = compat_xfer_qcedev_map_buf_req(data32, data, false);
		return ret ? ret : err;

		break;
	}
	case COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ: {
		struct compat_qcedev_unmap_buf_req __user *data32;
		struct qcedev_unmap_buf_req __user *data;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data)
			return -EINVAL;

		err = compat_xfer_qcedev_unmap_buf_req(data32, data, true);
		if (err)
			return err;

		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
		err = compat_xfer_qcedev_unmap_buf_req(data32, data, false);
		return ret ? ret : err;

		break;
	}
	default:
		return -ENOIOCTLCMD;
	}
+31 −1
Original line number Diff line number Diff line
@@ -151,6 +151,33 @@ struct compat_qcedev_sha_op_req {
	enum qcedev_sha_alg_enum		alg;
};

/**
 * struct compat_qcedev_map_buf_req - Holds the mapping request information
 * fd (IN):            Array of fds.
 * num_fds (IN):       Number of fds in fd[].
 * fd_size (IN):       Array of sizes corresponding to each fd in fd[].
 * fd_offset (IN):     Array of offset corresponding to each fd in fd[].
 * buf_vaddr (OUT):    Array of mapped virtual address corresponding to
 *                     each fd in fd[].
 */
struct compat_qcedev_map_buf_req {
	compat_long_t	fd[QCEDEV_MAX_BUFFERS];
	compat_ulong_t	num_fds;
	compat_ulong_t	fd_size[QCEDEV_MAX_BUFFERS];
	compat_ulong_t	fd_offset[QCEDEV_MAX_BUFFERS];
	compat_u64      buf_vaddr[QCEDEV_MAX_BUFFERS];
};

/**
 * struct compat_qcedev_unmap_buf_req - Holds the unmap request information
 * fd (IN):	       Array of fds to unmap
 * num_fds (IN):       Number of fds in fd[].
 */
struct	compat_qcedev_unmap_buf_req {
	compat_long_t	fd[QCEDEV_MAX_BUFFERS];
	compat_ulong_t	num_fds;
};

struct file;
extern long compat_qcedev_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg);
@@ -173,6 +200,9 @@ extern long compat_qcedev_ioctl(struct file *file,
	_IO(QCEDEV_IOC_MAGIC, 8)
#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req)

#define COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 10, struct compat_qcedev_map_buf_req)
#define COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ \
	_IOWR(QCEDEV_IOC_MAGIC, 11, struct compat_qcedev_unmap_buf_req)
#endif /* CONFIG_COMPAT */
#endif /* _UAPI_COMPAT_QCEDEV__H */
+71 −1
Original line number Diff line number Diff line
@@ -268,6 +268,9 @@ static int qcedev_open(struct inode *inode, struct file *file)
	file->private_data = handle;
	if (podev->platform_support.bus_scale_table != NULL)
		qcedev_ce_high_bw_req(podev, true);

	mutex_init(&handle->registeredbufs.lock);
	INIT_LIST_HEAD(&handle->registeredbufs.list);
	return 0;
}

@@ -1865,6 +1868,62 @@ static inline long qcedev_ioctl(struct file *file,
		}
		break;

	case QCEDEV_IOCTL_MAP_BUF_REQ:
		{
			unsigned long long vaddr = 0;
			struct qcedev_map_buf_req map_buf = { {0} };
			int i = 0;

			if (copy_from_user(&map_buf,
					(void __user *)arg, sizeof(map_buf)))
				return -EFAULT;

			for (i = 0; i < map_buf.num_fds; i++) {
				err = qcedev_check_and_map_buffer(handle,
						map_buf.fd[i],
						map_buf.fd_offset[i],
						map_buf.fd_size[i],
						&vaddr);
				if (err) {
					pr_err(
						"%s: err: failed to map fd(%d) - %d\n",
						__func__, map_buf.fd[i], err);
					return err;
				}
				map_buf.buf_vaddr[i] = vaddr;
				pr_info("%s: info: vaddr = %llx\n",
					__func__, vaddr);
			}

			if (copy_to_user((void __user *)arg, &map_buf,
					sizeof(map_buf)))
				return -EFAULT;
			break;
		}

	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
		{
			struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
			int i = 0;

			if (copy_from_user(&unmap_buf,
					(void __user *)arg, sizeof(unmap_buf)))
				return -EFAULT;

			for (i = 0; i < unmap_buf.num_fds; i++) {
				err = qcedev_check_and_unmap_buffer(handle,
						unmap_buf.fd[i]);
				if (err) {
					pr_err(
						"%s: err: failed to unmap fd(%d) - %d\n",
						 __func__,
						unmap_buf.fd[i], err);
					return err;
				}
			}
			break;
		}

	default:
		return -ENOTTY;
	}
@@ -1976,16 +2035,27 @@ static int qcedev_probe_device(struct platform_device *pdev)
		podev->platform_support.sha_hmac = platform_support->sha_hmac;
	}

	podev->mem_client = qcedev_mem_new_client(MEM_ION);
	if (!podev->mem_client) {
		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
		goto exit_qce_close;
	}

	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
			NULL, &pdev->dev);
	if (rc) {
		pr_err("%s: err: of_platform_populate failed: %d\n",
			__func__, rc);
		goto exit_qce_close;
		goto exit_mem_new_client;
	}

	return 0;

exit_mem_new_client:
	if (podev->mem_client)
		qcedev_mem_delete_client(podev->mem_client);
	podev->mem_client = NULL;

exit_qce_close:
	if (handle)
		qce_close(handle);
+326 −0
Original line number Diff line number Diff line
@@ -139,3 +139,329 @@ int qcedev_parse_context_bank(struct platform_device *pdev)
	list_del(&cb->list);
	return rc;
}

/*
 * qcedev_mem_new_client() - allocate a memory client of the given type.
 * Only MEM_ION is supported. Returns NULL on unsupported type or on
 * allocation failure; free with qcedev_mem_delete_client().
 */
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
{
	struct qcedev_mem_client *client;

	if (mtype != MEM_ION) {
		pr_err("%s: err: Mem type not supported\n", __func__);
		return NULL;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client)
		client->mtype = mtype;

	return client;
}

/* Free a client allocated by qcedev_mem_new_client(); NULL is a no-op. */
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
{
	kfree(mem_client);
}

static bool is_iommu_present(struct qcedev_handle *qce_hndl)
{
	return !list_empty(&qce_hndl->cntl->context_banks);
}

/*
 * get_context_bank() - find the first context bank whose security
 * attribute matches @is_secure, or NULL if none is registered.
 */
static struct context_bank_info *get_context_bank(
		struct qcedev_handle *qce_hndl, bool is_secure)
{
	struct context_bank_info *entry;

	list_for_each_entry(entry, &qce_hndl->cntl->context_banks, list) {
		if (entry->is_secure == is_secure)
			return entry;
	}

	return NULL;
}

/*
 * ion_map_buffer() - map a dma-buf/ion fd for DMA by the crypto device.
 * @qce_hndl:   driver handle, used to locate the matching context bank.
 * @mem_client: memory client (not used inside this helper).
 * @fd:         dma-buf file descriptor supplied by the caller.
 * @fd_size:    minimum size the resulting mapping must cover.
 * @binfo:      out: ion_buf is filled with the iova, mapped size and the
 *              dma-buf/attachment/table handles needed to undo the
 *              mapping later (see ion_unmap_buffer()).
 *
 * Returns 0 on success, negative errno on failure. On failure every
 * reference taken here is released through the goto cleanup chain
 * (unmap attachment -> detach -> put dma-buf, in that order).
 */
static int ion_map_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client, int fd,
		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
	unsigned long ion_flags = 0;
	int rc = 0;
	struct dma_buf *buf = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	/* Takes a reference on the dma-buf; dropped at map_err on error. */
	buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(buf))
		return -EINVAL;

	rc = dma_buf_get_flags(buf, &ion_flags);
	if (rc) {
		pr_err("%s: err: failed to get ion flags: %d\n", __func__, rc);
		goto map_err;
	}

	if (is_iommu_present(qce_hndl)) {
		/* Pick the secure or non-secure context bank per the
		 * buffer's ION flags.
		 */
		cb = get_context_bank(qce_hndl, ion_flags & ION_FLAG_SECURE);
		if (!cb) {
			pr_err("%s: err: failed to get context bank info\n",
				__func__);
			rc = -EIO;
			goto map_err;
		}

		/* Prepare a dma buf for dma on the given device */
		attach = dma_buf_attach(buf, cb->dev);
		if (IS_ERR_OR_NULL(attach)) {
			/* attach may be NULL, not an ERR_PTR; fall back to
			 * -ENOMEM in that case.
			 */
			rc = PTR_ERR(attach) ?: -ENOMEM;
			pr_err("%s: err: failed to attach dmabuf\n", __func__);
			goto map_err;
		}

		/* Get the scatterlist for the given attachment */
		/* NOTE(review): DMA_ATTR_DELAYED_UNMAP is MSM-specific;
		 * presumably it defers the IOMMU unmap until buffer
		 * release — confirm against msm_dma_iommu_mapping docs.
		 */
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(table)) {
			rc = PTR_ERR(table) ?: -ENOMEM;
			pr_err("%s: err: failed to map table\n", __func__);
			goto map_table_err;
		}

		/* Only the first sg entry is consumed, so the mapping must
		 * be IOMMU-contiguous and at least fd_size bytes long.
		 */
		if (table->sgl) {
			binfo->ion_buf.iova = sg_dma_address(table->sgl);
			binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
			if (binfo->ion_buf.mapped_buf_size < fd_size) {
				pr_err("%s: err: mapping failed, size mismatch",
						__func__);
				rc = -ENOMEM;
				goto map_sg_err;
			}
		} else {
			pr_err("%s: err: sg list is NULL\n", __func__);
			rc = -ENOMEM;
			goto map_sg_err;
		}

		/* Record everything ion_unmap_buffer() needs for teardown. */
		binfo->ion_buf.mapping_info.dev = cb->dev;
		binfo->ion_buf.mapping_info.mapping = cb->mapping;
		binfo->ion_buf.mapping_info.table = table;
		binfo->ion_buf.mapping_info.attach = attach;
		binfo->ion_buf.mapping_info.buf = buf;
		binfo->ion_buf.ion_fd = fd;
	} else {
		pr_err("%s: err: smmu not enabled\n", __func__);
		rc = -EIO;
		goto map_err;
	}

	return 0;

map_sg_err:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
map_table_err:
	dma_buf_detach(buf, attach);
map_err:
	dma_buf_put(buf);
	return rc;
}

/*
 * ion_unmap_buffer() - undo ion_map_buffer(): release the sg table, the
 * attachment and the dma-buf reference. No-op when no IOMMU is present.
 * Always returns 0.
 */
static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_reg_buf_info *binfo)
{
	struct dma_mapping_info *info = &binfo->ion_buf.mapping_info;

	if (!is_iommu_present(qce_hndl))
		return 0;

	dma_buf_unmap_attachment(info->attach, info->table,
			DMA_BIDIRECTIONAL);
	dma_buf_detach(info->buf, info->attach);
	dma_buf_put(info->buf);

	return 0;
}

/*
 * qcedev_map_buffer() - dispatch a map request to the backend matching
 * the client's memory type. Only MEM_ION is supported.
 * Returns the backend's result, or -1 for an unsupported type.
 */
static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client, int fd,
		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
	int rc;

	if (mem_client->mtype == MEM_ION) {
		rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
	} else {
		pr_err("%s: err: Mem type not supported\n", __func__);
		rc = -1;
	}

	if (rc)
		pr_err("%s: err: failed to map buffer\n", __func__);

	return rc;
}

/*
 * qcedev_unmap_buffer() - dispatch an unmap request to the backend
 * matching the client's memory type. Only MEM_ION is supported.
 * Returns the backend's result, or -1 for an unsupported type.
 */
static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client,
		struct qcedev_reg_buf_info *binfo)
{
	int rc;

	if (mem_client->mtype == MEM_ION) {
		rc = ion_unmap_buffer(qce_hndl, binfo);
	} else {
		pr_err("%s: err: Mem type not supported\n", __func__);
		rc = -1;
	}

	if (rc)
		pr_err("%s: err: failed to unmap buffer\n", __func__);

	return rc;
}

/*
 * qcedev_check_and_map_buffer() - resolve an ion fd to a device address.
 * @handle:  qcedev handle (struct qcedev_handle *) owning the
 *           registeredbufs list the mapping is tracked on.
 * @fd:      ion/dma-buf fd to map.
 * @offset:  offset into the buffer; must be < fd_size and within the
 *           mapped range.
 * @fd_size: minimum size the mapping must cover.
 * @vaddr:   out: device (iova) address of the buffer plus @offset.
 *
 * If the fd is already registered on this handle, its refcount is
 * bumped and the existing mapping reused; otherwise a fresh mapping is
 * created and added to the list. Returns 0 on success, negative errno
 * otherwise.
 */
int qcedev_check_and_map_buffer(void *handle,
		int fd, unsigned int offset, unsigned int fd_size,
		unsigned long long *vaddr)
{
	bool found = false;
	struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	int rc = 0;
	unsigned long mapped_size = 0;

	if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}
	mem_client = qce_hndl->cntl->mem_client;

	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Check if the buffer fd is already mapped */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
		if (temp->ion_buf.ion_fd == fd) {
			found = true;
			*vaddr = temp->ion_buf.iova;
			mapped_size = temp->ion_buf.mapped_buf_size;
			atomic_inc(&temp->ref_count);
			break;
		}
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	/*
	 * If buffer fd is not mapped then create a fresh mapping.
	 * NOTE(review): the lock is dropped between the lookup above and
	 * the insertion below, so two concurrent callers mapping the same
	 * fd could create duplicate entries — confirm whether callers are
	 * serialized externally.
	 */
	if (!found) {
		pr_debug("%s: info: ion fd not registered with driver\n",
			__func__);
		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
		if (!binfo) {
			pr_err("%s: err: failed to allocate binfo\n",
				__func__);
			rc = -ENOMEM;
			goto error;
		}
		rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
							fd_size, binfo);
		if (rc) {
			pr_err("%s: err: failed to map fd (%d) error = %d\n",
				__func__, fd, rc);
			goto error;
		}

		*vaddr = binfo->ion_buf.iova;
		mapped_size = binfo->ion_buf.mapped_buf_size;
		atomic_inc(&binfo->ref_count);

		/* Add buffer mapping information to regd buffer list */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
	}

	/* Make sure the offset is within the mapped range */
	if (offset >= mapped_size) {
		pr_err(
			"%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
			__func__, offset, mapped_size, fd);
		rc = -ERANGE;
		goto unmap;
	}

	/* return the mapped virtual address adjusted by offset */
	*vaddr += offset;

	return 0;

unmap:
	if (found) {
		/*
		 * Reused an existing mapping: drop the reference taken
		 * during the lookup so a later unmap can still release
		 * the buffer (otherwise the refcount leaks).
		 */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		atomic_dec(&temp->ref_count);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
		return rc;
	}
	/*
	 * Fresh mapping: remove it from registeredbufs before unmapping
	 * and freeing, otherwise the list would keep a pointer to freed
	 * memory (use-after-free in later list walks).
	 */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_del(&binfo->list);
	mutex_unlock(&qce_hndl->registeredbufs.lock);
	qcedev_unmap_buffer(handle, mem_client, binfo);

error:
	kfree(binfo);
	return rc;
}

/*
 * qcedev_check_and_unmap_buffer() - drop one reference on a mapped fd.
 * @handle: qcedev handle (struct qcedev_handle *).
 * @fd:     ion fd previously mapped via qcedev_check_and_map_buffer().
 *
 * The buffer is unmapped, removed from the registered list and freed
 * only when its refcount reaches zero. Returns 0 on success, -EINVAL
 * for bad arguments or an unknown fd, -EPERM for a non-ION client.
 */
int qcedev_check_and_unmap_buffer(void *handle, int fd)
{
	struct qcedev_handle *qce_hndl = handle;
	struct qcedev_mem_client *mem_client;
	struct qcedev_reg_buf_info *entry, *next;
	bool found = false;

	if (!handle || fd < 0) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}
	mem_client = qce_hndl->cntl->mem_client;

	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Look the fd up in the registered list under the list lock. */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry_safe(entry, next,
		&qce_hndl->registeredbufs.list, list) {
		if (entry->ion_buf.ion_fd != fd)
			continue;

		found = true;
		atomic_dec(&entry->ref_count);

		/* Unmap only if there are no more references */
		if (atomic_read(&entry->ref_count) == 0) {
			qcedev_unmap_buffer(qce_hndl, mem_client, entry);
			list_del(&entry->list);
			kfree(entry);
		}
		break;
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	if (!found) {
		pr_err("%s: err: calling unmap on unknown fd %d\n",
			__func__, fd);
		return -EINVAL;
	}

	return 0;
}
+53 −0
Original line number Diff line number Diff line
@@ -15,8 +15,16 @@
#ifndef _DRIVERS_CRYPTO_PARSE_H_
#define _DRIVERS_CRYPTO_PARSE_H_

#include <asm/dma-iommu.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/iommu.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/msm_ion.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/types.h>

struct context_bank_info {
	struct list_head list;
@@ -29,7 +37,52 @@ struct context_bank_info {
	struct dma_iommu_mapping *mapping;
};

/* Backing allocator types supported by the qcedev memory layer. */
enum qcedev_mem_type {
	MEM_ION,
};

/* Per-device memory client; records which allocator type backs it. */
struct qcedev_mem_client {
	enum qcedev_mem_type mtype;
};

/*
 * Handles needed to tear down a DMA mapping: the device and IOMMU
 * mapping it was created on, plus the dma-buf, its attachment and the
 * scatter-gather table to release.
 */
struct dma_mapping_info {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct dma_buf *buf;
};

/* One mapped ion buffer: its fd, device (iova) address, mapped size and
 * the teardown handles.
 */
struct qcedev_ion_buf_info {
	struct dma_mapping_info mapping_info;
	dma_addr_t iova;
	unsigned long mapped_buf_size;
	int ion_fd;
};

/*
 * Node in a handle's registered-buffers list; ref_count tracks how many
 * outstanding map requests reference the buffer.
 */
struct qcedev_reg_buf_info {
	struct list_head list;
	union {
		struct qcedev_ion_buf_info ion_buf;
	};
	atomic_t ref_count;
};

/* Head of a handle's registered-buffers list plus the lock guarding it. */
struct qcedev_buffer_list {
	struct list_head list;
	struct mutex lock;
};

int qcedev_parse_context_bank(struct platform_device *pdev);
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype);
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client);
int qcedev_check_and_map_buffer(void *qce_hndl,
		int fd, unsigned int offset, unsigned int fd_size,
		unsigned long long *vaddr);
int qcedev_check_and_unmap_buffer(void *handle, int fd);

extern struct qcedev_reg_buf_info *global_binfo_in;
extern struct qcedev_reg_buf_info *global_binfo_out;
extern struct qcedev_reg_buf_info *global_binfo_res;
#endif
Loading