
Commit 80f053b8 authored by Jilai Wang

msm: npu: refactor npu_map_buf/npu_unmap_buf functions



The mapped ion buffer list is now managed by each npu client
and protected by a mutex to support concurrent use cases.

Change-Id: Ia81876689c2e943c28cd5ee35e56fb9196b99002
Signed-off-by: Jilai Wang <jilaiw@codeaurora.org>
parent bd2d4557
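
The commit moves buffer bookkeeping from a single device-wide table into a per-client linked list guarded by that client's own mutex. Below is a minimal userspace C sketch of that ownership and locking pattern; it is illustrative only and not part of the diff. The names (buf_entry, client, client_map) are invented, a pthread mutex stands in for the kernel's struct mutex, and a hand-rolled singly linked list stands in for list_head.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct npu_ion_buf: one mapped buffer owned by a client. */
struct buf_entry {
	int fd;			/* buffer handle */
	uint32_t size;
	uint64_t iova;
	struct buf_entry *next;
};

/* Stand-in for struct npu_client: every open handle gets its own list
 * and its own lock, so concurrent clients never contend with each other. */
struct client {
	pthread_mutex_t list_lock;
	struct buf_entry *mapped;	/* head of this client's buffer list */
};

/* Add a buffer only if the handle is not already in this client's list
 * (mirrors the duplicate check in npu_alloc_npu_ion_buffer). */
static struct buf_entry *client_map(struct client *c, int fd, uint32_t size)
{
	struct buf_entry *e, *entry = NULL;

	pthread_mutex_lock(&c->list_lock);
	for (e = c->mapped; e; e = e->next) {
		if (e->fd == fd)
			break;
	}

	if (e) {
		/* already mapped: reject the request */
		fprintf(stderr, "buffer %d already mapped\n", fd);
	} else {
		entry = calloc(1, sizeof(*entry));
		if (entry) {
			entry->fd = fd;
			entry->size = size;
			entry->next = c->mapped;
			c->mapped = entry;
		}
	}
	pthread_mutex_unlock(&c->list_lock);

	return entry;
}

int main(void)
{
	struct client c = { .list_lock = PTHREAD_MUTEX_INITIALIZER };

	/* First map succeeds; a second map of the same handle is rejected. */
	printf("map #1: %p\n", (void *)client_map(&c, 42, 4096));
	printf("map #2: %p\n", (void *)client_map(&c, 42, 4096));
	return 0;
}

Tying the list to the client rather than to npu_dev means one client's map/unmap traffic cannot block another's, and all of a client's outstanding mappings can be found and released when its file descriptor is closed.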
+2 −3
@@ -169,7 +169,7 @@ struct npu_irq {
};

struct npu_device {
	struct mutex ctx_lock;
	struct mutex dev_lock;

	struct platform_device *pdev;

@@ -190,8 +190,6 @@ struct npu_device {

	struct npu_irq irq[NPU_MAX_IRQ];

	struct npu_ion_buf mapped_buffers;

	struct device *cb_device;

	struct npu_host_ctx host_ctx;
@@ -220,6 +218,7 @@ struct npu_client {

	struct mutex list_lock;
	struct list_head evt_list;
	struct list_head mapped_buffer_list;
};

/* -------------------------------------------------------------------------
+0 −1
@@ -305,7 +305,6 @@ void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
	pr_info("fw jobs execute finished count = %d\n", reg_val);
	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
	pr_info("fw jobs aco parser debug = %d\n", reg_val);
	npu_dump_cal_state(npu_dev);
}

void npu_dump_cal_state(struct npu_device *npu_dev)
+4 −5
@@ -814,6 +814,7 @@ static int npu_open(struct inode *inode, struct file *file)
	init_waitqueue_head(&client->wait);
	mutex_init(&client->list_lock);
	INIT_LIST_HEAD(&client->evt_list);
	INIT_LIST_HEAD(&(client->mapped_buffer_list));
	file->private_data = client;

	return 0;
@@ -872,7 +873,6 @@ static int npu_get_info(struct npu_client *client, unsigned long arg)

static int npu_map_buf(struct npu_client *client, unsigned long arg)
{
	struct npu_device *npu_dev = client->npu_dev;
	struct msm_npu_map_buf_ioctl req;
	void __user *argp = (void __user *)arg;
	int ret = 0;
@@ -884,7 +884,7 @@ static int npu_map_buf(struct npu_client *client, unsigned long arg)
		return -EFAULT;
	}

	ret = npu_host_map_buf(npu_dev, &req);
	ret = npu_host_map_buf(client, &req);

	if (ret) {
		pr_err("npu_host_map_buf failed\n");
@@ -902,7 +902,6 @@ static int npu_map_buf(struct npu_client *client, unsigned long arg)

static int npu_unmap_buf(struct npu_client *client, unsigned long arg)
{
	struct npu_device *npu_dev = client->npu_dev;
	struct msm_npu_unmap_buf_ioctl req;
	void __user *argp = (void __user *)arg;
	int ret = 0;
@@ -914,7 +913,7 @@ static int npu_unmap_buf(struct npu_client *client, unsigned long arg)
		return -EFAULT;
	}

	ret = npu_host_unmap_buf(npu_dev, &req);
	ret = npu_host_unmap_buf(client, &req);

	if (ret) {
		pr_err("npu_host_unmap_buf failed\n");
@@ -1633,7 +1632,7 @@ static int npu_probe(struct platform_device *pdev)
		goto error_driver_init;
	}

	INIT_LIST_HEAD(&(npu_dev->mapped_buffers.list));
	mutex_init(&npu_dev->dev_lock);

	rc = npu_host_init(npu_dev);
	if (rc) {
+92 −45
@@ -151,69 +151,94 @@ int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev)
 * Functions - ION Memory
 * -------------------------------------------------------------------------
 */
static struct npu_ion_buf *npu_get_npu_ion_buffer(struct npu_device
	*npu_dev)
static struct npu_ion_buf *npu_alloc_npu_ion_buffer(struct npu_client
	*client, int buf_hdl, uint32_t size)
{
	struct npu_ion_buf *ret_val = 0;
	struct npu_ion_buf *ret_val = NULL, *tmp;
	struct list_head *pos = NULL;

	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		tmp = list_entry(pos, struct npu_ion_buf, list);
		if (tmp->fd == buf_hdl) {
			ret_val = tmp;
			break;
		}
	}

	if (ret_val) {
		/* mapped already, treat as invalid request */
		pr_err("ion buf %x has been mapped\n");
		ret_val = NULL;
	} else {
		ret_val = kmalloc(sizeof(struct npu_ion_buf), GFP_KERNEL);
	if (ret_val)
		list_add(&(ret_val->list), &(npu_dev->mapped_buffers.list));
		if (ret_val) {
			ret_val->fd = buf_hdl;
			ret_val->size = size;
			ret_val->iova = 0;
			list_add(&(ret_val->list),
				&(client->mapped_buffer_list));
		}
	}
	mutex_unlock(&client->list_lock);

	return ret_val;
}

static struct npu_ion_buf *npu_get_existing_ion_buffer(struct npu_device
	*npu_dev, int buf_hdl)
static struct npu_ion_buf *npu_get_npu_ion_buffer(struct npu_client
	*client, int buf_hdl)
{
	struct list_head *pos = 0;
	struct npu_ion_buf *npu_ion_buf = 0;
	struct list_head *pos = NULL;
	struct npu_ion_buf *ret_val = NULL, *tmp;

	list_for_each(pos, &(npu_dev->mapped_buffers.list)) {
		npu_ion_buf = list_entry(pos, struct npu_ion_buf, list);
		if (npu_ion_buf->fd == buf_hdl)
			return npu_ion_buf;
	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		tmp = list_entry(pos, struct npu_ion_buf, list);
		if (tmp->fd == buf_hdl) {
			ret_val = tmp;
			break;
		}
	}
	mutex_unlock(&client->list_lock);

	return NULL;
	return ret_val;
}

static struct npu_ion_buf *npu_clear_npu_ion_buffer(struct npu_device
	*npu_dev, int buf_hdl, uint64_t addr)
static void npu_free_npu_ion_buffer(struct npu_client
	*client, int buf_hdl)
{
	struct list_head *pos = 0;
	struct npu_ion_buf *npu_ion_buf = 0;
	struct list_head *pos = NULL;
	struct npu_ion_buf *npu_ion_buf = NULL;

	list_for_each(pos, &(npu_dev->mapped_buffers.list)) {
	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		npu_ion_buf = list_entry(pos, struct npu_ion_buf, list);
		if (npu_ion_buf->fd == buf_hdl &&
			npu_ion_buf->iova == addr) {
		if (npu_ion_buf->fd == buf_hdl) {
			list_del(&npu_ion_buf->list);
			return npu_ion_buf;
			kfree(npu_ion_buf);
			break;
		}
	}

	return NULL;
	mutex_unlock(&client->list_lock);
}

int npu_mem_map(struct npu_device *npu_dev, int buf_hdl, uint32_t size,
int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
	uint64_t *addr)
{
	int ret = 0;

	struct npu_ion_buf *ion_buf = npu_get_npu_ion_buffer(npu_dev);
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_ion_buf *ion_buf = NULL;
	struct npu_smmu_ctx *smmu_ctx = &npu_dev->smmu_ctx;

	if (buf_hdl == 0)
		return -EINVAL;

	ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size);
	if (!ion_buf) {
		pr_err("%s no more table space\n", __func__);
		pr_err("%s fail to alloc npu_ion_buffer\n", __func__);
		ret = -ENOMEM;
		return ret;
	}
	ion_buf->fd = buf_hdl;
	ion_buf->size = size;

	if (ion_buf->fd == 0)
		return -EINVAL;

	smmu_ctx->attach_cnt++;

@@ -250,15 +275,16 @@ int npu_mem_map(struct npu_device *npu_dev, int buf_hdl, uint32_t size,
	ion_buf->size = ion_buf->table->sgl->dma_length;
map_end:
	if (ret)
		npu_mem_unmap(npu_dev, buf_hdl, 0);
		npu_mem_unmap(client, buf_hdl, 0);

	*addr = ion_buf->iova;
	return ret;
}

void npu_mem_invalidate(struct npu_device *npu_dev, int buf_hdl)
void npu_mem_invalidate(struct npu_client *client, int buf_hdl)
{
	struct npu_ion_buf *ion_buf = npu_get_existing_ion_buffer(npu_dev,
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_ion_buf *ion_buf = npu_get_npu_ion_buffer(client,
		buf_hdl);

	if (!ion_buf)
@@ -268,29 +294,50 @@ void npu_mem_invalidate(struct npu_device *npu_dev, int buf_hdl)
			ion_buf->table->nents, DMA_BIDIRECTIONAL);
}

void npu_mem_unmap(struct npu_device *npu_dev, int buf_hdl, uint64_t addr)
bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr)
{
	struct npu_ion_buf *ion_buf = 0;
	struct list_head *pos = NULL;
	bool valid = false;

	/* clear entry and retrieve the corresponding buffer */
	ion_buf = npu_clear_npu_ion_buffer(npu_dev, buf_hdl, addr);
	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		ion_buf = list_entry(pos, struct npu_ion_buf, list);
		if (ion_buf->iova == addr) {
			valid = true;
			break;
		}
	}
	mutex_unlock(&client->list_lock);

	return valid;
}

void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr)
{
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_ion_buf *ion_buf = 0;

	/* clear entry and retrieve the corresponding buffer */
	ion_buf = npu_get_npu_ion_buffer(client, buf_hdl);
	if (!ion_buf) {
		pr_err("%s could not find buffer\n", __func__);
		return;
	}

	if (ion_buf->iova != addr)
		pr_warn("unmap address %lu doesn't match %lu\n", addr,
			ion_buf->iova);

	if (ion_buf->table)
		dma_buf_unmap_attachment(ion_buf->attachment, ion_buf->table,
			DMA_BIDIRECTIONAL);
	ion_buf->table = 0;
	if (ion_buf->dma_buf && ion_buf->attachment)
		dma_buf_detach(ion_buf->dma_buf, ion_buf->attachment);
	ion_buf->attachment = 0;
	if (ion_buf->dma_buf)
		dma_buf_put(ion_buf->dma_buf);
	ion_buf->dma_buf = 0;
	npu_dev->smmu_ctx.attach_cnt--;
	kfree(ion_buf);
	npu_free_npu_ion_buffer(client, buf_hdl);
}

/* -------------------------------------------------------------------------
+5 −3
@@ -43,6 +43,7 @@
struct npu_device;
struct npu_ion_buf_t;
struct npu_host_ctx;
struct npu_client;
typedef irqreturn_t (*intr_hdlr_fn)(int32_t irq, void *ptr);
typedef void (*wq_hdlr_fn) (struct work_struct *work);

@@ -57,10 +58,11 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
	uint32_t size);

int npu_mem_map(struct npu_device *npu_dev, int buf_hdl, uint32_t size,
int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
	uint64_t *addr);
void npu_mem_unmap(struct npu_device *npu_dev, int buf_hdl, uint64_t addr);
void npu_mem_invalidate(struct npu_device *npu_dev, int buf_hdl);
void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr);
void npu_mem_invalidate(struct npu_client *client, int buf_hdl);
bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr);

void *npu_ipc_addr(void);
void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num);
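
The header hunk above shows the reworked declarations: npu_mem_map(), npu_mem_unmap() and npu_mem_invalidate() now take a struct npu_client *, and npu_mem_verify_addr() is new. The sketch below shows how a caller inside the driver might chain these calls; the function name and surrounding flow are hypothetical, only the npu_mem_* signatures come from the diff.

/* Hypothetical caller (not part of this commit): map an ION buffer for a
 * client, check the returned IOVA against the client's own list before
 * using it, then unmap.  Error handling is kept minimal on purpose. */
static int example_client_buffer_cycle(struct npu_client *client,
	int buf_hdl, uint32_t size)
{
	uint64_t iova = 0;
	int ret;

	ret = npu_mem_map(client, buf_hdl, size, &iova);
	if (ret)
		return ret;

	/* npu_mem_verify_addr() walks client->mapped_buffer_list under
	 * list_lock, so an address belonging to another client is rejected. */
	if (!npu_mem_verify_addr(client, iova)) {
		npu_mem_unmap(client, buf_hdl, iova);
		return -EINVAL;
	}

	/* Keep the CPU view of the buffer coherent around NPU work. */
	npu_mem_invalidate(client, buf_hdl);

	/* ... hand iova to the firmware here ... */

	npu_mem_unmap(client, buf_hdl, iova);
	return 0;
}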