Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7e279040 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: npu: refactor npu_map_buf/npu_unmap_buf functions"

parents 4de64b85 80f053b8
Loading
Loading
Loading
Loading
+15 −3
Original line number Diff line number Diff line
@@ -169,7 +169,7 @@ struct npu_irq {
};

struct npu_device {
	struct mutex ctx_lock;
	struct mutex dev_lock;

	struct platform_device *pdev;

@@ -190,8 +190,6 @@ struct npu_device {

	struct npu_irq irq[NPU_MAX_IRQ];

	struct npu_ion_buf mapped_buffers;

	struct device *cb_device;

	struct npu_host_ctx host_ctx;
@@ -208,6 +206,20 @@ struct npu_device {
	uint32_t execute_v2_flag;
};

/*
 * Kernel-side wrapper for one event queued toward user space.  Instances
 * sit on npu_client->evt_list until collected via the receive-event ioctl
 * (see npu_receive_event), where they are copied out and freed.
 */
struct npu_kevent {
	struct list_head list;		/* entry in npu_client->evt_list */
	struct msm_npu_event evt;	/* payload copied to user space */
	/*
	 * Driver-private scratch.  For EXEC_V2_DONE events,
	 * npu_process_kevent() copies from &reserved[0] to the user pointer
	 * held in reserved[1] — presumably set by the event producer;
	 * TODO(review): confirm against the code that enqueues these events.
	 */
	uint64_t reserved[4];
};

/*
 * Per-open-file client state.  Allocated in npu_open(), stored in
 * file->private_data, and freed in npu_close().
 */
struct npu_client {
	struct npu_device *npu_dev;	/* owning device, from the cdev */
	wait_queue_head_t wait;		/* poll() wait queue for pending events */

	struct mutex list_lock;		/* protects the two lists below */
	struct list_head evt_list;	/* pending npu_kevent entries */
	struct list_head mapped_buffer_list;	/* npu_ion_buf entries mapped by this client */
};

/* -------------------------------------------------------------------------
 * Function Prototypes
+0 −1
Original line number Diff line number Diff line
@@ -305,7 +305,6 @@ void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
	pr_info("fw jobs execute finished count = %d\n", reg_val);
	reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
	pr_info("fw jobs aco parser debug = %d\n", reg_val);
	npu_dump_cal_state(npu_dev);
}

void npu_dump_cal_state(struct npu_device *npu_dev)
+152 −34
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/poll.h>
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
#include <linux/soc/qcom/llcc-qcom.h>
@@ -75,16 +76,25 @@ static int npu_set_cur_state(struct thermal_cooling_device *cdev,
				unsigned long state);
static int npu_open(struct inode *inode, struct file *file);
static int npu_close(struct inode *inode, struct file *file);
static int npu_get_info(struct npu_device *npu_dev, unsigned long arg);
static int npu_map_buf(struct npu_device *npu_dev, unsigned long arg);
static int npu_unmap_buf(struct npu_device *npu_dev, unsigned long arg);
static int npu_load_network(struct npu_device *npu_dev, unsigned long arg);
static int npu_load_network_v2(struct npu_device *npu_dev, unsigned long arg);
static int npu_unload_network(struct npu_device *npu_dev, unsigned long arg);
static int npu_exec_network(struct npu_device *npu_dev, unsigned long arg);
static int npu_exec_network_v2(struct npu_device *npu_dev, unsigned long arg);
static int npu_get_info(struct npu_client *client, unsigned long arg);
static int npu_map_buf(struct npu_client *client, unsigned long arg);
static int npu_unmap_buf(struct npu_client *client,
	unsigned long arg);
static int npu_load_network(struct npu_client *client,
	unsigned long arg);
static int npu_load_network_v2(struct npu_client *client,
	unsigned long arg);
static int npu_unload_network(struct npu_client *client,
	unsigned long arg);
static int npu_exec_network(struct npu_client *client,
	unsigned long arg);
static int npu_exec_network_v2(struct npu_client *client,
	unsigned long arg);
static int npu_receive_event(struct npu_client *client,
	unsigned long arg);
static long npu_ioctl(struct file *file, unsigned int cmd,
					unsigned long arg);
static unsigned int npu_poll(struct file *filp, struct poll_table_struct *p);
static int npu_parse_dt_clock(struct npu_device *npu_dev);
static int npu_parse_dt_regulator(struct npu_device *npu_dev);
static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
@@ -207,6 +217,7 @@ static const struct file_operations npu_fops = {
#ifdef CONFIG_COMPAT
	 .compat_ioctl = npu_ioctl,
#endif
	.poll = npu_poll,
};

static const struct thermal_cooling_device_ops npu_cooling_ops = {
@@ -793,14 +804,36 @@ static int npu_open(struct inode *inode, struct file *file)
{
	struct npu_device *npu_dev = container_of(inode->i_cdev,
		struct npu_device, cdev);
	struct npu_client *client;

	file->private_data = npu_dev;
	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->npu_dev = npu_dev;
	init_waitqueue_head(&client->wait);
	mutex_init(&client->list_lock);
	INIT_LIST_HEAD(&client->evt_list);
	INIT_LIST_HEAD(&(client->mapped_buffer_list));
	file->private_data = client;

	return 0;
}

/*
 * Release handler: drop any events the client never collected, then tear
 * down the client allocated in npu_open().
 */
static int npu_close(struct inode *inode, struct file *file)
{
	struct npu_client *client = file->private_data;
	struct npu_kevent *kevent;

	/* NOTE(review): evt_list is drained without taking list_lock —
	 * assumes no producer can still queue events at release time;
	 * confirm against the event-delivery path. */
	while (!list_empty(&client->evt_list)) {
		kevent = list_first_entry(&client->evt_list,
			struct npu_kevent, list);
		list_del(&kevent->list);
		kfree(kevent);
	}

	/* NOTE(review): mapped_buffer_list is not drained here; buffers a
	 * client fails to unmap before closing appear to leak — verify
	 * whether unmapping is expected to happen elsewhere. */
	mutex_destroy(&client->list_lock);
	kfree(client);
	return 0;
}

@@ -808,8 +841,9 @@ static int npu_close(struct inode *inode, struct file *file)
 * IOCTL Implementations
 * -------------------------------------------------------------------------
 */
static int npu_get_info(struct npu_device *npu_dev, unsigned long arg)
static int npu_get_info(struct npu_client *client, unsigned long arg)
{
	struct npu_device *npu_dev = client->npu_dev;
	struct msm_npu_get_info_ioctl req;
	void __user *argp = (void __user *)arg;
	int ret = 0;
@@ -837,7 +871,7 @@ static int npu_get_info(struct npu_device *npu_dev, unsigned long arg)
	return 0;
}

static int npu_map_buf(struct npu_device *npu_dev, unsigned long arg)
static int npu_map_buf(struct npu_client *client, unsigned long arg)
{
	struct msm_npu_map_buf_ioctl req;
	void __user *argp = (void __user *)arg;
@@ -850,7 +884,7 @@ static int npu_map_buf(struct npu_device *npu_dev, unsigned long arg)
		return -EFAULT;
	}

	ret = npu_host_map_buf(npu_dev, &req);
	ret = npu_host_map_buf(client, &req);

	if (ret) {
		pr_err("npu_host_map_buf failed\n");
@@ -866,7 +900,7 @@ static int npu_map_buf(struct npu_device *npu_dev, unsigned long arg)
	return 0;
}

static int npu_unmap_buf(struct npu_device *npu_dev, unsigned long arg)
static int npu_unmap_buf(struct npu_client *client, unsigned long arg)
{
	struct msm_npu_unmap_buf_ioctl req;
	void __user *argp = (void __user *)arg;
@@ -879,7 +913,7 @@ static int npu_unmap_buf(struct npu_device *npu_dev, unsigned long arg)
		return -EFAULT;
	}

	ret = npu_host_unmap_buf(npu_dev, &req);
	ret = npu_host_unmap_buf(client, &req);

	if (ret) {
		pr_err("npu_host_unmap_buf failed\n");
@@ -895,7 +929,8 @@ static int npu_unmap_buf(struct npu_device *npu_dev, unsigned long arg)
	return 0;
}

static int npu_load_network(struct npu_device *npu_dev, unsigned long arg)
static int npu_load_network(struct npu_client *client,
	unsigned long arg)
{
	struct msm_npu_load_network_ioctl req;
	void __user *argp = (void __user *)arg;
@@ -910,7 +945,7 @@ static int npu_load_network(struct npu_device *npu_dev, unsigned long arg)

	pr_debug("network load with perf request %d\n", req.perf_mode);

	ret = npu_host_load_network(npu_dev, &req);
	ret = npu_host_load_network(client, &req);
	if (ret) {
		pr_err("network load failed: %d\n", ret);
		return -EFAULT;
@@ -924,7 +959,8 @@ static int npu_load_network(struct npu_device *npu_dev, unsigned long arg)
	return 0;
}

static int npu_load_network_v2(struct npu_device *npu_dev, unsigned long arg)
static int npu_load_network_v2(struct npu_client *client,
	unsigned long arg)
{
	struct msm_npu_load_network_ioctl_v2 req;
	void __user *argp = (void __user *)arg;
@@ -956,7 +992,7 @@ static int npu_load_network_v2(struct npu_device *npu_dev, unsigned long arg)

	pr_debug("network load with perf request %d\n", req.perf_mode);

	ret = npu_host_load_network_v2(npu_dev, &req, patch_info);
	ret = npu_host_load_network_v2(client, &req, patch_info);
	if (ret) {
		pr_err("network load failed: %d\n", ret);
	} else {
@@ -969,7 +1005,8 @@ static int npu_load_network_v2(struct npu_device *npu_dev, unsigned long arg)
	return ret;
}

static int npu_unload_network(struct npu_device *npu_dev, unsigned long arg)
static int npu_unload_network(struct npu_client *client,
	unsigned long arg)
{
	struct msm_npu_unload_network_ioctl req;
	void __user *argp = (void __user *)arg;
@@ -982,7 +1019,7 @@ static int npu_unload_network(struct npu_device *npu_dev, unsigned long arg)
		return -EFAULT;
	}

	ret = npu_host_unload_network(npu_dev, &req);
	ret = npu_host_unload_network(client, &req);

	if (ret) {
		pr_err("npu_host_unload_network failed\n");
@@ -998,7 +1035,8 @@ static int npu_unload_network(struct npu_device *npu_dev, unsigned long arg)
	return 0;
}

static int npu_exec_network(struct npu_device *npu_dev, unsigned long arg)
static int npu_exec_network(struct npu_client *client,
	unsigned long arg)
{
	struct msm_npu_exec_network_ioctl req;
	void __user *argp = (void __user *)arg;
@@ -1011,7 +1049,15 @@ static int npu_exec_network(struct npu_device *npu_dev, unsigned long arg)
		return -EFAULT;
	}

	ret = npu_host_exec_network(npu_dev, &req);
	if ((req.input_layer_num > MSM_NPU_MAX_INPUT_LAYER_NUM) ||
		(req.output_layer_num > MSM_NPU_MAX_OUTPUT_LAYER_NUM)) {
		pr_err("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
			req.input_layer_num, MSM_NPU_MAX_INPUT_LAYER_NUM,
			req.output_layer_num, MSM_NPU_MAX_OUTPUT_LAYER_NUM);
		return -EINVAL;
	}

	ret = npu_host_exec_network(client, &req);

	if (ret) {
		pr_err("npu_host_exec_network failed\n");
@@ -1027,7 +1073,8 @@ static int npu_exec_network(struct npu_device *npu_dev, unsigned long arg)
	return 0;
}

static int npu_exec_network_v2(struct npu_device *npu_dev, unsigned long arg)
static int npu_exec_network_v2(struct npu_client *client,
	unsigned long arg)
{
	struct msm_npu_exec_network_ioctl_v2 req;
	void __user *argp = (void __user *)arg;
@@ -1063,7 +1110,7 @@ static int npu_exec_network_v2(struct npu_device *npu_dev, unsigned long arg)
			req.patch_buf_info_num * sizeof(*patch_buf_info));
	}

	ret = npu_host_exec_network_v2(npu_dev, &req, patch_buf_info);
	ret = npu_host_exec_network_v2(client, &req, patch_buf_info);
	if (ret) {
		pr_err("npu_host_exec_network failed\n");
	} else {
@@ -1076,36 +1123,90 @@ static int npu_exec_network_v2(struct npu_device *npu_dev, unsigned long arg)
	return ret;
}

/*
 * Per-event-type post-processing before an event is handed to user space.
 * For EXEC_V2_DONE events, copy the execution stats (kernel copy starting
 * at &kevt->reserved[0]) to the user destination held in reserved[1]; on
 * copy failure the advertised stats size is forced to zero and -EFAULT is
 * returned.  All other event types need no processing.
 */
static int npu_process_kevent(struct npu_kevent *kevt)
{
	int ret = 0;

	if (kevt->evt.type == MSM_NPU_EVENT_TYPE_EXEC_V2_DONE) {
		unsigned long left;

		left = copy_to_user((void __user *)kevt->reserved[1],
			(void *)&kevt->reserved[0],
			kevt->evt.u.exec_v2_done.stats_buf_size);
		if (left) {
			pr_err("fail to copy to user\n");
			kevt->evt.u.exec_v2_done.stats_buf_size = 0;
			ret = -EFAULT;
		}
	}

	return ret;
}

/*
 * MSM_NPU_RECEIVE_EVENT handler: pop the oldest queued event for this
 * client and copy it to the user buffer at @arg.
 *
 * Returns 0 on success, -EINVAL if no event is pending, -EFAULT if the
 * copy to user space fails.  The event is consumed (freed) either way
 * once dequeued.
 */
static int npu_receive_event(struct npu_client *client,
	unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct npu_kevent *kevt;
	int ret = 0;

	mutex_lock(&client->list_lock);
	if (list_empty(&client->evt_list)) {
		pr_err("event list is empty\n");
		ret = -EINVAL;
	} else {
		kevt = list_first_entry(&client->evt_list,
			struct npu_kevent, list);
		list_del(&kevt->list);
		/* NOTE(review): return value deliberately ignored — on copy
		 * failure npu_process_kevent() zeroes the stats size inside
		 * the event, so the event is still delivered; confirm this
		 * best-effort behavior is intended. */
		npu_process_kevent(kevt);
		ret = copy_to_user(argp, &kevt->evt,
			sizeof(struct msm_npu_event));
		if (ret) {
			pr_err("fail to copy to user\n");
			ret = -EFAULT;
		}
		kfree(kevt);
	}
	mutex_unlock(&client->list_lock);

	return ret;
}

/*
 * Main ioctl dispatcher.  file->private_data holds the npu_client set up
 * in npu_open(); every command is forwarded to its handler with that
 * client.  Unknown commands return -ENOIOCTLCMD.
 *
 * The stale pre-refactor lines (duplicate npu_dev declaration and the
 * duplicate calls passing npu_dev where the handlers now take an
 * npu_client) are removed.
 */
static long npu_ioctl(struct file *file, unsigned int cmd,
						 unsigned long arg)
{
	int ret = -ENOIOCTLCMD;
	struct npu_client *client = file->private_data;

	switch (cmd) {
	case MSM_NPU_GET_INFO:
		ret = npu_get_info(client, arg);
		break;
	case MSM_NPU_MAP_BUF:
		ret = npu_map_buf(client, arg);
		break;
	case MSM_NPU_UNMAP_BUF:
		ret = npu_unmap_buf(client, arg);
		break;
	case MSM_NPU_LOAD_NETWORK:
		ret = npu_load_network(client, arg);
		break;
	case MSM_NPU_LOAD_NETWORK_V2:
		ret = npu_load_network_v2(client, arg);
		break;
	case MSM_NPU_UNLOAD_NETWORK:
		ret = npu_unload_network(client, arg);
		break;
	case MSM_NPU_EXEC_NETWORK:
		ret = npu_exec_network(client, arg);
		break;
	case MSM_NPU_EXEC_NETWORK_V2:
		ret = npu_exec_network_v2(client, arg);
		break;
	case MSM_NPU_RECEIVE_EVENT:
		ret = npu_receive_event(client, arg);
		break;
	default:
		pr_err("unexpected IOCTL %x\n", cmd);
		break;
	}

	return ret;
}

/*
 * poll() handler: register on the client's wait queue, then report the
 * fd as readable whenever at least one event is queued on evt_list.
 */
static unsigned int npu_poll(struct file *filp, struct poll_table_struct *p)
{
	struct npu_client *client = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &client->wait, p);

	mutex_lock(&client->list_lock);
	if (!list_empty(&client->evt_list)) {
		pr_debug("poll cmd done\n");
		mask = POLLIN | POLLRDNORM;
	}
	mutex_unlock(&client->list_lock);

	return mask;
}

/* -------------------------------------------------------------------------
 * Device Tree Parsing
 * -------------------------------------------------------------------------
@@ -1514,7 +1632,7 @@ static int npu_probe(struct platform_device *pdev)
		goto error_driver_init;
	}

	INIT_LIST_HEAD(&(npu_dev->mapped_buffers.list));
	mutex_init(&npu_dev->dev_lock);

	rc = npu_host_init(npu_dev);
	if (rc) {
+92 −45
Original line number Diff line number Diff line
@@ -151,69 +151,94 @@ int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev)
 * Functions - ION Memory
 * -------------------------------------------------------------------------
 */
/*
 * Allocate and register a tracking entry for an ION buffer on this
 * client's mapped_buffer_list.  A handle may be mapped at most once per
 * client: if @buf_hdl is already present the request is rejected.
 *
 * Returns the new entry (fd/size filled in, iova and all dma-buf fields
 * zeroed) with list_lock released, or NULL on duplicate handle / OOM.
 */
static struct npu_ion_buf *npu_alloc_npu_ion_buffer(struct npu_client
	*client, int buf_hdl, uint32_t size)
{
	struct npu_ion_buf *ret_val = NULL, *tmp;
	struct list_head *pos = NULL;

	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		tmp = list_entry(pos, struct npu_ion_buf, list);
		if (tmp->fd == buf_hdl) {
			ret_val = tmp;
			break;
		}
	}

	if (ret_val) {
		/* mapped already, treat as invalid request */
		/* bug fix: the %x previously had no matching argument */
		pr_err("ion buf %x has been mapped\n", buf_hdl);
		ret_val = NULL;
	} else {
		/* kzalloc (not kmalloc): npu_mem_unmap() tests table,
		 * attachment and dma_buf, which would otherwise be
		 * uninitialized on early error paths. */
		ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL);
		if (ret_val) {
			ret_val->fd = buf_hdl;
			ret_val->size = size;
			ret_val->iova = 0;
			list_add(&(ret_val->list),
				&(client->mapped_buffer_list));
		}
	}
	mutex_unlock(&client->list_lock);

	return ret_val;
}

static struct npu_ion_buf *npu_get_existing_ion_buffer(struct npu_device
	*npu_dev, int buf_hdl)
static struct npu_ion_buf *npu_get_npu_ion_buffer(struct npu_client
	*client, int buf_hdl)
{
	struct list_head *pos = 0;
	struct npu_ion_buf *npu_ion_buf = 0;
	struct list_head *pos = NULL;
	struct npu_ion_buf *ret_val = NULL, *tmp;

	list_for_each(pos, &(npu_dev->mapped_buffers.list)) {
		npu_ion_buf = list_entry(pos, struct npu_ion_buf, list);
		if (npu_ion_buf->fd == buf_hdl)
			return npu_ion_buf;
	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		tmp = list_entry(pos, struct npu_ion_buf, list);
		if (tmp->fd == buf_hdl) {
			ret_val = tmp;
			break;
		}
	}
	mutex_unlock(&client->list_lock);

	return NULL;
	return ret_val;
}

static struct npu_ion_buf *npu_clear_npu_ion_buffer(struct npu_device
	*npu_dev, int buf_hdl, uint64_t addr)
static void npu_free_npu_ion_buffer(struct npu_client
	*client, int buf_hdl)
{
	struct list_head *pos = 0;
	struct npu_ion_buf *npu_ion_buf = 0;
	struct list_head *pos = NULL;
	struct npu_ion_buf *npu_ion_buf = NULL;

	list_for_each(pos, &(npu_dev->mapped_buffers.list)) {
	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		npu_ion_buf = list_entry(pos, struct npu_ion_buf, list);
		if (npu_ion_buf->fd == buf_hdl &&
			npu_ion_buf->iova == addr) {
		if (npu_ion_buf->fd == buf_hdl) {
			list_del(&npu_ion_buf->list);
			return npu_ion_buf;
			kfree(npu_ion_buf);
			break;
		}
	}

	return NULL;
	mutex_unlock(&client->list_lock);
}

int npu_mem_map(struct npu_device *npu_dev, int buf_hdl, uint32_t size,
int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
	uint64_t *addr)
{
	int ret = 0;

	struct npu_ion_buf *ion_buf = npu_get_npu_ion_buffer(npu_dev);
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_ion_buf *ion_buf = NULL;
	struct npu_smmu_ctx *smmu_ctx = &npu_dev->smmu_ctx;

	if (buf_hdl == 0)
		return -EINVAL;

	ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size);
	if (!ion_buf) {
		pr_err("%s no more table space\n", __func__);
		pr_err("%s fail to alloc npu_ion_buffer\n", __func__);
		ret = -ENOMEM;
		return ret;
	}
	ion_buf->fd = buf_hdl;
	ion_buf->size = size;

	if (ion_buf->fd == 0)
		return -EINVAL;

	smmu_ctx->attach_cnt++;

@@ -250,15 +275,16 @@ int npu_mem_map(struct npu_device *npu_dev, int buf_hdl, uint32_t size,
	ion_buf->size = ion_buf->table->sgl->dma_length;
map_end:
	if (ret)
		npu_mem_unmap(npu_dev, buf_hdl, 0);
		npu_mem_unmap(client, buf_hdl, 0);

	*addr = ion_buf->iova;
	return ret;
}

void npu_mem_invalidate(struct npu_device *npu_dev, int buf_hdl)
void npu_mem_invalidate(struct npu_client *client, int buf_hdl)
{
	struct npu_ion_buf *ion_buf = npu_get_existing_ion_buffer(npu_dev,
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_ion_buf *ion_buf = npu_get_npu_ion_buffer(client,
		buf_hdl);

	if (!ion_buf)
@@ -268,29 +294,50 @@ void npu_mem_invalidate(struct npu_device *npu_dev, int buf_hdl)
			ion_buf->table->nents, DMA_BIDIRECTIONAL);
}

void npu_mem_unmap(struct npu_device *npu_dev, int buf_hdl, uint64_t addr)
bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr)
{
	struct npu_ion_buf *ion_buf = 0;
	struct list_head *pos = NULL;
	bool valid = false;

	/* clear entry and retrieve the corresponding buffer */
	ion_buf = npu_clear_npu_ion_buffer(npu_dev, buf_hdl, addr);
	mutex_lock(&client->list_lock);
	list_for_each(pos, &(client->mapped_buffer_list)) {
		ion_buf = list_entry(pos, struct npu_ion_buf, list);
		if (ion_buf->iova == addr) {
			valid = true;
			break;
		}
	}
	mutex_unlock(&client->list_lock);

	return valid;
}

/*
 * Tear down the DMA mapping for @buf_hdl: unmap the sg table, detach and
 * release the dma-buf, drop the SMMU attach count, then remove and free
 * the client's tracking entry.  @addr is advisory — a mismatch against
 * the recorded IOVA is only warned about.
 *
 * Fixes relative to the diffed version: the stale trailing
 * kfree(ion_buf) (the entry is freed inside npu_free_npu_ion_buffer, so
 * keeping it would double-free), and the %lu printk specifiers for
 * uint64_t, which are wrong on 32-bit builds (printk-formats requires
 * %llx/%llu with an unsigned long long cast).
 */
void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr)
{
	struct npu_device *npu_dev = client->npu_dev;
	struct npu_ion_buf *ion_buf = NULL;

	/* retrieve the tracking entry for this handle */
	ion_buf = npu_get_npu_ion_buffer(client, buf_hdl);
	if (!ion_buf) {
		pr_err("%s could not find buffer\n", __func__);
		return;
	}

	if (ion_buf->iova != addr)
		pr_warn("unmap address %llx doesn't match %llx\n",
			(unsigned long long)addr,
			(unsigned long long)ion_buf->iova);

	if (ion_buf->table)
		dma_buf_unmap_attachment(ion_buf->attachment, ion_buf->table,
			DMA_BIDIRECTIONAL);
	ion_buf->table = NULL;
	if (ion_buf->dma_buf && ion_buf->attachment)
		dma_buf_detach(ion_buf->dma_buf, ion_buf->attachment);
	ion_buf->attachment = NULL;
	if (ion_buf->dma_buf)
		dma_buf_put(ion_buf->dma_buf);
	ion_buf->dma_buf = NULL;
	npu_dev->smmu_ctx.attach_cnt--;
	npu_free_npu_ion_buffer(client, buf_hdl);
}

/* -------------------------------------------------------------------------
+5 −3
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@
struct npu_device;
struct npu_ion_buf_t;
struct npu_host_ctx;
struct npu_client;
typedef irqreturn_t (*intr_hdlr_fn)(int32_t irq, void *ptr);
typedef void (*wq_hdlr_fn) (struct work_struct *work);

@@ -57,10 +58,11 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
	uint32_t size);

int npu_mem_map(struct npu_device *npu_dev, int buf_hdl, uint32_t size,
int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
	uint64_t *addr);
void npu_mem_unmap(struct npu_device *npu_dev, int buf_hdl, uint64_t addr);
void npu_mem_invalidate(struct npu_device *npu_dev, int buf_hdl);
void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr);
void npu_mem_invalidate(struct npu_client *client, int buf_hdl);
bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr);

void *npu_ipc_addr(void);
void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num);
Loading