Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b85fce5b authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: adsprpc: allocate all remote memory in kernel"

parents 80e022aa e073de74
Loading
Loading
Loading
Loading
+225 −87
Original line number | Diff line number | Diff line
@@ -56,6 +56,7 @@
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_DMAHANDLE_NOMAP (16)

#define FASTRPC_ENOSUCH 39
@@ -192,10 +193,15 @@ struct fastrpc_file;

/*
 * A DMA buffer owned by a fastrpc file (process) context.
 * A buf lives on exactly one of two per-file lists: the cached-buf
 * free list (linked via @hn) or the remote-buf list (linked via
 * @hn_rem) when the memory was donated to the remote DSP processor.
 */
struct fastrpc_buf {
	struct hlist_node hn;		/* link in fl->cached_bufs */
	struct hlist_node hn_rem;	/* link in fl->remote_bufs */
	struct fastrpc_file *fl;	/* owning file/process context */
	void *virt;			/* kernel virtual address from dma_alloc_attrs (may be NULL with DMA_ATTR_NO_KERNEL_MAPPING) */
	uint64_t phys;			/* device/physical address; upper 32 bits may carry the SMMU context-bank id */
	size_t size;			/* allocation size in bytes */
	unsigned long dma_attr;		/* DMA_ATTR_* flags used at alloc time; must be replayed on dma_free_attrs */
	uintptr_t raddr;		/* remote (DSP-side) virtual address returned by the mmap RPC; 0 if not mapped */
	uint32_t flags;			/* mmap flags, e.g. ADSP_MMAP_ADD_PAGES */
	int remote;			/* nonzero while the buf is on fl->remote_bufs */
};

struct fastrpc_ctx_lst;
@@ -372,9 +378,11 @@ struct fastrpc_file {
	struct hlist_node hn;
	spinlock_t hlock;
	struct hlist_head maps;
	struct hlist_head bufs;
	struct hlist_head cached_bufs;
	struct hlist_head remote_bufs;
	struct fastrpc_ctx_lst clst;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;
	struct fastrpc_session_ctx *secsctx;
	uint32_t mode;
	uint32_t profile;
@@ -509,10 +517,17 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
		return;
	if (cache) {
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn, &fl->bufs);
		hlist_add_head(&buf->hn, &fl->cached_bufs);
		spin_unlock(&fl->hlock);
		return;
	}
	if (buf->remote) {
		spin_lock(&fl->hlock);
		hlist_del_init(&buf->hn_rem);
		spin_unlock(&fl->hlock);
		buf->remote = 0;
		buf->raddr = 0;
	}
	if (!IS_ERR_OR_NULL(buf->virt)) {
		int destVM[1] = {VMID_HLOS};
		int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
@@ -526,13 +541,13 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
			hyp_assign_phys(buf->phys, buf_page_size(buf->size),
				srcVM, 2, destVM, destVMperm, 1);
		}
		dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys);
		dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
					buf->phys, buf->dma_attr);
	}
	kfree(buf);
}

static void fastrpc_buf_list_free(struct fastrpc_file *fl)
static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *buf, *free;

@@ -541,7 +556,7 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)

		free = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			hlist_del_init(&buf->hn);
			free = buf;
			break;
@@ -552,6 +567,25 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)
	} while (free);
}

/*
 * Free every buffer on the file's remote-buf list.
 *
 * The spinlock only guards the list walk; the actual free is done
 * outside the lock because fastrpc_buf_free() may sleep (DMA free,
 * hyp_assign_phys). So we repeatedly pick the list head under the
 * lock, drop the lock, free it, and rescan until the list is empty.
 * fastrpc_buf_free() itself unlinks the node (buf->remote path).
 */
static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
{
	struct fastrpc_buf *victim;

	for (;;) {
		struct fastrpc_buf *buf;
		struct hlist_node *n;

		victim = NULL;
		spin_lock(&fl->hlock);
		hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
			/* take the current head; freeing happens unlocked */
			victim = buf;
			break;
		}
		spin_unlock(&fl->hlock);

		if (!victim)
			break;
		fastrpc_buf_free(victim, 0);
	}
}

static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
@@ -611,17 +645,16 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
	return -ENOTTY;
}

static int dma_alloc_memory(dma_addr_t *region_phys, size_t size)
static int dma_alloc_memory(dma_addr_t *region_phys, size_t size,
			unsigned long dma_attrs)
{
	struct fastrpc_apps *me = &gfa;
	void *vaddr = NULL;
	unsigned long dma_attrs = 0;

	if (me->dev == NULL) {
		pr_err("device adsprpc-mem is not initialized\n");
		return -ENODEV;
	}
	dma_attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
	vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
								dma_attrs);
	if (!vaddr) {
@@ -782,9 +815,12 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
	map->attr = attr;
	if (mflags == ADSP_MMAP_HEAP_ADDR ||
				mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
						DMA_ATTR_NO_KERNEL_MAPPING;

		map->apps = me;
		map->fl = NULL;
		VERIFY(err, !dma_alloc_memory(&region_phys, len));
		VERIFY(err, !dma_alloc_memory(&region_phys, len, dma_attrs));
		if (err)
			goto bail;
		map->phys = (uintptr_t)region_phys;
@@ -923,7 +959,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
}

static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			     struct fastrpc_buf **obuf)
				unsigned long dma_attr, uint32_t rflags,
				int remote, struct fastrpc_buf **obuf)
{
	int err = 0, vmid;
	struct fastrpc_buf *buf = NULL, *fr = NULL;
@@ -933,9 +970,10 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
	if (err)
		goto bail;

	if (!remote) {
		/* find the smallest buffer that fits in the cache */
		spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			if (buf->size >= size && (!fr || fr->size > buf->size))
				fr = buf;
		}
@@ -946,6 +984,7 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			*obuf = fr;
			return 0;
		}
	}
	buf = NULL;
	VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
	if (err)
@@ -955,17 +994,27 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
				       (void *)&buf->phys, GFP_KERNEL);
	buf->dma_attr = dma_attr;
	buf->flags = rflags;
	buf->raddr = 0;
	buf->remote = 0;
	buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
				       (dma_addr_t *)&buf->phys,
					   GFP_KERNEL, buf->dma_attr);
	if (IS_ERR_OR_NULL(buf->virt)) {
		/* free cache and retry */
		fastrpc_buf_list_free(fl);
		buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
					       (void *)&buf->phys, GFP_KERNEL);
		fastrpc_cached_buf_list_free(fl);
		buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
					       (dma_addr_t *)&buf->phys,
						   GFP_KERNEL, buf->dma_attr);
		VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
	}
	if (err)
	if (err) {
		err = -ENOMEM;
		pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
			current->comm, __func__, size);
		goto bail;
	}
	if (fl->sctx->smmu.cb)
		buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
	vmid = fl->apps->channel[fl->cid].vmid;
@@ -981,6 +1030,13 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
			goto bail;
	}

	if (remote) {
		INIT_HLIST_NODE(&buf->hn_rem);
		spin_lock(&fl->hlock);
		hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
		spin_unlock(&fl->hlock);
		buf->remote = remote;
	}
	*obuf = buf;
 bail:
	if (err && buf)
@@ -1453,7 +1509,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)

	/* allocate new buffer */
	if (copylen) {
		VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
		err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
		if (err)
			goto bail;
	}
@@ -2021,6 +2077,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
	struct fastrpc_ioctl_init *init = &uproc->init;
	struct smq_phy_page pages[1];
	struct fastrpc_mmap *file = NULL, *mem = NULL;
	struct fastrpc_buf *imem = NULL;
	unsigned long imem_dma_attr = 0;
	char *proc_name = NULL;

	VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
@@ -2053,6 +2111,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
		remote_arg_t ra[6];
		int fds[6];
		int mflags = 0;
		int memlen;
		struct {
			int pgid;
			unsigned int namelen;
@@ -2080,16 +2139,24 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
				goto bail;
		}
		inbuf.pageslen = 1;
		VERIFY(err, access_ok(1, (void __user *)init->mem,
			init->memlen));
		if (err)

		VERIFY(err, !init->mem);
		if (err) {
			err = -EINVAL;
			pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
				current->comm, __func__);
			goto bail;
		mutex_lock(&fl->fl_map_mutex);
		VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
				init->mem, init->memlen, mflags, &mem));
		mutex_unlock(&fl->fl_map_mutex);
		}
		memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
						1024*1024);
		imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
						DMA_ATTR_NO_KERNEL_MAPPING |
						DMA_ATTR_FORCE_NON_COHERENT;
		err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
		if (err)
			goto bail;
		fl->init_mem = imem;

		inbuf.pageslen = 1;
		ra[0].buf.pv = (void *)&inbuf;
		ra[0].buf.len = sizeof(inbuf);
@@ -2103,8 +2170,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
		ra[2].buf.len = inbuf.filelen;
		fds[2] = init->filefd;

		pages[0].addr = mem->phys;
		pages[0].size = mem->size;
		pages[0].addr = imem->phys;
		pages[0].size = imem->size;
		ra[3].buf.pv = (void *)pages;
		ra[3].buf.len = 1 * sizeof(*pages);
		fds[3] = 0;
@@ -2270,7 +2337,8 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
}

static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
			       struct fastrpc_mmap *map)
					uintptr_t va, uint64_t phys,
					size_t size, uintptr_t *raddr)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	struct fastrpc_apps *me = &gfa;
@@ -2289,13 +2357,13 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
	} routargs;

	inargs.pid = fl->tgid;
	inargs.vaddrin = (uintptr_t)map->va;
	inargs.vaddrin = (uintptr_t)va;
	inargs.flags = flags;
	inargs.num = fl->apps->compat ? num * sizeof(page) : num;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);
	page.addr = map->phys;
	page.size = map->size;
	page.addr = phys;
	page.size = size;
	ra[1].buf.pv = (void *)&page;
	ra[1].buf.len = num * sizeof(page);

@@ -2313,20 +2381,20 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
	ioctl.crc = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	map->raddr = (uintptr_t)routargs.vaddrout;
	*raddr = (uintptr_t)routargs.vaddrout;
	if (err)
		goto bail;
	if (flags == ADSP_MMAP_HEAP_ADDR) {
		struct scm_desc desc = {0};

		desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
		desc.args[1] = map->phys;
		desc.args[2] = map->size;
		desc.args[1] = phys;
		desc.args[2] = size;
		desc.arginfo = SCM_ARGS(3);
		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
			TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
		VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
				hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
				me->channel[fl->cid].rhvm.vmperm,
				me->channel[fl->cid].rhvm.vmcount));
@@ -2337,15 +2405,15 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
	return err;
}

static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
						size_t size, uint32_t flags)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;
	int destVM[1] = {VMID_HLOS};
	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

	if (map->flags == ADSP_MMAP_HEAP_ADDR) {
	if (flags == ADSP_MMAP_HEAP_ADDR) {
		struct fastrpc_ioctl_invoke_crc ioctl;
		struct scm_desc desc = {0};
		remote_arg_t ra[1];
@@ -2371,14 +2439,14 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
		if (err)
			goto bail;
		desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
		desc.args[1] = map->phys;
		desc.args[2] = map->size;
		desc.args[1] = phys;
		desc.args[2] = size;
		desc.args[3] = routargs.skey;
		desc.arginfo = SCM_ARGS(4);
		err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
			TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
	} else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
	} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
					me->channel[fl->cid].rhvm.vmid,
					me->channel[fl->cid].rhvm.vmcount,
					destVM, destVMperm, 1));
@@ -2390,8 +2458,8 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
	return err;
}

static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
				 struct fastrpc_mmap *map)
static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
				uint64_t phys, size_t size, uint32_t flags)
{
	struct fastrpc_ioctl_invoke_crc ioctl;
	remote_arg_t ra[1];
@@ -2403,8 +2471,8 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
	} inargs;

	inargs.pid = fl->tgid;
	inargs.size = map->size;
	inargs.vaddrout = map->raddr;
	inargs.size = size;
	inargs.vaddrout = raddr;
	ra[0].buf.pv = (void *)&inargs;
	ra[0].buf.len = sizeof(inargs);

@@ -2421,9 +2489,9 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
		FASTRPC_MODE_PARALLEL, 1, &ioctl)));
	if (err)
		goto bail;
	if (map->flags == ADSP_MMAP_HEAP_ADDR ||
				map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
	if (flags == ADSP_MMAP_HEAP_ADDR ||
				flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
		VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
		if (err)
			goto bail;
	}
@@ -2450,7 +2518,8 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
		spin_unlock(&me->hlock);

		if (match) {
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
			VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
						match->size, match->flags));
			if (err)
				goto bail;
			if (me->channel[0].ramdumpenabled) {
@@ -2539,14 +2608,40 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
{
	int err = 0;
	struct fastrpc_mmap *map = NULL;
	struct fastrpc_buf *rbuf = NULL, *free = NULL;
	struct hlist_node *n;

	mutex_lock(&fl->map_mutex);

	spin_lock(&fl->hlock);
	hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
		if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
			if ((rbuf->raddr == ud->vaddrout) &&
				(rbuf->size == ud->size)) {
				free = rbuf;
				break;
			}
		}
	}
	spin_unlock(&fl->hlock);

	if (free) {
		VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
			free->phys, free->size, free->flags));
		if (err)
			goto bail;
		fastrpc_buf_free(rbuf, 0);
		mutex_unlock(&fl->map_mutex);
		return err;
	}

	mutex_lock(&fl->fl_map_mutex);
	VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
	mutex_unlock(&fl->fl_map_mutex);
	if (err)
		goto bail;
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
	VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
				map->phys, map->size, map->flags));
	if (err)
		goto bail;
	mutex_lock(&fl->fl_map_mutex);
@@ -2592,9 +2687,34 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
{

	struct fastrpc_mmap *map = NULL;
	struct fastrpc_buf *rbuf = NULL;
	unsigned long dma_attr = 0;
	uintptr_t raddr = 0;
	int err = 0;

	mutex_lock(&fl->map_mutex);
	if (ud->flags == ADSP_MMAP_ADD_PAGES) {
		if (ud->vaddrin) {
			err = -EINVAL;
			pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
					current->comm, __func__);
			goto bail;
		}
		dma_attr = DMA_ATTR_EXEC_MAPPING |
					DMA_ATTR_NO_KERNEL_MAPPING |
					DMA_ATTR_FORCE_NON_COHERENT;
		err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
								1, &rbuf);
		if (err)
			goto bail;
		rbuf->virt = NULL;
		err = fastrpc_mmap_on_dsp(fl, ud->flags,
				(uintptr_t)rbuf->virt,
				rbuf->phys, rbuf->size, &raddr);
		if (err)
			goto bail;
		rbuf->raddr = raddr;
	} else {
		mutex_lock(&fl->fl_map_mutex);
		if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
				 ud->size, ud->flags, 1, &map)) {
@@ -2608,10 +2728,12 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
		mutex_unlock(&fl->fl_map_mutex);
		if (err)
			goto bail;
	VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
		VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map->va,
				map->phys, map->size, &raddr));
		if (err)
			goto bail;
	ud->vaddrout = map->raddr;
		map->raddr = raddr;
	}
 bail:
	if (err && map) {
		mutex_lock(&fl->fl_map_mutex);
@@ -2794,8 +2916,10 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
	spin_lock(&fl->hlock);
	fl->file_close = 1;
	spin_unlock(&fl->hlock);
	if (!IS_ERR_OR_NULL(fl->init_mem))
		fastrpc_buf_free(fl->init_mem, 0);
	fastrpc_context_list_dtor(fl);
	fastrpc_buf_list_free(fl);
	fastrpc_cached_buf_list_free(fl);
	mutex_lock(&fl->fl_map_mutex);
	do {
		lmap = NULL;
@@ -2827,6 +2951,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
		}
		kfree(fperf);
	} while (fperf);
	fastrpc_remote_buf_list_free(fl);
	mutex_unlock(&fl->perf_mutex);
	mutex_destroy(&fl->perf_mutex);
	mutex_destroy(&fl->fl_map_mutex);
@@ -3142,21 +3267,25 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
			"%-20d|0x%-20lX\n\n",
			map->secure, map->attr);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
				"%s %d\n\n",
				"KERNEL MEMORY ALLOCATION:", 1);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
			"\n======%s %s %s======\n", title,
			" LIST OF BUFS ", title);
			" LIST OF CACHED BUFS ", title);
		spin_lock(&fl->hlock);
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
			"%-19s|%-19s|%-19s\n",
			"virt", "phys", "size");
			"%-19s|%-19s|%-19s|%-19s\n",
			"virt", "phys", "size", "dma_attr");
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
			"%s%s%s%s%s\n", single_line, single_line,
			single_line, single_line, single_line);
		hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
		hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
			len += scnprintf(fileinfo + len,
			DEBUGFS_SIZE - len,
			"0x%-17p|0x%-17llX|%-19zu\n",
			buf->virt, (uint64_t)buf->phys, buf->size);
			"0x%-17p|0x%-17llX|%-19zu|0x%-17lX\n",
			buf->virt, (uint64_t)buf->phys, buf->size,
			buf->dma_attr);
		}
		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
			"\n%s %s %s\n", title,
@@ -3315,7 +3444,8 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
	spin_lock_init(&fl->hlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->perf);
	INIT_HLIST_HEAD(&fl->bufs);
	INIT_HLIST_HEAD(&fl->cached_bufs);
	INIT_HLIST_HEAD(&fl->remote_bufs);
	INIT_HLIST_NODE(&fl->hn);
	fl->sessionid = 0;
	fl->tgid = current->tgid;
@@ -3323,7 +3453,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
	fl->mode = FASTRPC_MODE_SERIAL;
	fl->cid = -1;
	fl->dev_minor = dev_minor;

	fl->init_mem = NULL;
	if (debugfs_file != NULL)
		fl->debugfs_file = debugfs_file;
	fl->qos_request = 0;
@@ -3414,6 +3544,9 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
	case FASTRPC_CONTROL_SMMU:
		fl->sharedcb = cp->smmu.sharedcb;
		break;
	case FASTRPC_CONTROL_KALLOC:
		cp->kalloc.kalloc_support = 1;
		break;
	default:
		err = -ENOTTY;
		break;
@@ -3600,6 +3733,11 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
		VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
		if (err)
			goto bail;
		if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
			K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
			if (err)
				goto bail;
		}
		break;
	case FASTRPC_IOCTL_GETINFO:
	    K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
+17 −1
Original line number Diff line number Diff line
@@ -132,10 +132,16 @@ struct compat_fastrpc_ctrl_latency {
	compat_uint_t level;	/* level of control */
};

/* Control request id: query kernel-side remote memory allocation support. */
#define FASTRPC_CONTROL_KALLOC		(3)
/* 32-bit compat layout of struct fastrpc_ctrl_kalloc. */
struct compat_fastrpc_ctrl_kalloc {
	compat_uint_t kalloc_support; /* Remote memory allocation from kernel */
};

/*
 * 32-bit compat layout of struct fastrpc_ioctl_control:
 * @req selects which union member is valid.
 */
struct compat_fastrpc_ioctl_control {
	compat_uint_t req;	/* FASTRPC_CONTROL_* request id */
	union {
		struct compat_fastrpc_ctrl_latency lp;		/* FASTRPC_CONTROL_LATENCY */
		struct compat_fastrpc_ctrl_kalloc kalloc;	/* FASTRPC_CONTROL_KALLOC (out) */
	};
};

@@ -528,6 +534,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
	{
		struct compat_fastrpc_ioctl_control __user *ctrl32;
		struct fastrpc_ioctl_control __user *ctrl;
		compat_uptr_t p;

		ctrl32 = compat_ptr(arg);
		VERIFY(err, NULL != (ctrl = compat_alloc_user_space(
@@ -540,6 +547,15 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
			return err;
		err = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_CONTROL,
							(unsigned long)ctrl);
		if (err)
			return err;
		err = get_user(p, &ctrl32->req);
		if (err)
			return err;
		if (p == FASTRPC_CONTROL_KALLOC) {
			err = get_user(p, &ctrl->kalloc.kalloc_support);
			err |= put_user(p, &ctrl32->kalloc.kalloc_support);
		}
		return err;
	}
	case COMPAT_FASTRPC_IOCTL_GETPERF:
+13 −5
Original line number Diff line number Diff line
@@ -243,18 +243,26 @@ struct fastrpc_ioctl_perf { /* kernel performance data */

#define FASTRPC_CONTROL_LATENCY (1)
struct fastrpc_ctrl_latency {
	uint32_t enable;	//!latency control enable
	uint32_t level;		//!level of control
	uint32_t enable;		/* latency control enable */
	uint32_t level;			/* level of control */
};

/* Control request id: configure SMMU context-bank sharing for this file. */
#define FASTRPC_CONTROL_SMMU (2)
struct fastrpc_ctrl_smmu {
	uint32_t sharedcb;	/* nonzero to share the SMMU context bank */
};

/*
 * Control request id: query whether the driver allocates remote memory
 * in the kernel. Output-only; the driver sets kalloc_support = 1.
 */
#define FASTRPC_CONTROL_KALLOC (3)
struct fastrpc_ctrl_kalloc {
	uint32_t kalloc_support; /* Remote memory allocation from kernel */
};

/*
 * Payload of FASTRPC_IOCTL_CONTROL: @req selects which union member
 * is valid. KALLOC is an output request (kernel fills kalloc_support).
 */
struct fastrpc_ioctl_control {
	uint32_t req;	/* FASTRPC_CONTROL_* request id */
	union {
		struct fastrpc_ctrl_latency lp;		/* FASTRPC_CONTROL_LATENCY */
		struct fastrpc_ctrl_smmu smmu;		/* FASTRPC_CONTROL_SMMU */
		struct fastrpc_ctrl_kalloc kalloc;	/* FASTRPC_CONTROL_KALLOC (out) */
	};
};