Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5aa72cc authored by qctecmdr, committed by Gerrit (the friendly Code Review server)
Browse files

Merge "msm: adsprpc: Change perf module design"

parents 5fd6cfbf 5d7778a0
Loading
Loading
Loading
Loading
+144 −162
Original line number Diff line number Diff line
@@ -91,6 +91,9 @@
#define NUM_SESSIONS	13	/* max 12 compute, 1 cpz */
#define M_FDLIST	(16)
#define M_CRCLIST	(64)
#define M_KERNEL_PERF_LIST (PERF_KEY_MAX)
#define M_DSP_PERF_LIST (12)

#define SESSION_ID_INDEX (30)
#define FASTRPC_CTX_MAGIC (0xbeeddeed)

@@ -147,8 +150,6 @@

#define RH_CID ADSP_DOMAIN_ID

#define PERF_KEYS \
	"count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_PROCESS_GROUP (1)
#define FASTRPC_STATIC_HANDLE_DSP_UTILITIES (2)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
@@ -193,6 +194,8 @@
/* Length of glink transaction history to store */
#define GLINK_MSG_HISTORY_LEN (128)

#define PERF_CAPABILITY   (1 << 1)

/* Fastrpc remote process attributes */
enum fastrpc_proc_attr {
	FASTRPC_MODE_UNSIGNED_MODULE = (1 << 3),
@@ -219,7 +222,7 @@ enum fastrpc_msg_type {
#define PERF(enb, cnt, ff) \
	{\
		struct timespec64 startT = {0};\
		int64_t *counter = cnt;\
		uint64_t *counter = cnt;\
		if (enb && counter) {\
			ktime_get_real_ts64(&startT);\
		} \
@@ -232,8 +235,8 @@ enum fastrpc_msg_type {
#define GET_COUNTER(perf_ptr, offset)  \
	(perf_ptr != NULL ?\
		(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
			(int64_t *)(perf_ptr + offset)\
				: (int64_t *)NULL) : (int64_t *)NULL)
			(uint64_t *)(perf_ptr + offset)\
				: (uint64_t *)NULL) : (uint64_t *)NULL)

/* Macro for comparing local client and PD names with those from callback */
#define COMPARE_SERVICE_LOCATOR_NAMES(cb_client, local_client, \
@@ -359,6 +362,19 @@ struct overlap {
	int do_cmo;		/*used for cache maintenance of inrout buffers*/
};

/*
 * Per-invocation kernel-side performance counters, indexed by
 * enum fastrpc_perfkeys via GET_COUNTER()/PERF().  All fields must be
 * 64-bit and stay in enum order, since the struct is addressed as a
 * flat uint64_t array and copied to userspace as M_KERNEL_PERF_LIST
 * (PERF_KEY_MAX) entries.
 */
struct fastrpc_perf {
	uint64_t count;		/* number of dynamic-handle invocations (PERF_COUNT) */
	uint64_t flush;		/* accumulated time in ns (PERF_FLUSH) */
	uint64_t map;		/* accumulated time in ns (PERF_MAP) */
	uint64_t copy;		/* accumulated time in ns (PERF_COPY) */
	uint64_t link;		/* accumulated time in ns -- transport/rpmsg leg */
	uint64_t getargs;	/* time spent in get_args() (PERF_GETARGS) */
	uint64_t putargs;	/* time spent in put_args() (PERF_PUTARGS) */
	uint64_t invargs;	/* time spent invalidating args (PERF_INVARGS) */
	uint64_t invoke;	/* end-to-end invoke time (PERF_INVOKE) */
	uint64_t tid;		/* owning process tgid, set at context_alloc() */
};

struct smq_invoke_ctx {
	struct hlist_node hn;
	/* Async node to add to async job ctx list */
@@ -383,8 +399,11 @@ struct smq_invoke_ctx {
	struct overlap **overps;
	struct smq_msg msg;
	uint32_t *crc;
	uint64_t *perf_kernel;
	uint64_t *perf_dsp;
	unsigned int magic;
	uint64_t ctxid;
	struct fastrpc_perf *perf;
	/* response flags from remote processor */
	enum fastrpc_response_flags rsp_flags;
	/* user hint of completion time in us */
@@ -535,21 +554,8 @@ enum fastrpc_perfkeys {
	PERF_PUTARGS = 6,
	PERF_INVARGS = 7,
	PERF_INVOKE = 8,
	PERF_KEY_MAX = 9,
};

struct fastrpc_perf {
	int64_t count;
	int64_t flush;
	int64_t map;
	int64_t copy;
	int64_t link;
	int64_t getargs;
	int64_t putargs;
	int64_t invargs;
	int64_t invoke;
	int64_t tid;
	struct hlist_node hn;
	PERF_TID = 9,
	PERF_KEY_MAX = 10,
};

struct fastrpc_file {
@@ -583,9 +589,7 @@ struct fastrpc_file {
	int file_close;
	int dsp_proc_init;
	struct fastrpc_apps *apps;
	struct hlist_head perf;
	struct dentry *debugfs_file;
	struct mutex perf_mutex;
	struct dev_pm_qos_request *dev_pm_qos_req;
	int qos_request;
	struct mutex map_mutex;
@@ -680,7 +684,7 @@ static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};

static uint32_t kernel_capabilities[FASTRPC_MAX_ATTRIBUTES -
					FASTRPC_MAX_DSP_ATTRIBUTES] = {
	1,
	PERF_CAPABILITY,
	/* PERF_LOGGING_V2_SUPPORT feature is supported, unsupported = 0 */
	1
	/* Fastrpc Driver error code changes present */
@@ -716,54 +720,14 @@ static inline int64_t get_timestamp_in_ns(void)
	ns = timespec64_to_ns(&ts);
	return ns;
}

/*
 * getperfcounter() - return a pointer to the perf counter selected by
 * @key for the calling thread on session @fl, lazily allocating the
 * thread's counter node on first use.
 *
 * @fl:  fastrpc session whose per-thread perf list is consulted
 * @key: counter index (see enum fastrpc_perfkeys)
 *
 * Return: pointer to the 64-bit counter, or NULL if @fl is invalid or
 * the allocation fails.
 *
 * NOTE(review): the result is computed as ((int64_t *)fperf) + key,
 * which assumes struct fastrpc_perf is a flat array of 64-bit fields
 * laid out in enum order -- confirm struct and enum stay in sync.
 * The pointer is returned after perf_mutex is dropped; the node itself
 * is only freed in fastrpc_file_free(), so it outlives the caller's use.
 */
static inline int64_t *getperfcounter(struct fastrpc_file *fl, int key)
{
	int err = 0;
	int64_t *val = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	struct hlist_node *n = NULL;

	/* Validate the session handle before touching its perf list */
	VERIFY(err, !IS_ERR_OR_NULL(fl));
	if (err)
		goto bail;

	mutex_lock(&fl->perf_mutex);
	/* One fastrpc_perf node per calling thread, keyed by pid */
	hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
		if (perf->tid == current->pid) {
			fperf = perf;
			break;
		}
	}

	if (IS_ERR_OR_NULL(fperf)) {
		/* First call from this thread: allocate and enlist a node */
		fperf = kzalloc(sizeof(*fperf), GFP_KERNEL);

		VERIFY(err, !IS_ERR_OR_NULL(fperf));
		if (err) {
			mutex_unlock(&fl->perf_mutex);
			kfree(fperf);
			goto bail;
		}

		fperf->tid = current->pid;
		hlist_add_head(&fperf->hn, &fl->perf);
	}

	val = ((int64_t *)fperf) + key;
	mutex_unlock(&fl->perf_mutex);
bail:
	return val;
}

static inline int poll_on_early_response(struct smq_invoke_ctx *ctx)
{
	int ii, jj, err = -EIO;
	uint32_t sc = ctx->sc;
	struct smq_invoke_buf *list;
	struct smq_phy_page *pages;
	uint64_t *fdlist;
	uint32_t *crclist, *poll;
	uint64_t *fdlist = NULL;
	uint32_t *crclist = NULL, *poll = NULL;
	unsigned int inbufs, outbufs, handles;

	/* calculate poll memory location */
@@ -1886,6 +1850,8 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
		}
	}
	ctx->crc = (uint32_t *)invokefd->crc;
	ctx->perf_dsp = (uint64_t *)invokefd->perf_dsp;
	ctx->perf_kernel = (uint64_t *)invokefd->perf_kernel;
	ctx->handle = invoke->handle;
	ctx->sc = invoke->sc;
	if (bufs) {
@@ -1902,6 +1868,18 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
	ctx->is_work_done = false;
	ctx->copybuf = NULL;
	ctx->is_early_wakeup = false;

	if (ctx->fl->profile) {
		ctx->perf = kzalloc(sizeof(*(ctx->perf)), GFP_KERNEL);
		VERIFY(err, !IS_ERR_OR_NULL(ctx->perf));
		if (err) {
			kfree(ctx->perf);
			err = -ENOMEM;
			goto bail;
		}
		memset(ctx->perf, 0, sizeof(*(ctx->perf)));
		ctx->perf->tid = fl->tgid;
	}
	if (invokefd->job) {
		K_COPY_FROM_USER(err, kernel, &ctx->asyncjob, invokefd->job,
						sizeof(ctx->asyncjob));
@@ -2002,6 +1980,8 @@ static void context_free(struct smq_invoke_ctx *ctx)
	ctx->lrpra = NULL;
	ctx->magic = 0;
	ctx->ctxid = 0;
	if (ctx->fl->profile)
		kfree(ctx->perf);

	trace_fastrpc_context_free((uint64_t)ctx,
		ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc);
@@ -2225,13 +2205,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
	int i, oix;
	int err = 0;
	int mflags = 0;
	uint64_t *fdlist;
	uint32_t *crclist;
	uint64_t *fdlist = NULL;
	uint32_t *crclist = NULL;
	uint32_t early_hint;
	int64_t *perf_counter = NULL;
	uint64_t *perf_counter = NULL;

	if (ctx->fl->profile)
		perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
		perf_counter = (uint64_t *)ctx->perf + PERF_COUNT;

	/* calculate size of the metadata */
	rpra = NULL;
@@ -2275,9 +2255,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
	}
	mutex_unlock(&ctx->fl->map_mutex);

	/* metalen includes meta data, fds, crc and early wakeup hint */
	/* metalen includes meta data, fds, crc, dsp perf and early wakeup hint */
	metalen = totallen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
			(sizeof(uint32_t) * M_CRCLIST) + sizeof(early_hint);
			(sizeof(uint32_t) * M_CRCLIST) + (sizeof(uint64_t) * M_DSP_PERF_LIST) +
			sizeof(early_hint);

	if (metalen) {
		err = fastrpc_buf_alloc(ctx->fl, metalen, 0, 0,
@@ -2413,8 +2394,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
	crclist = (uint32_t *)&fdlist[M_FDLIST];
	/* reset fds, crc and early wakeup hint memory */
	/* remote process updates these values before responding */
	memset(fdlist, 0, sizeof(uint64_t)*M_FDLIST +
			sizeof(uint32_t)*M_CRCLIST + sizeof(early_hint));
	memset(fdlist, 0, sizeof(uint64_t)*M_FDLIST + sizeof(uint32_t)*M_CRCLIST +
			(sizeof(uint64_t) * M_DSP_PERF_LIST) + sizeof(early_hint));

	/* copy non ion buffers */
	PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
@@ -2564,7 +2545,8 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
	struct smq_phy_page *pages;
	struct fastrpc_mmap *mmap;
	uint64_t *fdlist;
	uint32_t *crclist = NULL;
	uint32_t *crclist = NULL, *poll = NULL;
	uint64_t *perf_dsp_list = NULL;

	remote_arg64_t *rpra = ctx->lrpra;
	int i, inbufs, outbufs, handles;
@@ -2577,6 +2559,8 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
	pages = smq_phy_page_start(sc, list);
	fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
	crclist = (uint32_t *)(fdlist + M_FDLIST);
	poll = (uint32_t *)(crclist + M_CRCLIST);
	perf_dsp_list = (uint64_t *)(poll + 1);

	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (!ctx->maps[i]) {
@@ -2610,6 +2594,9 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx,
	if (ctx->crc && crclist && rpra)
		K_COPY_TO_USER(err, kernel, ctx->crc,
			crclist, M_CRCLIST*sizeof(uint32_t));
	if (ctx->perf_dsp && perf_dsp_list)
		K_COPY_TO_USER_WITHOUT_ERR(kernel, ctx->perf_dsp,
			perf_dsp_list, M_DSP_PERF_LIST*sizeof(uint64_t));

 bail:
	return err;
@@ -2937,18 +2924,18 @@ static void fastrpc_wait_for_completion(struct smq_invoke_ctx *ctx,
	return;
}

static void fastrpc_update_invoke_count(uint32_t handle, int64_t *perf_counter,
static void fastrpc_update_invoke_count(uint32_t handle, uint64_t *perf_counter,
					struct timespec64 *invoket)
{
	/* update invoke count for dynamic handles */
	if (handle != FASTRPC_STATIC_HANDLE_LISTENER) {
		int64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);
		uint64_t *count = GET_COUNTER(perf_counter, PERF_INVOKE);

		if (count)
			*count += getnstimediff(invoket);
	}
	if (handle > FASTRPC_STATIC_HANDLE_MAX) {
		int64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);
		uint64_t *count = GET_COUNTER(perf_counter, PERF_COUNT);

		if (count)
			*count += 1;
@@ -2963,7 +2950,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
	struct fastrpc_ioctl_invoke *invoke = &inv->inv;
	int err = 0, interrupted = 0, cid = -1;
	struct timespec64 invoket = {0};
	int64_t *perf_counter = NULL;
	uint64_t *perf_counter = NULL;
	bool isasyncinvoke = false, isworkdone = false;

	cid = fl->cid;
@@ -2977,7 +2964,6 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
	}

	if (fl->profile) {
		perf_counter = getperfcounter(fl, PERF_COUNT);
		ktime_get_real_ts64(&invoket);
	}

@@ -3016,6 +3002,8 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
	if (err)
		goto bail;
	isasyncinvoke = (ctx->asyncjob.isasyncjob ? true : false);
	if (fl->profile)
		perf_counter = (uint64_t *)ctx->perf + PERF_COUNT;
	PERF(fl->profile, GET_COUNTER(perf_counter, PERF_GETARGS),
	VERIFY(err, 0 == (err = get_args(kernel, ctx)));
	PERF_END);
@@ -3067,14 +3055,27 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
		trace_fastrpc_context_interrupt(cid, (uint64_t)ctx,
			ctx->msg.invoke.header.ctx, ctx->handle, ctx->sc);
		context_save_interrupted(ctx);
	} else if (ctx) {
		if (fl->profile && !interrupted)
			fastrpc_update_invoke_count(invoke->handle,
				perf_counter, &invoket);
		if (fl->profile && ctx->handle > FASTRPC_STATIC_HANDLE_MAX) {
			trace_fastrpc_perf_counters(ctx->handle, ctx->sc,
			ctx->perf->count, ctx->perf->flush, ctx->perf->map,
			ctx->perf->copy, ctx->perf->link, ctx->perf->getargs,
			ctx->perf->putargs, ctx->perf->invargs,
			ctx->perf->invoke, ctx->perf->tid);
			if (ctx->perf_kernel)
				K_COPY_TO_USER_WITHOUT_ERR(kernel, ctx->perf_kernel,
				ctx->perf, M_KERNEL_PERF_LIST*sizeof(uint64_t));
		}
	else if (ctx)
		context_free(ctx);
	}
	if (fl->ssrcount != fl->apps->channel[cid].ssrcount)
		err = -ECONNRESET;

invoke_end:
	if (fl->profile && !interrupted)
	if (fl->profile && !interrupted && isasyncinvoke)
		fastrpc_update_invoke_count(invoke->handle, perf_counter,
						&invoket);
	return err;
@@ -3087,7 +3088,7 @@ static int fastrpc_wait_on_async_queue(
	int err = 0, ierr = 0, interrupted = 0;
	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL, *n = NULL;
	unsigned long flags;
	int64_t *perf_counter = NULL;
	uint64_t *perf_counter = NULL;
	bool isworkdone = false;

read_async_job:
@@ -3109,6 +3110,8 @@ static int fastrpc_wait_on_async_queue(
		break;
	}
	spin_unlock_irqrestore(&fl->aqlock, flags);
	if (fl->profile)
		perf_counter = (uint64_t *)ctx->perf + PERF_COUNT;
	if (ctx) {
		fastrpc_wait_for_completion(ctx, &interrupted, 0, 1,
							&isworkdone);
@@ -3120,6 +3123,10 @@ static int fastrpc_wait_on_async_queue(
		}
		async_res->jobid = ctx->asyncjob.jobid;
		async_res->result = ctx->retval;
		async_res->handle = ctx->handle;
		async_res->sc = ctx->sc;
		async_res->perf_dsp = (uint64_t *)ctx->perf_dsp;
		async_res->perf_kernel = (uint64_t *)ctx->perf_kernel;

		PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
		inv_args(ctx);
@@ -3138,8 +3145,19 @@ static int fastrpc_wait_on_async_queue(
bail:
	if (ierr)
		async_res->result = ierr;
	if (ctx)
	if (ctx) {
		if (fl->profile && ctx->handle > FASTRPC_STATIC_HANDLE_MAX) {
			trace_fastrpc_perf_counters(ctx->handle, ctx->sc,
			ctx->perf->count, ctx->perf->flush, ctx->perf->map,
			ctx->perf->copy, ctx->perf->link, ctx->perf->getargs,
			ctx->perf->putargs, ctx->perf->invargs,
			ctx->perf->invoke, ctx->perf->tid);
			if (ctx->perf_kernel)
				K_COPY_TO_USER_WITHOUT_ERR(0, ctx->perf_kernel,
				ctx->perf, M_KERNEL_PERF_LIST*sizeof(uint64_t));
		}
		context_free(ctx);
	}
	return err;
}

@@ -3231,6 +3249,7 @@ static int fastrpc_internal_invoke2(struct fastrpc_file *fl,
{
	union {
		struct fastrpc_ioctl_invoke_async inv;
		struct fastrpc_ioctl_invoke_async_no_perf inv3;
		struct fastrpc_ioctl_async_response async_res;
		uint32_t user_concurrency;
	} p;
@@ -3254,14 +3273,23 @@ static int fastrpc_internal_invoke2(struct fastrpc_file *fl,
	switch (inv2->req) {
	case FASTRPC_INVOKE2_ASYNC:
		size = sizeof(struct fastrpc_ioctl_invoke_async);
		VERIFY(err, size == inv2->size);
		VERIFY(err, size >= inv2->size);
		if (err) {
			err = -EBADE;
			goto bail;
		}
		if (size > inv2->size) {
			K_COPY_FROM_USER(err, 0, &p.inv3, (void *)inv2->invparam,
				sizeof(struct fastrpc_ioctl_invoke_async_no_perf));
			if (err)
				goto bail;
			memcpy(&p.inv, &p.inv3, sizeof(struct fastrpc_ioctl_invoke_crc));
			memcpy(&p.inv.job, &p.inv3.job, sizeof(struct fastrpc_async_job));
		} else {
			K_COPY_FROM_USER(err, 0, &p.inv, (void *)inv2->invparam, size);
			if (err)
				goto bail;
		}
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, fl->mode,
					USER_MSG, &p.inv)));
		if (err)
@@ -3269,7 +3297,7 @@ static int fastrpc_internal_invoke2(struct fastrpc_file *fl,
		break;
	case FASTRPC_INVOKE2_ASYNC_RESPONSE:
		VERIFY(err,
		sizeof(struct fastrpc_ioctl_async_response) == inv2->size);
		sizeof(struct fastrpc_ioctl_async_response) >= inv2->size);
		if (err) {
			err = -EBADE;
			goto bail;
@@ -3355,6 +3383,8 @@ static int fastrpc_init_attach_process(struct fastrpc_file *fl,
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;

	if (init->flags == FASTRPC_INIT_ATTACH)
@@ -3541,6 +3571,8 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl,
	ioctl.fds = fds;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl);
	if (err)
@@ -3686,6 +3718,8 @@ static int fastrpc_init_create_static_process(struct fastrpc_file *fl,
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl);
	if (err)
@@ -3785,6 +3819,8 @@ static int fastrpc_send_cpuinfo_to_dsp(struct fastrpc_file *fl)
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	fl->pd = 1;

@@ -3824,6 +3860,8 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_file *fl,
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	fl->pd = 1;

@@ -3937,6 +3975,8 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	/*
	 * Pass 2 for "kernel" arg to send kernel msg to DSP
@@ -3998,6 +4038,8 @@ static int fastrpc_mem_map_to_dsp(struct fastrpc_file *fl, int fd, int offset,
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl)));
@@ -4040,6 +4082,8 @@ static int fastrpc_mem_unmap_to_dsp(struct fastrpc_file *fl, int fd,
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl)));
@@ -4080,6 +4124,8 @@ static int fastrpc_unmap_on_dsp(struct fastrpc_file *fl,
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl)));
@@ -4132,6 +4178,8 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
	ioctl.fds = NULL;
	ioctl.attrs = NULL;
	ioctl.crc = NULL;
	ioctl.perf_kernel = NULL;
	ioctl.perf_dsp = NULL;
	ioctl.job = NULL;
	VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
		FASTRPC_MODE_PARALLEL, KERNEL_MSG_WITH_ZERO_PID, &ioctl)));
@@ -4195,6 +4243,8 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
		ioctl.fds = NULL;
		ioctl.attrs = NULL;
		ioctl.crc = NULL;
		ioctl.perf_kernel = NULL;
		ioctl.perf_dsp = NULL;
		ioctl.job = NULL;

		VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
@@ -4860,7 +4910,6 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
{
	struct hlist_node *n = NULL;
	struct fastrpc_mmap *map = NULL, *lmap = NULL;
	struct fastrpc_perf *perf = NULL, *fperf = NULL;
	unsigned long flags;
	int cid;

@@ -4913,21 +4962,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
	if (fl->secsctx)
		fastrpc_session_free(&fl->apps->channel[cid], fl->secsctx);

	mutex_lock(&fl->perf_mutex);
	do {
		struct hlist_node *pn = NULL;

		fperf = NULL;
		hlist_for_each_entry_safe(perf, pn, &fl->perf, hn) {
			hlist_del_init(&perf->hn);
			fperf = perf;
			break;
		}
		kfree(fperf);
	} while (fperf);
	fastrpc_remote_buf_list_free(fl);
	mutex_unlock(&fl->perf_mutex);
	mutex_destroy(&fl->perf_mutex);
	mutex_destroy(&fl->map_mutex);
	mutex_destroy(&fl->internal_map_mutex);
	kfree(fl->dev_pm_qos_req);
@@ -5278,7 +5313,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
	spin_lock_init(&fl->hlock);
	spin_lock_init(&fl->aqlock);
	INIT_HLIST_HEAD(&fl->maps);
	INIT_HLIST_HEAD(&fl->perf);
	INIT_HLIST_HEAD(&fl->cached_bufs);
	fl->num_cached_buf = 0;
	INIT_HLIST_HEAD(&fl->remote_bufs);
@@ -5291,7 +5325,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
	fl->cid = -1;
	fl->dev_minor = dev_minor;
	fl->init_mem = NULL;
	memset(&fl->perf, 0, sizeof(fl->perf));
	fl->qos_request = 0;
	fl->dsp_proc_init = 0;
	filp->private_data = fl;
@@ -5300,7 +5333,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
	spin_lock(&me->hlock);
	hlist_add_head(&fl->hn, &me->drivers);
	spin_unlock(&me->hlock);
	mutex_init(&fl->perf_mutex);
	fl->dev_pm_qos_req = kcalloc(me->silvercores.corecount,
				sizeof(struct dev_pm_qos_request),
				GFP_KERNEL);
@@ -5582,57 +5614,6 @@ static int fastrpc_setmode(unsigned long ioctl_param,
	return err;
}

/*
 * fastrpc_getperf() - FASTRPC_IOCTL_GETPERF handler.
 *
 * Copies the request from @param, then optionally writes back:
 *  - the PERF_KEYS name string (if the user supplied ->keys),
 *  - the calling thread's accumulated counters (if the user supplied
 *    ->data), found by matching perf->tid against current->pid,
 * and finally the updated struct (->numkeys is set to PERF_KEY_MAX).
 *
 * @ioctl_perf: kernel-side scratch copy of the user request
 * @param:      user pointer to struct fastrpc_ioctl_perf
 * @fl:         fastrpc session owning the per-thread perf list
 *
 * Return: 0 on success, -EFAULT on any user-copy failure.
 *
 * NOTE(review): the counter copy excludes the trailing hlist_node by
 * subtracting its size -- this assumes hn is the last member of
 * struct fastrpc_perf; verify against the struct definition.
 */
static int fastrpc_getperf(struct fastrpc_ioctl_perf *ioctl_perf,
				void *param, struct fastrpc_file *fl)
{
	int err = 0;

	K_COPY_FROM_USER(err, 0, ioctl_perf,
				param, sizeof(*ioctl_perf));
	if (err) {
		err = -EFAULT;
		goto bail;
	}
	ioctl_perf->numkeys = PERF_KEY_MAX;
	if (ioctl_perf->keys) {
		/* Export the colon-separated counter names to userspace */
		char *keys = PERF_KEYS;

		K_COPY_TO_USER(err, 0, (void *)ioctl_perf->keys,
					 keys, strlen(keys)+1);
		if (err) {
			err = -EFAULT;
			goto bail;
		}
	}
	if (ioctl_perf->data) {
		struct fastrpc_perf *perf = NULL, *fperf = NULL;
		struct hlist_node *n = NULL;

		/* Find the counter node belonging to the calling thread */
		mutex_lock(&fl->perf_mutex);
		hlist_for_each_entry_safe(perf, n, &fl->perf, hn) {
			if (perf->tid == current->pid) {
				fperf = perf;
				break;
			}
		}

		mutex_unlock(&fl->perf_mutex);

		if (fperf) {
			/* Copy counters only, not the trailing list node */
			K_COPY_TO_USER(err, 0,
				(void *)ioctl_perf->data, fperf,
				sizeof(*fperf) - sizeof(struct hlist_node));
		}
	}
	/* Write back the request with numkeys filled in */
	K_COPY_TO_USER(err, 0, param, ioctl_perf, sizeof(*ioctl_perf));
	if (err) {
		err = -EFAULT;
		goto bail;
	}
bail:
	return err;
}

static int fastrpc_control(struct fastrpc_ioctl_control *cp,
				void *param, struct fastrpc_file *fl)
{
@@ -5692,7 +5673,6 @@ union fastrpc_ioctl_param {
	struct fastrpc_ioctl_munmap_64 munmap64;
	struct fastrpc_ioctl_munmap_fd munmap_fd;
	struct fastrpc_ioctl_init_attrs init;
	struct fastrpc_ioctl_perf perf;
	struct fastrpc_ioctl_control cp;
	struct fastrpc_ioctl_capability cap;
	struct fastrpc_ioctl_invoke2 inv2;
@@ -5836,6 +5816,8 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
	p.inv.fds = NULL;
	p.inv.attrs = NULL;
	p.inv.crc = NULL;
	p.inv.perf_kernel = NULL;
	p.inv.perf_dsp = NULL;
	p.inv.job = NULL;

	err = fastrpc_check_pd_status(fl,
@@ -5867,6 +5849,9 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
	case FASTRPC_IOCTL_INVOKE_CRC:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_crc);
	case FASTRPC_IOCTL_INVOKE_PERF:
		if (!size)
			size = sizeof(struct fastrpc_ioctl_invoke_perf);
		K_COPY_FROM_USER(err, 0, &p.inv, param, size);
		if (err) {
			err = -EFAULT;
@@ -5891,9 +5876,6 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
	case FASTRPC_IOCTL_SETMODE:
		err = fastrpc_setmode(ioctl_param, fl);
		break;
	case FASTRPC_IOCTL_GETPERF:
		err = fastrpc_getperf(&p.perf, param, fl);
		break;
	case FASTRPC_IOCTL_CONTROL:
		err = fastrpc_control(&p.cp, param, fl);
		break;
+81 −75

File changed.

Preview size limit exceeded, changes collapsed.

+35 −7
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@
#define FASTRPC_IOCTL_INVOKE_ATTRS \
				_IOWR('R', 7, struct fastrpc_ioctl_invoke_attrs)
#define FASTRPC_IOCTL_GETINFO	_IOWR('R', 8, uint32_t)
#define FASTRPC_IOCTL_GETPERF	_IOWR('R', 9, struct fastrpc_ioctl_perf)
//#define FASTRPC_IOCTL_GETPERF	_IOWR('R', 9, struct fastrpc_ioctl_perf)
#define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs)
#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc)
#define FASTRPC_IOCTL_CONTROL   _IOWR('R', 12, struct fastrpc_ioctl_control)
@@ -28,6 +28,8 @@
#define FASTRPC_IOCTL_INVOKE2   _IOWR('R', 18, struct fastrpc_ioctl_invoke2)
#define FASTRPC_IOCTL_MEM_MAP   _IOWR('R', 19, struct fastrpc_ioctl_mem_map)
#define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 20, struct fastrpc_ioctl_mem_unmap)
#define FASTRPC_IOCTL_INVOKE_PERF \
		_IOWR('R', 21, struct fastrpc_ioctl_invoke_perf)

#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
@@ -133,6 +135,15 @@ do {\
} while (0)
#endif

/*
 * K_COPY_TO_USER_WITHOUT_ERR() - best-effort copy of @size bytes from
 * kernel buffer @src to destination @dst.  When @kernel is false, @dst
 * is a userspace pointer and copy_to_user() is used; otherwise @dst is
 * a kernel pointer and memmove() is used.
 *
 * NOTE(review): the copy_to_user() return value is deliberately ignored
 * ("WITHOUT_ERR") -- callers treat perf-counter delivery as optional and
 * must not fail the invoke on a partial copy.  Comments are kept outside
 * the macro body because of the line-continuation backslashes.
 */
#define K_COPY_TO_USER_WITHOUT_ERR(kernel, dst, src, size) \
	do {\
		if (!(kernel))\
			copy_to_user((void __user *)(dst),\
			(src), (size));\
		else\
			memmove((dst), (src), (size));\
	} while (0)

#define ADSPRPC_ERR(fmt, args...)\
	pr_err("Error: adsprpc (%d): %s: %s: " fmt, __LINE__,\
	current->comm, __func__, ##args)
@@ -209,6 +220,15 @@ struct fastrpc_ioctl_invoke_crc {
	unsigned int *crc;
};

/*
 * FASTRPC_IOCTL_INVOKE_PERF argument: the invoke_crc layout extended
 * with optional user buffers that receive kernel- and DSP-side perf
 * counters.  NULL perf pointers disable counter copy-back.
 */
struct fastrpc_ioctl_invoke_perf {
	struct fastrpc_ioctl_invoke inv;	/* base invocation (handle, sc, args) */
	int *fds;				/* fd list */
	unsigned int *attrs;			/* attribute list */
	unsigned int *crc;			/* user buffer for CRC list */
	uint64_t *perf_kernel;			/* user buffer, M_KERNEL_PERF_LIST entries */
	uint64_t *perf_dsp;			/* user buffer, M_DSP_PERF_LIST entries */
};

struct fastrpc_async_job {
	uint32_t isasyncjob; /* flag to distinguish async job */
	uint64_t jobid;      /* job id generated by user */
@@ -216,6 +236,16 @@ struct fastrpc_async_job {
};

/*
 * FASTRPC_INVOKE2_ASYNC argument: invoke_perf layout plus the async
 * job descriptor.  Must remain a prefix-compatible superset of
 * fastrpc_ioctl_invoke_async_no_perf (see fastrpc_internal_invoke2).
 */
struct fastrpc_ioctl_invoke_async {
	struct fastrpc_ioctl_invoke inv;
	int *fds;		/* fd list */
	unsigned int *attrs;	/* attribute list */
	unsigned int *crc;	/* user buffer for CRC list */
	uint64_t *perf_kernel;	/* user buffer for kernel perf counters */
	uint64_t *perf_dsp;	/* user buffer for DSP perf counters */
	struct fastrpc_async_job *job; /* async job*/
};

struct fastrpc_ioctl_invoke_async_no_perf {
	struct fastrpc_ioctl_invoke inv;
	int *fds;		/* fd list */
	unsigned int *attrs;	/* attribute list */
@@ -226,6 +256,10 @@ struct fastrpc_ioctl_invoke_async {
/*
 * FASTRPC_INVOKE2_ASYNC_RESPONSE result: filled by the kernel from the
 * completed context in fastrpc_wait_on_async_queue().
 */
struct fastrpc_ioctl_async_response {
	uint64_t jobid;/* job id generated by user */
	int result; /* result from DSP */
	uint64_t *perf_kernel;	/* echoed from the originating invoke */
	uint64_t *perf_dsp;	/* echoed from the originating invoke */
	uint32_t handle;	/* remote handle of the completed call */
	uint32_t sc;		/* scalars of the completed call */
};

enum fastrpc_invoke2_type {
@@ -382,12 +416,6 @@ enum dsp_map_flags {
	ADSP_MMAP_PERSIST_HDR = 0x4000,
};

struct fastrpc_ioctl_perf {			/* kernel performance data */
	uintptr_t data;		/* user buffer receiving the counters */
	uint32_t numkeys;	/* set by kernel to PERF_KEY_MAX */
	uintptr_t keys;		/* user buffer receiving the PERF_KEYS string */
};

enum fastrpc_control_type {
	FASTRPC_CONTROL_LATENCY		=	1,
	FASTRPC_CONTROL_SMMU		=	2,
+49 −1
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 */

#undef TRACE_SYSTEM
@@ -311,6 +311,54 @@ TRACE_EVENT(fastrpc_context_free,
		__entry->smq_ctx, __entry->ctx, __entry->handle, __entry->sc)
);

/*
 * fastrpc_perf_counters - emitted after a profiled invoke completes on a
 * dynamic handle (handle > FASTRPC_STATIC_HANDLE_MAX); records the full
 * struct fastrpc_perf counter set for that context.
 */
TRACE_EVENT(fastrpc_perf_counters,

	TP_PROTO(uint32_t handle, uint32_t sc,
		uint64_t count, uint64_t flush, uint64_t map,
		uint64_t copy, uint64_t link, uint64_t getargs,
		uint64_t putargs, uint64_t invargs, uint64_t invoke,
		uint64_t tid),

	TP_ARGS(handle, sc, count, flush, map, copy, link, getargs,
		putargs, invargs, invoke, tid),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, sc)
		__field(u64, count)
		__field(u64, flush)
		__field(u64, map)
		__field(u64, copy)
		__field(u64, link)
		__field(u64, getargs)
		__field(u64, putargs)
		__field(u64, invargs)
		__field(u64, invoke)
		__field(u64, tid)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->sc = sc;
		__entry->count = count;
		__entry->flush = flush;
		__entry->map = map;
		__entry->copy = copy;
		__entry->link = link;
		__entry->getargs = getargs;
		__entry->putargs = putargs;
		__entry->invargs = invargs;
		__entry->invoke = invoke;
		__entry->tid = tid;
	),

	TP_printk("for: handle 0x%x, sc 0x%x, count %lld, flush %lld ns, map %lld ns, copy %lld ns, link %lld ns, getargs %lld ns, putargs %lld ns, invargs %lld ns, invoke %lld ns, tid %lld",
		__entry->handle, __entry->sc, __entry->count,
		__entry->flush, __entry->map, __entry->copy, __entry->link,
		__entry->getargs, __entry->putargs, __entry->invargs,
		__entry->invoke, __entry->tid)
);

#endif

/* This part must be outside protection */