Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e0c00ec authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: cvp: Support CVP session abort"

parents e3f3ccb2 396475a0
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -2674,7 +2674,6 @@ static int venus_hfi_session_abort(void *sess)

	mutex_lock(&device->lock);

	__flush_debug_queue(device, NULL);
	rc = __send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT);

	mutex_unlock(&device->lock);
+1 −0
Original line number Diff line number Diff line
@@ -152,6 +152,7 @@ enum hal_ssr_trigger_type {
	SSR_ERR_FATAL = 1,
	SSR_SW_DIV_BY_ZERO,
	SSR_HW_WDOG_IRQ,
	SSR_SESSION_ABORT,
};

struct cvp_hal_profile_level {
+10 −2
Original line number Diff line number Diff line
@@ -573,8 +573,8 @@ struct cvp_hfi_client {
	u32 transaction_id;
	u32 data1;
	u32 data2;
	u32 data3;
	u32 data4;
	u32 kdata1;
	u32 kdata2;
	u32 reserved1;
	u32 reserved2;
};
@@ -637,6 +637,14 @@ struct cvp_session_release_buffers_packet_d {
	u32 buffer_idx;
};

/*
 * Common header prepended to HFI session command packets.
 * client_data is opaque to firmware and echoed back in the response;
 * the driver packs a 64-bit kernel transaction id (ktid) into
 * client_data.kdata1/kdata2 to pair responses with mapped frames.
 */
struct cvp_hfi_cmd_session_hdr {
	u32 size;		/* presumably total packet size in bytes — TODO confirm */
	u32 packet_type;	/* HFI command identifier — TODO confirm */
	u32 session_id;
	struct cvp_hfi_client client_data;	/* echoed back by firmware */
	u32 stream_idx;
};

struct cvp_hfi_msg_session_hdr {
	u32 size;
	u32 packet_type;
+1 −1
Original line number Diff line number Diff line
@@ -158,7 +158,7 @@ static int hfi_process_session_error(u32 device_id,
		dprintk(CVP_ERR,
			"%s: session %x data1 %#x, data2 %#x\n", __func__,
			pkt->session_id, pkt->event_data1, pkt->event_data2);
		info->response_type = HAL_SESSION_ERROR;
		info->response_type = HAL_RESPONSE_UNUSED;
		break;
	}

+373 −73
Original line number Diff line number Diff line
@@ -8,14 +8,6 @@
#include <synx_api.h>
#include "cvp_core_hfi.h"

#define MSM_CVP_NOMINAL_CYCLES		(444 * 1000 * 1000)
#define MSM_CVP_UHD60E_VPSS_CYCLES	(111 * 1000 * 1000)
#define MSM_CVP_UHD60E_ISE_CYCLES	(175 * 1000 * 1000)
#define MAX_CVP_VPSS_CYCLES		(MSM_CVP_NOMINAL_CYCLES - \
		MSM_CVP_UHD60E_VPSS_CYCLES)
#define MAX_CVP_ISE_CYCLES		(MSM_CVP_NOMINAL_CYCLES - \
		MSM_CVP_UHD60E_ISE_CYCLES)

void print_cvp_internal_buffer(u32 tag, const char *str,
		struct msm_cvp_inst *inst, struct msm_cvp_internal_buffer *cbuf)
{
@@ -92,14 +84,16 @@ static int msm_cvp_get_session_info(struct msm_cvp_inst *inst,
	if (!s)
		return -ECONNRESET;

	s->cur_cmd_type = CVP_KMD_GET_SESSION_INFO;
	session->session_id = hash32_ptr(inst->session);
	dprintk(CVP_DBG, "%s: id 0x%x\n", __func__, session->session_id);

	s->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}

static int msm_cvp_session_get_iova_addr(
static int msm_cvp_session_get_iova_addr_d(
	struct msm_cvp_inst *inst,
	struct msm_cvp_internal_buffer **cbuf_ptr,
	unsigned int search_fd, unsigned int search_size,
@@ -117,16 +111,6 @@ static int msm_cvp_session_get_iova_addr(
		}
	}
	mutex_unlock(&inst->cvpcpubufs.lock);
	if (!found) {
		mutex_lock(&inst->cvpdspbufs.lock);
		list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
			if (cbuf->buf.fd == search_fd) {
				found = true;
				break;
			}
		}
		mutex_unlock(&inst->cvpdspbufs.lock);
	}
	if (!found)
		return -ENOENT;

@@ -145,6 +129,35 @@ static int msm_cvp_session_get_iova_addr(
	return 0;
}

/*
 * Look up the device address (iova) previously mapped for a CPU-side
 * buffer. A buffer matches when fd, size and offset all equal those of
 * in_buf.
 *
 * Returns 0 with *iova set (device_addr + offset) on a hit,
 * -ENOENT with *iova cleared on a miss, -EINVAL on bad arguments.
 */
static int msm_cvp_session_get_iova_addr(
	struct msm_cvp_inst *inst,
	struct cvp_buf_type *in_buf,
	unsigned int *iova)
{
	struct msm_cvp_internal_buffer *cbuf;

	/*
	 * Use logical '||' (original had bitwise '|') and also validate
	 * in_buf, which is dereferenced in the match test below.
	 */
	if (!inst || !in_buf || !iova) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&inst->cvpcpubufs.lock);
	list_for_each_entry(cbuf, &inst->cvpcpubufs.list, list) {
		if (cbuf->buf.fd == in_buf->fd &&
			cbuf->buf.size == in_buf->size &&
			cbuf->buf.offset == in_buf->offset) {
			*iova = cbuf->smem.device_addr + cbuf->buf.offset;
			print_client_buffer(CVP_DBG, "found", inst, &cbuf->buf);
			mutex_unlock(&inst->cvpcpubufs.lock);
			return 0;
		}
	}
	mutex_unlock(&inst->cvpcpubufs.lock);
	*iova = 0;

	return -ENOENT;
}

static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
	struct cvp_kmd_buffer *buf)
{
@@ -186,9 +199,8 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
		return -EINVAL;
	}

	cbuf = kzalloc(sizeof(struct msm_cvp_internal_buffer), GFP_KERNEL);
	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
	if (!cbuf) {
		dprintk(CVP_ERR, "%s: cbuf alloc failed\n", __func__);
		return -ENOMEM;
	}

@@ -205,8 +217,9 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
	}

	if (buf->index) {
		rc = cvp_dsp_register_buffer((uint32_t)cbuf->smem.device_addr,
			buf->index, buf->size, hash32_ptr(session));
		rc = cvp_dsp_register_buffer(hash32_ptr(session), buf->fd,
			 buf->size, buf->offset, buf->index,
			(uint32_t)cbuf->smem.device_addr);
		if (rc) {
			dprintk(CVP_ERR,
				"%s: failed dsp registration for fd=%d rc=%d",
@@ -229,7 +242,7 @@ static int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst,
exit:
	if (cbuf->smem.device_addr)
		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
	kfree(cbuf);
	kmem_cache_free(inst->internal_buf_cache, cbuf);
	cbuf = NULL;

	return rc;
@@ -286,11 +299,11 @@ static int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst,
	list_del(&cbuf->list);
	mutex_unlock(&inst->cvpdspbufs.lock);

	kfree(cbuf);
	kmem_cache_free(inst->internal_buf_cache, cbuf);
	return rc;
}

static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
static int msm_cvp_map_buf_cpu_d(struct msm_cvp_inst *inst,
	unsigned int fd,
	unsigned int size,
	struct msm_cvp_internal_buffer **cbuf_ptr)
@@ -318,12 +331,10 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
		return -EINVAL;
	}

	cbuf = kzalloc(sizeof(struct msm_cvp_internal_buffer), GFP_KERNEL);
	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	memset(cbuf, 0, sizeof(struct msm_cvp_internal_buffer));

	cbuf->buf.fd = fd;
	cbuf->buf.size = size;
	/* HFI doesn't have buffer type, set it as HAL_BUFFER_INPUT */
@@ -349,12 +360,179 @@ static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
exit:
	if (cbuf->smem.device_addr)
		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
	kfree(cbuf);
	kmem_cache_free(inst->internal_buf_cache, cbuf);
	cbuf = NULL;

	return rc;
}

static void __msm_cvp_cache_operations(struct msm_cvp_internal_buffer *cbuf)
{
	enum smem_cache_ops cache_op;

	if (!cbuf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	switch (cbuf->buf.type) {
	case CVP_KMD_BUFTYPE_INPUT:
		cache_op = SMEM_CACHE_CLEAN;
		break;
	case CVP_KMD_BUFTYPE_OUTPUT:
		cache_op = SMEM_CACHE_INVALIDATE;
		break;
	default:
		cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
	}

	msm_cvp_smem_cache_operations(cbuf->smem.dma_buf, cache_op,
				cbuf->buf.offset, cbuf->buf.size);
}

/*
 * Map (or reuse) a CPU-path buffer for a command packet and record it
 * against the given frame so it can be unmapped when the frame's
 * response arrives (see msm_cvp_unmap_buf_cpu).
 *
 * On success *iova holds the device address (+ buffer offset) to patch
 * into the packet and the buffer is tracked on both inst->cvpcpubufs
 * and frame->bufs. Returns 0 on success, -EINVAL/-ENOMEM or a mapping
 * error otherwise; on failure nothing remains on either list.
 */
static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
				struct cvp_buf_type *in_buf,
				u32 *iova,
				struct msm_cvp_frame *frame)
{
	int rc = 0;
	struct msm_cvp_internal_buffer *cbuf;
	struct msm_cvp_frame_buf *frame_buf;

	if (!inst || !iova || !frame) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* Fast path: buffer already mapped for this session. */
	rc = msm_cvp_session_get_iova_addr(inst, in_buf, iova);
	if (!rc && *iova != 0)
		return 0;

	cbuf = kmem_cache_zalloc(inst->internal_buf_cache, GFP_KERNEL);
	if (!cbuf)
		return -ENOMEM;

	cbuf->buf.fd = in_buf->fd;
	cbuf->buf.size = in_buf->size;
	cbuf->buf.offset = in_buf->offset;
	cbuf->buf.flags = in_buf->flags;
	/* Direction unknown at this layer; treat as bidirectional so the
	 * cache maintenance below does clean+invalidate.
	 */
	cbuf->buf.type = CVP_KMD_BUFTYPE_INPUT | CVP_KMD_BUFTYPE_OUTPUT;

	/* HFI doesn't have buffer type, set it as HAL_BUFFER_INPUT */
	cbuf->smem.buffer_type = HAL_BUFFER_INPUT;
	cbuf->smem.fd = cbuf->buf.fd;
	cbuf->smem.size = cbuf->buf.size;
	cbuf->smem.flags = 0;
	cbuf->smem.offset = 0;
	rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
	if (rc) {
		print_client_buffer(CVP_ERR, "map failed", inst, &cbuf->buf);
		goto exit;
	}

	mutex_lock(&inst->cvpcpubufs.lock);
	list_add_tail(&cbuf->list, &inst->cvpcpubufs.list);
	mutex_unlock(&inst->cvpcpubufs.lock);

	__msm_cvp_cache_operations(cbuf);

	*iova = cbuf->smem.device_addr + cbuf->buf.offset;

	/* Track the buffer on the frame so the response handler can
	 * unmap it by ktid.
	 */
	frame_buf = kmem_cache_zalloc(inst->frame_buf_cache, GFP_KERNEL);
	if (!frame_buf) {
		rc = -ENOMEM;
		goto exit2;
	}

	memcpy(&frame_buf->buf, in_buf, sizeof(frame_buf->buf));

	mutex_lock(&frame->bufs.lock);
	list_add_tail(&frame_buf->list, &frame->bufs.list);
	mutex_unlock(&frame->bufs.lock);

	print_client_buffer(CVP_DBG, "map", inst, &cbuf->buf);
	return rc;

exit2:
	/* Undo the mapping and list insertion done above. */
	if (cbuf->smem.device_addr)
		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
	mutex_lock(&inst->cvpcpubufs.lock);
	list_del(&cbuf->list);
	mutex_unlock(&inst->cvpcpubufs.lock);
exit:
	kmem_cache_free(inst->internal_buf_cache, cbuf);
	cbuf = NULL;

	return rc;
}

/*
 * Release the CPU-path mapping that backs frame_buf: find the matching
 * entry (same fd, size and offset) on inst->cvpcpubufs, remove it from
 * the list, unmap its dma-buf and free the bookkeeping struct. At most
 * one entry is released per call.
 */
static void __unmap_buf(struct msm_cvp_inst *inst,
		struct msm_cvp_frame_buf *frame_buf)
{
	struct msm_cvp_internal_buffer *entry, *next;
	struct cvp_buf_type *target;

	if (!inst || !frame_buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	target = &frame_buf->buf;
	mutex_lock(&inst->cvpcpubufs.lock);
	list_for_each_entry_safe(entry, next, &inst->cvpcpubufs.list, list) {
		if (entry->buf.fd != target->fd ||
			entry->buf.size != target->size ||
			entry->buf.offset != target->offset)
			continue;

		list_del(&entry->list);
		msm_cvp_smem_unmap_dma_buf(inst, &entry->smem);
		print_client_buffer(CVP_DBG, "unmap", inst, &entry->buf);
		kmem_cache_free(inst->internal_buf_cache, entry);
		break;
	}
	mutex_unlock(&inst->cvpcpubufs.lock);
}

/*
 * Tear down all CPU-path buffer mappings recorded for the frame whose
 * kernel transaction id is ktid (assigned when the command packet was
 * mapped). Called when the corresponding response arrives.
 *
 * Removes the frame from inst->frames, unmaps and frees every buffer
 * it tracked, then frees the frame itself. Logs a warning if no frame
 * with that ktid exists.
 */
void msm_cvp_unmap_buf_cpu(struct msm_cvp_inst *inst, u64 ktid)
{
	struct msm_cvp_frame *frame, *dummy1;
	struct msm_cvp_frame_buf *frame_buf, *dummy2;
	bool found;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	dprintk(CVP_DBG, "%s: unmap frame %llu\n", __func__, ktid);
	found = false;
	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		if (frame->ktid == ktid) {
			found = true;
			list_del(&frame->list);
			/* frame->bufs.lock nests inside frames.lock;
			 * __unmap_buf takes cvpcpubufs.lock internally.
			 */
			mutex_lock(&frame->bufs.lock);
			list_for_each_entry_safe(frame_buf, dummy2,
						&frame->bufs.list, list) {
				list_del(&frame_buf->list);
				__unmap_buf(inst, frame_buf);
				kmem_cache_free(inst->frame_buf_cache,
						frame_buf);
			}
			mutex_unlock(&frame->bufs.lock);
			DEINIT_MSM_CVP_LIST(&frame->bufs);
			kmem_cache_free(inst->frame_cache, frame);
			/* ktid is unique per frame; stop after the match. */
			break;
		}
	}
	mutex_unlock(&inst->frames.lock);

	if (!found) {
		dprintk(CVP_WARN, "%s frame %#llx not found!\n",
				__func__, ktid);
	}
}

static bool _cvp_msg_pending(struct msm_cvp_inst *inst,
			struct cvp_session_queue *sq,
			struct cvp_session_msg **msg)
@@ -363,8 +541,7 @@ static bool _cvp_msg_pending(struct msm_cvp_inst *inst,
	bool result = false;

	spin_lock(&sq->lock);
	if (!kref_read(&inst->kref) ||
		sq->state != QUEUE_ACTIVE) {
	if (sq->state != QUEUE_ACTIVE) {
		/* The session is being deleted */
		spin_unlock(&sq->lock);
		*msg = NULL;
@@ -392,6 +569,7 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
	struct cvp_kmd_session_control *sc;
	struct msm_cvp_inst *s;
	int rc = 0;
	u32 version;

	if (!inst) {
		dprintk(CVP_ERR, "%s invalid session\n", __func__);
@@ -402,6 +580,7 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
	if (!s)
		return -ECONNRESET;

	s->cur_cmd_type = CVP_KMD_RECEIVE_MSG_PKT;
	sq = &inst->session_queue;
	sc = (struct cvp_kmd_session_control *)out_pkt;

@@ -409,32 +588,45 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,

	if (wait_event_timeout(sq->wq,
		_cvp_msg_pending(inst, sq, &msg), wait_time) == 0) {
		dprintk(CVP_DBG, "session queue wait timeout\n");
		dprintk(CVP_WARN, "session queue wait timeout\n");
		msm_cvp_comm_kill_session(inst);
		rc = -ETIMEDOUT;
		goto exit;
	}

	version = (get_hfi_version() & HFI_VERSION_MINOR_MASK)
				>> HFI_VERSION_MINOR_SHIFT;

	if (msg == NULL) {
		dprintk(CVP_DBG,
		dprintk(CVP_WARN,
			"%s: session deleted, queue state %d, msg cnt %d\n",
			__func__, inst->session_queue.state,
			inst->session_queue.msg_count);

		spin_lock(&sq->lock);
		if (sq->msg_count) {
			sc->ctrl_data[0] = sq->msg_count;
			rc = -EUCLEAN;
		} else {
			rc = -ENOLINK;
		if (inst->state >= MSM_CVP_CLOSE_DONE) {
			rc = -ECONNRESET;
			goto exit;
		}
		spin_unlock(&sq->lock);

		msm_cvp_comm_kill_session(inst);
	} else {
		if (version >= 1) {
			u64 ktid;
			u32 kdata1, kdata2;

			kdata1 = msg->pkt.client_data.kdata1;
			kdata2 = msg->pkt.client_data.kdata2;
			ktid = ((u64)kdata2 << 32) | kdata1;
			msm_cvp_unmap_buf_cpu(inst, ktid);
		}

		memcpy(out_pkt, &msg->pkt,
			sizeof(struct cvp_hfi_msg_session_hdr));
		kmem_cache_free(inst->session_queue.msg_cache, msg);
	}

exit:
	s->cur_cmd_type = 0;
	cvp_put_inst(inst);
	return rc;
}
@@ -447,13 +639,35 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
	struct cvp_buf_desc *buf_ptr;
	struct cvp_buf_type *new_buf;
	int i, rc = 0;
	struct cvp_hfi_device *hdev = inst->core->device;
	struct iris_hfi_device *hfi = hdev->hfi_device_data;
	u32 version = hfi->version;
	u32 version;
	unsigned int iova;
	u64 ktid;
	struct msm_cvp_frame *frame;

	version = get_hfi_version();
	version = (version & HFI_VERSION_MINOR_MASK) >> HFI_VERSION_MINOR_SHIFT;

	if (offset != 0 && buf_num != 0) {
	if (version >= 1 && buf_num) {
		struct cvp_hfi_cmd_session_hdr *cmd_hdr;

		cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
		ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
		cmd_hdr->client_data.kdata1 = (u32)ktid;
		cmd_hdr->client_data.kdata2 = (u32)(ktid >> 32);

		frame = kmem_cache_zalloc(inst->frame_cache, GFP_KERNEL);
		if (!frame)
			return -ENOMEM;

		INIT_MSM_CVP_LIST(&frame->bufs);
		frame->ktid = ktid;
	} else {
		frame = NULL;
	}

	if (!offset || !buf_num)
		return 0;

	for (i = 0; i < buf_num; i++) {
		buf_ptr = (struct cvp_buf_desc *)
				&in_pkt->pkt_data[offset];
@@ -465,16 +679,45 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
		if (!buf_ptr->fd)
			continue;

			rc = msm_cvp_session_get_iova_addr(inst, &cbuf,
		if (version >= 1) {
			new_buf = (struct cvp_buf_type *)buf_ptr;
			rc = msm_cvp_map_buf_cpu(inst, new_buf, &iova, frame);
			if (rc) {
				struct msm_cvp_frame_buf *frame_buf, *dummy;

				dprintk(CVP_ERR,
					"%s: buf %d register failed.\n",
					__func__, i);

				list_for_each_entry_safe(frame_buf,
					dummy, &frame->bufs.list, list) {
					list_del(&frame_buf->list);
					__unmap_buf(inst, frame_buf);
					kmem_cache_free(
					inst->frame_buf_cache,
					frame_buf);
				}
				DEINIT_MSM_CVP_LIST(&frame->bufs);
				kmem_cache_free(inst->frame_cache,
						frame);
				return rc;
			}
			new_buf->fd = iova;
		} else {
			rc = msm_cvp_session_get_iova_addr_d(inst,
						&cbuf,
						buf_ptr->fd,
						buf_ptr->size,
						&buf_ptr->fd,
						&buf_ptr->size);

			if (rc == -ENOENT) {
				dprintk(CVP_DBG, "%s map buf fd %d size %d\n",
				dprintk(CVP_DBG,
					"%s map buf fd %d size %d\n",
					__func__, buf_ptr->fd,
					buf_ptr->size);
				rc = msm_cvp_map_buf_cpu(inst, buf_ptr->fd,
				rc = msm_cvp_map_buf_cpu_d(inst,
						buf_ptr->fd,
						buf_ptr->size, &cbuf);
				if (rc || !cbuf) {
					dprintk(CVP_ERR,
@@ -490,11 +733,21 @@ static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
				__func__, i, rc);
				return rc;
			}
			msm_cvp_smem_cache_operations(cbuf->smem.dma_buf,
			msm_cvp_smem_cache_operations(
					cbuf->smem.dma_buf,
					SMEM_CACHE_CLEAN_INVALIDATE,
					0, buf_ptr->size);
		}
	}


	if (frame != NULL) {
		mutex_lock(&inst->frames.lock);
		list_add_tail(&frame->list, &inst->frames.list);
		mutex_unlock(&inst->frames.lock);
		dprintk(CVP_DBG, "%s: map frame %llu\n", __func__, ktid);
	}

	return rc;
}

@@ -519,6 +772,7 @@ static int msm_cvp_session_process_hfi(
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = CVP_KMD_SEND_CMD_PKT;
	sq = &inst->session_queue;
	spin_lock(&sq->lock);
	if (sq->state != QUEUE_ACTIVE) {
@@ -575,10 +829,12 @@ static int msm_cvp_session_process_hfi(

	}
exit:
	inst->cur_cmd_type = 0;
	cvp_put_inst(inst);
	return rc;
}

#define CVP_FENCE_RUN	0x100
static int msm_cvp_thread_fence_run(void *data)
{
	int i, rc = 0;
@@ -600,13 +856,13 @@ static int msm_cvp_thread_fence_run(void *data)
	}

	fence_thread_data = data;
	inst = cvp_get_inst(get_cvp_core(fence_thread_data->device_id),
				(void *)fence_thread_data->inst);
	inst = fence_thread_data->inst;
	if (!inst) {
		dprintk(CVP_ERR, "%s Wrong inst %pK\n", __func__, inst);
		rc = -EINVAL;
		return rc;
	}
	inst->cur_cmd_type = CVP_FENCE_RUN;
	in_fence_pkt = (struct cvp_kmd_hfi_fence_packet *)
					&fence_thread_data->in_fence_pkt;
	in_pkt = (struct cvp_kmd_hfi_packet *)(in_fence_pkt);
@@ -815,6 +1071,7 @@ static int msm_cvp_thread_fence_run(void *data)

exit:
	kmem_cache_free(inst->fence_data_cache, fence_thread_data);
	inst->cur_cmd_type = 0;
	cvp_put_inst(inst);
	do_exit(rc);
}
@@ -844,12 +1101,14 @@ static int msm_cvp_session_process_hfi_fence(
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = CVP_KMD_SEND_FENCE_CMD_PKT;
	fence_thread_data = kmem_cache_alloc(inst->fence_data_cache,
			GFP_KERNEL);
	if (!fence_thread_data) {
		dprintk(CVP_ERR, "%s: fence_thread_data alloc failed\n",
				__func__);
		return -ENOMEM;
		rc = -ENOMEM;
		goto exit;
	}

	in_offset = arg->buf_offset;
@@ -888,10 +1147,17 @@ static int msm_cvp_session_process_hfi_fence(
				"thread_fence_%d", thread_num);
	thread = kthread_run(msm_cvp_thread_fence_run,
			fence_thread_data, thread_fence_name);
	if (!thread)
	if (!thread) {
		kmem_cache_free(inst->fence_data_cache, fence_thread_data);
		dprintk(CVP_ERR, "%s fail to create kthread\n", __func__);
		rc = -ECHILD;
		goto exit;
	}

	return 0;

exit:
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}
@@ -1061,6 +1327,7 @@ static int msm_cvp_request_power(struct msm_cvp_inst *inst,
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = CVP_KMD_REQUEST_POWER;
	core = inst->core;

	mutex_lock(&core->lock);
@@ -1081,6 +1348,7 @@ static int msm_cvp_request_power(struct msm_cvp_inst *inst,
		dprintk(CVP_ERR, "Instance %pK power request out of range\n");

	mutex_unlock(&core->lock);
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);

	return rc;
@@ -1106,6 +1374,7 @@ static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = CVP_KMD_REGISTER_BUFFER;
	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
@@ -1117,6 +1386,7 @@ static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,

	rc = msm_cvp_map_buf_dsp(inst, buf);
exit:
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}
@@ -1132,12 +1402,7 @@ static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
		return -EINVAL;
	}

	print_client_buffer(CVP_DBG, "unregister", inst, buf);

	if (!buf->index) {
		dprintk(CVP_INFO,
			"%s CPU path unregister buffer is deprecated!\n",
			__func__);
		return 0;
	}

@@ -1145,9 +1410,11 @@ static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = CVP_KMD_UNREGISTER_BUFFER;
	print_client_buffer(CVP_DBG, "unregister", inst, buf);

	rc = msm_cvp_unmap_buf_dsp(inst, buf);
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}
@@ -1159,9 +1426,14 @@ static int msm_cvp_session_create(struct msm_cvp_inst *inst)
	if (!inst || !inst->core)
		return -EINVAL;

	if (inst->state >= MSM_CVP_CLOSE_DONE)
		return -ECONNRESET;

	if (inst->state != MSM_CVP_CORE_INIT_DONE ||
		inst->state > MSM_CVP_OPEN_DONE) {
		dprintk(CVP_ERR, "not ready create instance %d\n", inst->state);
		dprintk(CVP_ERR,
			"%s Incorrect CVP state %d to create session\n",
			__func__, inst->state);
		return -EINVAL;
	}

@@ -1361,8 +1633,9 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)

		rc = session_state_check_init(inst);
		if (rc) {
			dprintk(CVP_ERR, "session not ready for commands %d",
					arg->type);
			dprintk(CVP_ERR,
				"Incorrect session state %d for command %d",
				inst->state, arg->type);
			return rc;
		}
	}
@@ -1483,6 +1756,8 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
	int rc = 0;
	struct cvp_hal_session *session;
	struct msm_cvp_internal_buffer *cbuf, *dummy;
	struct msm_cvp_frame *frame, *dummy1;
	struct msm_cvp_frame_buf *frame_buf, *dummy2;

	if (!inst || !inst->core) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -1516,6 +1791,7 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
				inst, &cbuf->buf);
		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
		list_del(&cbuf->list);
		kmem_cache_free(inst->internal_buf_cache, cbuf);
	}
	mutex_unlock(&inst->cvpcpubufs.lock);

@@ -1535,9 +1811,33 @@ int msm_cvp_session_deinit(struct msm_cvp_inst *inst)

		msm_cvp_smem_unmap_dma_buf(inst, &cbuf->smem);
		list_del(&cbuf->list);
		kmem_cache_free(inst->internal_buf_cache, cbuf);
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		list_del(&frame->list);
		mutex_lock(&frame->bufs.lock);
		list_for_each_entry_safe(frame_buf, dummy2, &frame->bufs.list,
									list) {
			struct cvp_buf_type *buf = &frame_buf->buf;

			dprintk(CVP_ERR,
				"%s: %x : fd %d off %d size %d flags 0x%x\n",
				"remove from frame list",
				hash32_ptr(inst->session),
				buf->fd, buf->offset, buf->size, buf->flags);

			list_del(&frame_buf->list);
			kmem_cache_free(inst->frame_buf_cache, frame_buf);
		}
		mutex_unlock(&frame->bufs.lock);
		DEINIT_MSM_CVP_LIST(&frame->bufs);
		kmem_cache_free(inst->frame_cache, frame);
	}
	mutex_unlock(&inst->frames.lock);

	msm_cvp_comm_free_freq_table(inst);

	return rc;
Loading