Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ad6bc90a authored by Ravikishore Pampana's avatar Ravikishore Pampana
Browse files

msm: camera: isp: Add support for RDI only session



Add an RDI only session sub state machine and IRQ state machine in the
cam isp context to handle the RDI only session use case. Set the
RDI only flag in the hardware manager context if the acquired resources
do not need the PIX path. Add support to process interrupts for all
RDI instances.

Change-Id: I987e7ba233d6da9830a11aadb768cf2047a8b501
Signed-off-by: default avatarRavikishore Pampana <rpampana@codeaurora.org>
parent 1a741900
Loading
Loading
Loading
Loading
+437 −0
Original line number Diff line number Diff line
@@ -879,6 +879,412 @@ static struct cam_ctx_ops
	},
};

/*
 * Handle a SOF IRQ while the RDI only context is in the SOF/EPOCH
 * (top) substate: advance the frame counter, notify the CRM so the
 * next request can be scheduled, and return to the SOF substate once
 * the active request list has drained.
 *
 * Returns 0 on success, -EINVAL when no event payload was supplied.
 */
static int __cam_isp_ctx_rdi_only_sof_in_top_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_context                    *ctx = ctx_isp->base;
	struct cam_req_mgr_sof_notify          notify;
	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
	uint64_t                               request_id  = 0;

	if (!evt_data) {
		pr_err("%s: invalid sof event data\n", __func__);
		return -EINVAL;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	CDBG("%s: frame id: %lld time stamp:0x%llx\n", __func__,
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	/*
	 * Notify reqmgr with the SOF signal. Note, due to scheduling delay
	 * we can run into a situation where two active requests are already
	 * in the active queue while we try to do the notification.
	 * In this case, we need to skip the current notification. This
	 * helps the state machine to catch up the delay.
	 */
	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof &&
		ctx_isp->active_req_cnt <= 2) {
		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.frame_id = ctx_isp->frame_id;

		ctx->ctx_crm_intf->notify_sof(&notify);
		CDBG("%s: Notify CRM  SOF frame %lld\n", __func__,
			ctx_isp->frame_id);

		/*
		 * It is an idle frame without any applied request id, send
		 * request id as zero
		 */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	} else {
		pr_err("%s: Can not notify SOF to CRM\n", __func__);
	}

	/* Only fall back to SOF once every applied request has completed */
	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CDBG("%s: Still need to wait for the buf done\n", __func__);

	CDBG("%s: next substate %d\n", __func__,
		ctx_isp->substate_activated);
	return rc;
}

/*
 * Handle a SOF IRQ while a request has been applied but its reg update
 * ack has not arrived yet. The SOF arriving first means the applied
 * settings missed this frame, so transition to BUBBLE_APPLIED and let
 * that substate decide whether to report a bubble.
 *
 * Returns 0 on success, -EINVAL when no event payload was supplied.
 */
static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;

	if (!evt_data) {
		pr_err("%s: invalid sof event data\n", __func__);
		return -EINVAL;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	CDBG("%s: frame id: %lld time stamp:0x%llx\n", __func__,
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);

	return 0;
}

static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request    *req;
	struct cam_isp_ctx_req    *req_isp;
	struct cam_context        *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
	uint64_t  request_id = 0;

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	CDBG("%s: frame id: %lld time stamp:0x%llx\n", __func__,
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);

	if (list_empty(&ctx->pending_req_list)) {
		/*
		 * If no pending req in epoch, this is an error case.
		 * The recovery is to go back to sof state
		 */
		pr_err("%s: No pending request\n", __func__);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame*/
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		goto end;
	}

	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	CDBG("Report Bubble flag %d\n", req_isp->bubble_report);
	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		ctx->ctx_crm_intf->notify_err(&notify);
		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		/*
		 * Since can not bubble report, always move the request to
		 * active list.
		 */
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CDBG("%s: move request %lld to active list(cnt = %d)\n",
			__func__, req->request_id, ctx_isp->active_req_cnt);
		req_isp->bubble_report = 0;
	}

	request_id = req->request_id;
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_ERROR);

	/* change the state to bubble, as reg update has not come */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
end:
	return 0;
}

/*
 * Handle a SOF IRQ while the RDI only context is in the BUBBLE
 * substate: flush the bubble by signaling every fence of every active
 * request with an error, returning those requests to the free list,
 * notifying the CRM of the new frame, and dropping back to the SOF
 * substate so normal operation can resume.
 *
 * Returns 0 on success, -EINVAL when no event payload was supplied.
 */
static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint32_t i;
	struct cam_ctx_request                *req;
	struct cam_context                    *ctx = ctx_isp->base;
	struct cam_req_mgr_sof_notify          notify;
	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
	struct cam_isp_ctx_req                *req_isp;
	uint64_t                               request_id  = 0;

	if (!evt_data) {
		pr_err("%s: invalid sof event data\n", __func__);
		return -EINVAL;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	CDBG("%s: frame id: %lld time stamp:0x%llx\n", __func__,
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	/*
	 * Signal all active requests with error and move all the active
	 * requests to the free list
	 */
	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
				struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		CDBG("%s: signal fence in active list. fence num %d\n",
			__func__, req_isp->num_fence_map_out);
		for (i = 0; i < req_isp->num_fence_map_out; i++) {
			/* -1 marks an unused/already released sync object */
			if (req_isp->fence_map_out[i].sync_id != -1) {
				cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
			}
		}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	/* notify reqmgr with sof signal */
	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.frame_id = ctx_isp->frame_id;

		ctx->ctx_crm_intf->notify_sof(&notify);
		CDBG("%s: Notify CRM  SOF frame %lld\n", __func__,
			ctx_isp->frame_id);

	} else {
		pr_err("%s: Can not notify SOF to CRM\n", __func__);
	}

	/*
	 * It is an idle frame without any applied request id, send
	 * request id as zero
	 */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

	CDBG("%s: next substate %d\n", __func__,
		ctx_isp->substate_activated);

	return 0;
}

/*
 * Handle a reg update ack in the BUBBLE_APPLIED substate for an RDI
 * only context. The ack means the applied settings finally took
 * effect: pop the head of the pending list, move it to the active list
 * (or straight to the free list when it produces no output buffers),
 * notify the CRM of the frame, emit the SOF timestamp event, and enter
 * the EPOCH substate. When no pending request exists, recover by
 * sending an idle-frame SOF event and returning to the SOF substate.
 *
 * Always returns 0 (both the normal and the recovery path).
 */
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request  *req;
	struct cam_context      *ctx = ctx_isp->base;
	struct cam_isp_ctx_req  *req_isp;
	struct cam_req_mgr_sof_notify  notify;
	uint64_t  request_id  = 0;

	/* notify reqmgr with sof signal*/
	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
		if (list_empty(&ctx->pending_req_list)) {
			pr_err("Reg upd ack with no pending request\n");
			goto error;
		}
		req = list_first_entry(&ctx->pending_req_list,
				struct cam_ctx_request, list);
		list_del_init(&req->list);

		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		request_id = req->request_id;
		if (req_isp->num_fence_map_out != 0) {
			/* Request has output buffers: wait for buf done */
			list_add_tail(&req->list, &ctx->active_req_list);
			ctx_isp->active_req_cnt++;
			CDBG("%s: move request %lld to active list(cnt = %d)\n",
				__func__, req->request_id,
				ctx_isp->active_req_cnt);
		} else {
			/* no io config, so the request is completed. */
			list_add_tail(&req->list, &ctx->free_req_list);
			CDBG("%s:move active req %lld to free list(cnt=%d)\n",
				__func__, req->request_id,
				ctx_isp->active_req_cnt);
		}

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.frame_id = ctx_isp->frame_id;

		ctx->ctx_crm_intf->notify_sof(&notify);
		CDBG("%s: Notify CRM  SOF frame %lld\n", __func__,
			ctx_isp->frame_id);
	} else {
		pr_err("%s: Can not notify SOF to CRM\n", __func__);
	}
	/*
	 * request_id is still 0 here when the CRM interface was missing,
	 * i.e. the timestamp is reported as an idle frame in that case.
	 */
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);

	return 0;
error:
	/* Send SOF event as idle frame*/
	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	/*
	 * There is no request in the pending list, move the sub state machine
	 * to SOF sub state
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

	return 0;
}

/*
 * IRQ dispatch table for an RDI only context, indexed by the activated
 * substate. Each .irq_ops array is positional; from the handlers wired
 * below, slot 0 is error, slot 1 SOF, slot 2 reg update and slot 5 buf
 * done (slots 3/4 presumably EPOCH/EOF — confirm against the
 * CAM_ISP_HW_EVENT_* enum ordering). A NULL slot means the event is
 * ignored in that substate.
 */
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_rdi_only_activated_state_machine_irq
			[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			__cam_isp_ctx_reg_upd_in_sof,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_applied_state,
			NULL,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_top_state,
			NULL,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE*/
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
			NULL,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble,
		},
	},
	/* BUBBLE APPLIED ie PRE_BUBBLE */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_bubble_applied,
		},
	},

	/* HALT: all events ignored while halted */
	{
	},
};

/*
 * CRM apply hook for an RDI only context. Delegates to the common
 * activated-state apply handler, requesting a direct transition to the
 * APPLIED substate (RDI sessions skip the EPOCH-gated apply).
 */
static int __cam_isp_ctx_rdi_only_apply_req_top_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
{
	struct cam_isp_context *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	int rc;

	CDBG("%s: current substate %d\n", __func__,
		ctx_isp->substate_activated);

	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
		CAM_ISP_CTX_ACTIVATED_APPLIED);

	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
	return rc;
}

/*
 * Top-level context ops table for an RDI only context, indexed by the
 * activated substate. Requests may only be applied from the SOF and
 * EPOCH substates; every other substate (and all ioctl/irq slots here)
 * is left empty.
 */
static struct cam_ctx_ops
	cam_isp_ctx_rdi_only_activated_state_machine
		[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* APPLIED */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* EPOCH */
	{
		.ioctl_ops = {},
		.crm_ops = {
			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
		},
		.irq_ops = NULL,
	},
	/* PRE BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* BUBBLE */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
	/* HALT */
	{
		.ioctl_ops = {},
		.crm_ops = {},
		.irq_ops = NULL,
	},
};


/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
@@ -1055,6 +1461,7 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
	struct cam_hw_release_args       release;
	struct cam_isp_context          *ctx_isp =
		(struct cam_isp_context *) ctx->ctx_priv;
	struct cam_isp_hw_cmd_args       hw_cmd_args;

	if (!ctx->hw_mgr_intf) {
		pr_err("HW interface is not ready!\n");
@@ -1108,6 +1515,36 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
		goto free_res;
	}

	/* Query the context has rdi only resource */
	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
	if (rc) {
		pr_err("HW command failed\n");
		goto free_hw;
	}

	if (hw_cmd_args.u.is_rdi_only_context) {
		/*
		 * this context has rdi only resource assign rdi only
		 * state machine
		 */
		CDBG("%s: RDI only session Context\n", __func__);

		ctx_isp->substate_machine_irq =
			cam_isp_ctx_rdi_only_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_rdi_only_activated_state_machine;
	} else {
		CDBG("%s: Session has PIX or PIX and RDI resources\n",
			__func__);
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_activated_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_activated_state_machine;
	}

	ctx_isp->hw_ctx = param.ctxt_to_hw_map;

	req_hdl_param.session_hdl = cmd->session_handle;
+240 −123
Original line number Diff line number Diff line
@@ -930,6 +930,7 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
		csid_acquire.cid = cid_res_id;
		csid_acquire.in_port = in_port;
		csid_acquire.out_port = out_port;
		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;

		for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
@@ -1112,7 +1113,8 @@ static int cam_ife_mgr_acquire_cid_res(
}
static int cam_ife_mgr_acquire_hw_for_ctx(
	struct cam_ife_hw_mgr_ctx          *ife_ctx,
	struct cam_isp_in_port_info        *in_port)
	struct cam_isp_in_port_info        *in_port,
	uint32_t  *num_pix_port, uint32_t  *num_rdi_port)
{
	int rc                                    = -1;
	int is_dual_vfe                           = 0;
@@ -1183,6 +1185,9 @@ static int cam_ife_mgr_acquire_hw_for_ctx(
		goto err;
	}

	*num_pix_port += pixel_count;
	*num_rdi_port += rdi_count;

	return 0;
err:
	/* release resource at the acquire entry funciton */
@@ -1196,7 +1201,6 @@ void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
		__func__, handle, userdata, status, cookie);
}


/* entry function: acquire_hw */
static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
					void *acquire_hw_args)
@@ -1209,6 +1213,8 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
	struct cam_isp_in_port_info       *in_port = NULL;
	struct cam_isp_resource           *isp_resource = NULL;
	struct cam_cdm_acquire_data        cdm_acquire;
	uint32_t                           num_pix_port = 0;
	uint32_t                           num_rdi_port = 0;

	CDBG("%s: Enter...\n", __func__);

@@ -1270,7 +1276,8 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
		in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
			isp_resource[i].length);
		if (in_port > 0) {
			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port);
			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
				&num_pix_port, &num_rdi_port);
			kfree(in_port);
			if (rc) {
				pr_err("%s: can not acquire resource!\n",
@@ -1284,6 +1291,11 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
			goto free_res;
		}
	}

	/* Check whether context has only RDI resource */
	if (!num_pix_port)
		ife_ctx->is_rdi_only_context = 1;

	/* Process base info */
	rc = cam_ife_mgr_process_base_info(ife_ctx);
	if (rc) {
@@ -1982,6 +1994,41 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
	return rc;
}

/*
 * Generic HW manager command entry point for the IFE hw manager.
 * Currently only services CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
 * reporting whether the context was acquired with RDI paths only.
 *
 * Returns 0 on success, -EINVAL on bad arguments or unknown command,
 * -EPERM when the context is not in use.
 */
static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
{
	int rc = 0;
	struct cam_isp_hw_cmd_args  *hw_cmd_args  = cmd_args;
	struct cam_ife_hw_mgr_ctx   *ctx;

	if (!hw_mgr_priv || !cmd_args) {
		/* "%s:%d" — original "%s%d" fused func name and line number */
		pr_err("%s:%d: Invalid arguments\n", __func__, __LINE__);
		return -EINVAL;
	}

	ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
	if (!ctx || !ctx->ctx_in_use) {
		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
		return -EPERM;
	}

	switch (hw_cmd_args->cmd_type) {
	case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
		/* Normalize the flag to 0/1 for the caller */
		hw_cmd_args->u.is_rdi_only_context =
			ctx->is_rdi_only_context ? 1 : 0;
		break;
	default:
		pr_err("%s: Invalid HW mgr command:0x%x\n", __func__,
			hw_cmd_args->cmd_type);
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int cam_ife_mgr_cmd_get_sof_timestamp(
	struct cam_ife_hw_mgr_ctx      *ife_ctx,
	uint64_t                       *time_stamp)
@@ -2294,14 +2341,14 @@ static int cam_ife_hw_mgr_handle_camif_error(
 * of dual VFE.
 * RDI path does not support DUAl VFE
 */
static int cam_ife_hw_mgr_handle_rup_for_camif_hw_res(
static int cam_ife_hw_mgr_handle_reg_update(
	void                              *handler_priv,
	void                              *payload)
{
	struct cam_isp_resource_node            *hw_res;
	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload      *evt_payload;
	struct cam_ife_hw_mgr_res               *isp_ife_camif_res = NULL;
	struct cam_ife_hw_mgr_res               *ife_src_res = NULL;
	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
	struct cam_isp_hw_reg_update_event_data  rup_event_data;
	uint32_t  core_idx;
@@ -2322,21 +2369,21 @@ static int cam_ife_hw_mgr_handle_rup_for_camif_hw_res(
		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];

	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
	list_for_each_entry(isp_ife_camif_res,
	list_for_each_entry(ife_src_res,
			&ife_hwr_mgr_ctx->res_list_ife_src, list) {

		if (isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
			continue;

		CDBG("%s: camif resource id = %d, curr_core_idx = %d\n",
			__func__, isp_ife_camif_res->res_id, core_idx);
		switch (isp_ife_camif_res->res_id) {
		CDBG("%s:resource id = %d, curr_core_idx = %d\n",
			__func__, ife_src_res->res_id, core_idx);
		switch (ife_src_res->res_id) {
		case CAM_ISP_HW_VFE_IN_CAMIF:
			if (isp_ife_camif_res->is_dual_vfe)
			if (ife_src_res->is_dual_vfe)
				/* It checks for slave core RUP ACK*/
				hw_res = isp_ife_camif_res->hw_res[1];
				hw_res = ife_src_res->hw_res[1];
			else
				hw_res = isp_ife_camif_res->hw_res[0];
				hw_res = ife_src_res->hw_res[0];

			if (!hw_res) {
				pr_err("%s: CAMIF device is NULL\n", __func__);
@@ -2350,31 +2397,48 @@ static int cam_ife_hw_mgr_handle_rup_for_camif_hw_res(
				rup_status = hw_res->bottom_half_handler(
					hw_res, evt_payload);
			}

			if (!rup_status) {
				ife_hwr_irq_rup_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_REG_UPDATE,
					&rup_event_data);
			}
			break;

		case CAM_ISP_HW_VFE_IN_RDI0:
		case CAM_ISP_HW_VFE_IN_RDI1:
		case CAM_ISP_HW_VFE_IN_RDI2:
			hw_res = isp_ife_camif_res->hw_res[0];
		case CAM_ISP_HW_VFE_IN_RDI3:
			if (!ife_hwr_mgr_ctx->is_rdi_only_context)
				continue;

			/*
			 * This is RDI only context, send Reg update and epoch
			 * HW event to cam context
			 */
			hw_res = ife_src_res->hw_res[0];

			if (!hw_res) {
				pr_err("%s: RDI Device is NULL\n", __func__);
				break;
			}

			if (core_idx == hw_res->hw_intf->hw_idx)
				/* Need to process rdi reg update */
				rup_status = -EINVAL;
				rup_status = hw_res->bottom_half_handler(
					hw_res, evt_payload);

			if (!rup_status) {
				/* Send the Reg update hw event */
				ife_hwr_irq_rup_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_REG_UPDATE,
					&rup_event_data);
			}
			break;
		default:
			pr_err("%s: invalid resource id (%d)", __func__,
				isp_ife_camif_res->res_id);
		}

		/* only do callback for pixel reg update for now */
		if (!rup_status && (isp_ife_camif_res->res_id ==
			CAM_ISP_HW_VFE_IN_CAMIF)) {
			ife_hwr_irq_rup_cb(ife_hwr_mgr_ctx->common.cb_priv,
				CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
				ife_src_res->res_id);
		}

	}
@@ -2563,50 +2627,31 @@ static int cam_ife_hw_mgr_check_sof_for_dual_vfe(
	return rc;
}

static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
	void                              *handler_priv,
	void                              *payload)
static int cam_ife_hw_mgr_process_camif_sof(
	struct cam_ife_hw_mgr_res            *isp_ife_camif_res,
	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx,
	struct cam_vfe_top_irq_evt_payload   *evt_payload)
{
	int32_t rc = -1;
	struct cam_isp_hw_sof_event_data      sof_done_event_data;
	cam_hw_event_cb_func                  ife_hwr_irq_sof_cb;
	struct cam_isp_resource_node         *hw_res_l = NULL;
	struct cam_isp_resource_node         *hw_res_r = NULL;
	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload   *evt_payload;
	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
	cam_hw_event_cb_func                  ife_hwr_irq_sof_cb;
	struct cam_isp_hw_sof_event_data      sof_done_event_data;
	int32_t rc = -EINVAL;
	uint32_t  core_idx;
	uint32_t  sof_status = 0;
	uint32_t  core_index0;
	uint32_t  core_index1;

	CDBG("%s:Enter\n", __func__);

	ife_hwr_mgr_ctx = handler_priv;
	evt_payload = payload;
	if (!evt_payload) {
		pr_err("%s: no payload\n", __func__);
		return IRQ_HANDLED;
	}
	core_idx = evt_payload->core_index;
	ife_hwr_irq_sof_cb =
		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];

	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;

	list_for_each_entry(isp_ife_camif_res,
		&ife_hwr_mgr_ctx->res_list_ife_src, list) {

		if ((isp_ife_camif_res->res_type ==
			CAM_IFE_HW_MGR_RES_UNINIT) ||
			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
			continue;

	hw_res_l = isp_ife_camif_res->hw_res[0];
	hw_res_r = isp_ife_camif_res->hw_res[1];

	CDBG("%s:is_dual_vfe ? = %d\n", __func__,
		isp_ife_camif_res->is_dual_vfe);

	ife_hwr_irq_sof_cb =
		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];

	switch (isp_ife_camif_res->is_dual_vfe) {
	/* Handling Single VFE Scenario */
	case 0:
@@ -2621,8 +2666,8 @@ static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
				hw_res_l->hw_intf->hw_idx);

		if (core_idx == hw_res_l->hw_intf->hw_idx) {
				sof_status = hw_res_l->bottom_half_handler(
					hw_res_l, evt_payload);
			sof_status = hw_res_l->bottom_half_handler(hw_res_l,
				evt_payload);
			if (!sof_status) {
				cam_ife_mgr_cmd_get_sof_timestamp(
					ife_hwr_mgr_ctx,
@@ -2646,7 +2691,7 @@ static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
				__func__);
			break;
		}
			CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
		CDBG("%s: curr_core_idx = %d, res hw idx= %d\n",
				__func__, core_idx,
				hw_res_l->hw_intf->hw_idx);

@@ -2663,12 +2708,12 @@ static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
				__func__);
			break;
		}
			CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
		CDBG("%s: curr_core_idx = %d, ews hw idx= %d\n",
				__func__, core_idx,
				hw_res_r->hw_intf->hw_idx);
		if (core_idx == hw_res_r->hw_intf->hw_idx) {
				sof_status = hw_res_r->bottom_half_handler(
					hw_res_r, evt_payload);
			sof_status = hw_res_r->bottom_half_handler(hw_res_r,
				evt_payload);
			if (!sof_status)
				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
		}
@@ -2676,27 +2721,98 @@ static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
		core_index0 = hw_res_l->hw_intf->hw_idx;
		core_index1 = hw_res_r->hw_intf->hw_idx;

			rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(
				ife_hwr_mgr_ctx, core_index0, core_index1);
		rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(ife_hwr_mgr_ctx,
			core_index0, core_index1);

			if (!rc) {
		if (!rc)
			ife_hwr_irq_sof_cb(ife_hwr_mgr_ctx->common.cb_priv,
				CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);

		break;

	default:
		pr_err("%s: error with hw_res\n", __func__);
		break;
	}

	CDBG("%s: Exit (sof_status = %d)!\n", __func__, sof_status);

	return 0;
}

static int cam_ife_hw_mgr_handle_sof(
	void                              *handler_priv,
	void                              *payload)
{
	int32_t rc = -EINVAL;
	struct cam_isp_resource_node         *hw_res = NULL;
	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload   *evt_payload;
	struct cam_ife_hw_mgr_res            *ife_src_res = NULL;
	cam_hw_event_cb_func                  ife_hw_irq_sof_cb;
	struct cam_isp_hw_sof_event_data      sof_done_event_data;
	uint32_t  sof_status = 0;

	CDBG("%s:Enter\n", __func__);

	ife_hw_mgr_ctx = handler_priv;
	evt_payload = payload;
	if (!evt_payload) {
		pr_err("%s: no payload\n", __func__);
		return IRQ_HANDLED;
	}
	ife_hw_irq_sof_cb =
		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];

	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;

	list_for_each_entry(ife_src_res,
		&ife_hw_mgr_ctx->res_list_ife_src, list) {

		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
			continue;

		switch (ife_src_res->res_id) {
		case CAM_ISP_HW_VFE_IN_RDI0:
		case CAM_ISP_HW_VFE_IN_RDI1:
		case CAM_ISP_HW_VFE_IN_RDI2:
		case CAM_ISP_HW_VFE_IN_RDI3:
			/* check if it is rdi only context */
			if (ife_hw_mgr_ctx->is_rdi_only_context) {
				hw_res = ife_src_res->hw_res[0];
				sof_status = hw_res->bottom_half_handler(
					hw_res, evt_payload);

				if (!sof_status) {
					cam_ife_mgr_cmd_get_sof_timestamp(
					ife_hwr_mgr_ctx,
						ife_hw_mgr_ctx,
						&sof_done_event_data.timestamp);

				ife_hwr_irq_sof_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					ife_hw_irq_sof_cb(
						ife_hw_mgr_ctx->common.cb_priv,
						CAM_ISP_HW_EVENT_SOF,
						&sof_done_event_data);
				}

				CDBG("%s: sof_status = %d\n", __func__,
					sof_status);

				/* this is RDI only context so exit from here */
				return 0;
			}
			break;

		case CAM_ISP_HW_VFE_IN_CAMIF:
			rc = cam_ife_hw_mgr_process_camif_sof(ife_src_res,
				ife_hw_mgr_ctx, evt_payload);
			break;
		default:
			pr_err("%s: error with hw_res\n", __func__);
			pr_err("%s: Invalid resource id :%d\n", __func__,
				ife_src_res->res_id);
			break;
		}
	}

	CDBG("%s: Exit (sof_status = %d)!\n", __func__, sof_status);
	return 0;
}

@@ -2889,12 +3005,12 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)

	CDBG("%s: Calling SOF\n", __func__);
	/* SOF IRQ */
	cam_ife_hw_mgr_handle_sof_for_camif_hw_res(ife_hwr_mgr_ctx,
	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
		evt_payload_priv);

	CDBG("%s: Calling RUP\n", __func__);
	/* REG UPDATE */
	cam_ife_hw_mgr_handle_rup_for_camif_hw_res(ife_hwr_mgr_ctx,
	cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
		evt_payload_priv);

	CDBG("%s: Calling EPOCH\n", __func__);
@@ -3095,6 +3211,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
	hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;

	CDBG("%s: Exit\n", __func__);
	return 0;
+2 −0

File changed.

Preview size limit exceeded, changes collapsed.

+22 −0

File changed.

Preview size limit exceeded, changes collapsed.

+1 −0

File changed.

Preview size limit exceeded, changes collapsed.

Loading