Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9831108a authored by Shivakumar Malke's avatar Shivakumar Malke
Browse files

msm: camera: isp: Handle early bufdones



In case of early bufdones, signalling fences for a few
ports is missed on targets which do not support the
last consumed address, which results in an unsignalled fence.

The issue is fixed by handling early bufdones and signalling
success for each resource which got an early bufdone.

CRs-Fixed: 3333269
Change-Id: I0a56f770806d48034bcc45d2ca68d8f9adcc8eee
Signed-off-by: default avatarShivakumar Malke <quic_smalke@quicinc.com>
parent 5685ecf2
Loading
Loading
Loading
Loading
+110 −11
Original line number Original line Diff line number Diff line
@@ -894,6 +894,33 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
	return rc;
	return rc;
}
}


static int __cam_isp_ctx_handle_early_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request  *req)
{
	int i, rc = 0;

	struct cam_isp_ctx_req *req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	struct cam_context *ctx = ctx_isp->base;

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		if (req_isp->early_fence_map_index[i] == 1) {
			rc = cam_sync_signal(req_isp->fence_map_out[i].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			req_isp->early_fence_map_index[i] = 0;
			req_isp->fence_map_out[i].sync_id = -1;
			CAM_DBG(CAM_ISP,
				"Sync :req %lld res 0x%x fd 0x%x idx %d ctx %u",
				req->request_id,
				req_isp->fence_map_out[i].resource_handle,
				req_isp->fence_map_out[i].sync_id, i,
				ctx->ctx_id);
			req_isp->flag_sync_set = true;
		}
	}
	return rc;
}
static int __cam_isp_ctx_handle_buf_done_for_request(
static int __cam_isp_ctx_handle_buf_done_for_request(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request  *req,
	struct cam_ctx_request  *req,
@@ -968,6 +995,13 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
		}
		}


		if (!req_isp->bubble_detected) {
		if (!req_isp->bubble_detected) {
			/* Signal fence for early bufdones got during reapply of a bubble req*/
			if ((req_isp->num_acked != 0) && (!req_isp->flag_sync_set)) {
				rc = __cam_isp_ctx_handle_early_buf_done(ctx_isp, req);
				if (rc)
					CAM_ERR(CAM_ISP, "Sync early bufdone failed rc=%d", rc);
			}

			CAM_DBG(CAM_ISP,
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u, Bad_frame %u",
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u, Bad_frame %u",
				req->request_id,
				req->request_id,
@@ -1017,18 +1051,19 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
			 */
			 */
			req_isp->num_acked++;
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				"buf done with bubble state %d recovery %d num_acked %d ctx %u",
				bubble_state, req_isp->bubble_report);
				bubble_state, req_isp->bubble_report,
				req_isp->num_acked, ctx->ctx_id);
			continue;
			continue;
		}
		}


		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc) {
		if (!rc) {
			req_isp->num_acked++;
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
			req_isp->fence_map_out[j].sync_id = -1;
		}
		}
		CAM_DBG(CAM_ISP, "rc %d num_acked %d req %lld, reset sync id 0x%x ctx %u",
			rc, req_isp->num_acked, req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);


		if ((ctx_isp->use_frame_header_ts) &&
		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			(req_isp->hw_update_data.frame_header_res_id ==
@@ -1275,8 +1310,10 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			 */
			 */
			req_isp->num_acked++;
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				"num_acked %d buf done with bubble state %d recovery %d ctx %d",
				bubble_state, req_isp->bubble_report);
				req_isp->num_acked, bubble_state,
				req_isp->bubble_report,
				ctx->ctx_id);
				/* Process deferred buf_done acks */
				/* Process deferred buf_done acks */


			if (req_isp->num_deferred_acks)
			if (req_isp->num_deferred_acks)
@@ -1288,13 +1325,13 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			continue;
			continue;
		}
		}


		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc) {
		if (!rc) {
			req_isp->num_acked++;
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
			req_isp->fence_map_out[j].sync_id = -1;
		}
		}
		CAM_DBG(CAM_ISP, "num_acked %d req %lld, reset sync id 0x%x ctx %u",
			req_isp->num_acked, req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);


		if ((ctx_isp->use_frame_header_ts) &&
		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			(req_isp->hw_update_data.frame_header_res_id ==
@@ -1329,9 +1366,60 @@ static int __cam_isp_ctx_handle_buf_done(
	struct cam_ctx_request *req;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_done_event_data done_next_req;
	struct cam_isp_hw_done_event_data done_next_req;
	int i, j;


	if (list_empty(&ctx->active_req_list)) {
	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Buf done with no active request");
		CAM_WARN(CAM_ISP, "Buf done with no active request");

		if (!list_empty(&ctx->wait_req_list)) {
			struct cam_isp_ctx_req *req_isp;

			req = list_first_entry(&ctx->wait_req_list,
					struct cam_ctx_request, list);

			if (ctx_isp->last_applied_req_id !=
				ctx_isp->last_bufdone_err_apply_req_id) {
				CAM_WARN(CAM_ISP,
					"Buf done with req %llu in wait list apply id:%lld last err id:%lld",
					req->request_id,
					ctx_isp->last_applied_req_id,
					ctx_isp->last_bufdone_err_apply_req_id);
				ctx_isp->last_bufdone_err_apply_req_id =
					ctx_isp->last_applied_req_id;
			}

			req_isp = (struct cam_isp_ctx_req *) req->req_priv;

			/*
			 * do not signal the fence as this request may go into
			 * Bubble state eventually.
			 */
			for (i = 0; i < done->num_handles; i++) {
				for (j = 0; j < req_isp->num_fence_map_out; j++) {
					if (done->resource_handle[i] ==
						req_isp->fence_map_out[j].resource_handle) {
						req_isp->num_acked++;
						/*
						 * save the fence map out index for signalling
						 * fence during re-apply of bubble request.
						 */
						req_isp->early_fence_map_index[j]++;

						CAM_WARN(CAM_ISP, "Early done req %lld res 0x%x",
							req->request_id,
							done->resource_handle[i]);

						CAM_WARN(CAM_ISP, "ctx %u ack %d total %d idx %d",
							ctx->ctx_id,
							req_isp->num_acked,
							req_isp->num_fence_map_out, j);
						break;
					}
				}
			}
		} else {
			CAM_WARN(CAM_ISP, "Buf done with no request in wait as well");
		}
		return 0;
		return 0;
	}
	}


@@ -2031,6 +2119,7 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
	void *evt_data)
{
{
	uint64_t request_id = 0;
	uint64_t request_id = 0;
	uint32_t i;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	struct cam_req_mgr_trigger_notify   notify;
	struct cam_req_mgr_trigger_notify   notify;
	struct cam_ctx_request             *req;
	struct cam_ctx_request             *req;
@@ -2068,6 +2157,10 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	req_isp->reapply = true;
	req_isp->reapply = true;
	req_isp->cdm_reset_before_apply = false;
	req_isp->cdm_reset_before_apply = false;


	/* reset early_fence_map_index array */
	for (i = 0; i < req_isp->num_fence_map_out; i++)
		req_isp->early_fence_map_index[i] = 0;

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
@@ -3128,7 +3221,7 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	enum cam_isp_ctx_activated_substate next_state)
	enum cam_isp_ctx_activated_substate next_state)
{
{
	int rc = 0;
	int rc = 0, i;
	struct cam_ctx_request          *req;
	struct cam_ctx_request          *req;
	struct cam_ctx_request          *active_req = NULL;
	struct cam_ctx_request          *active_req = NULL;
	struct cam_isp_ctx_req          *req_isp;
	struct cam_isp_ctx_req          *req_isp;
@@ -3225,6 +3318,12 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
	}
	}
	req_isp->bubble_report = apply->report_if_bubble;
	req_isp->bubble_report = apply->report_if_bubble;


	/* reset early_fence_map_index */
	for (i = 0; i <  req_isp->num_fence_map_out; i++)
		req_isp->early_fence_map_index[i] = 0;

	req_isp->flag_sync_set = false;

	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.hw_update_entries = req_isp->cfg;
+7 −2
Original line number Original line Diff line number Diff line
@@ -143,6 +143,10 @@ struct cam_isp_ctx_irq_ops {
 * @num_acked:                 Count to track acked entried for output.
 * @num_acked:                 Count to track acked entried for output.
 *                             If count equals the number of fence out, it means
 *                             If count equals the number of fence out, it means
 *                             the request has been completed.
 *                             the request has been completed.
 * @early_fence_map_index      Array which stores fence map out index for resource
 *                             which early buf done is received.
 * @flag_sync_set              Flags to track if early bufdone fence are signalled
 *                             or not
 * @num_deferred_acks:         Number of buf_dones/acks that are deferred to
 * @num_deferred_acks:         Number of buf_dones/acks that are deferred to
 *                             handle or signalled in special scenarios.
 *                             handle or signalled in special scenarios.
 *                             Increment this count instead of num_acked and
 *                             Increment this count instead of num_acked and
@@ -163,12 +167,13 @@ struct cam_isp_ctx_req {
	struct cam_ctx_request               *base;
	struct cam_ctx_request               *base;
	struct cam_hw_update_entry            cfg[CAM_ISP_CTX_CFG_MAX];
	struct cam_hw_update_entry            cfg[CAM_ISP_CTX_CFG_MAX];
	uint32_t                              num_cfg;
	uint32_t                              num_cfg;
	struct cam_hw_fence_map_entry         fence_map_out
	struct cam_hw_fence_map_entry         fence_map_out[CAM_ISP_CTX_RES_MAX];
						[CAM_ISP_CTX_RES_MAX];
	uint32_t                              num_fence_map_out;
	uint32_t                              num_fence_map_out;
	struct cam_hw_fence_map_entry         fence_map_in[CAM_ISP_CTX_RES_MAX];
	struct cam_hw_fence_map_entry         fence_map_in[CAM_ISP_CTX_RES_MAX];
	uint32_t                              num_fence_map_in;
	uint32_t                              num_fence_map_in;
	uint32_t                              num_acked;
	uint32_t                              num_acked;
	uint32_t                     early_fence_map_index[CAM_ISP_CTX_RES_MAX];
	bool                                  flag_sync_set;
	uint32_t                              num_deferred_acks;
	uint32_t                              num_deferred_acks;
	uint32_t                  deferred_fence_map_index[CAM_ISP_CTX_RES_MAX];
	uint32_t                  deferred_fence_map_index[CAM_ISP_CTX_RES_MAX];
	int32_t                               bubble_report;
	int32_t                               bubble_report;