Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cb920830 authored by Shivakumar Malke's avatar Shivakumar Malke
Browse files

msm: camera: isp: Handle deferred bufdone and bubble cases



- In cases where bufdone is handled without request being in
  active list, there could be possibility that the request
  is applied just before SOF, RUP but the context state is
  not moved to applied state, at this time the request is in
  wait_list. In such cases RUP event doesn't move the request
  into active_list. Also, if the bufdone on a port comes
  before EPOCH, that buf_done will be dropped and then the
  request will eventually be tagged as BUBBLE at the time of
  EPOCH. Since buf_done is dropped, the request will never
  come out of BUBBLE. To handle such cases and to come out of
  BUBBLE, check if BUF_DONE matches with the request in wait_list
  by checking last_consumed address and if so, mark it as deferred
  buf_done and handle once the request is moved to active_list.

- When there are deferred bufdones even before bubble is
  detected, then such deferred bufdones need to be handled during
  bubble, otherwise the bubble request cannot be finished. This change
  handles deferred bufdone in case of bubble.

CRs-Fixed: 3468612
Change-Id: I17b71f693a7d12cd7ba9dd38a94f0103c039b2d1
Signed-off-by: default avatarShivakumar Malke <quic_smalke@quicinc.com>
parent 9a2fd8d3
Loading
Loading
Loading
Loading
+233 −8
Original line number Diff line number Diff line
@@ -754,6 +754,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(

	if (req_isp->bubble_detected && req_isp->bubble_report) {
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		req_isp->bubble_detected = false;
		list_del_init(&req->list);
		atomic_set(&ctx_isp->process_bubble, 0);
@@ -941,12 +942,112 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
	return rc;
}

/*
 * __cam_isp_handle_deferred_buf_done()
 *
 * @brief: Process buf_done events that were deferred because they
 *         arrived while the request was not yet in the active list.
 *         Resets num_deferred_acks to 0 once all entries are handled.
 *
 * @ctx_isp:         ISP context the request belongs to
 * @req:             Request whose deferred buf_dones are processed
 * @bubble_handling: true when invoked during bubble recovery; the
 *                   request will be re-applied, so fences are counted
 *                   toward num_acked but NOT signalled here
 * @status:          Sync status used to signal fences when
 *                   bubble_handling is false
 *
 * @return: 0 on success, last cam_sync_signal() error otherwise
 */
static int __cam_isp_handle_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request  *req,
	bool bubble_handling,
	uint32_t status)
{
	int i, j;
	int rc = 0;
	struct cam_isp_ctx_req *req_isp =
		(struct cam_isp_ctx_req *) req->req_priv;
	struct cam_context *ctx = ctx_isp->base;

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handling %d deferred buf_dones acked=%d, bubble_handling=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, bubble_handling);

	for (i = 0; i < req_isp->num_deferred_acks; i++) {
		/* j = fence_map_out index recorded when this ack was deferred */
		j = req_isp->deferred_fence_map_index[i];

		CAM_DBG(CAM_ISP,
			"ctx[%d] : Sync with status=%d, req %lld res 0x%x sync_id 0x%x",
			ctx->ctx_id, status,
			req->request_id,
			req_isp->fence_map_out[j].resource_handle,
			req_isp->fence_map_out[j].sync_id);

		/* sync_id == -1 means this fence was already signalled */
		if (req_isp->fence_map_out[j].sync_id == -1) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] Deferred done already signaled, req=%llu, j=%d, res=0x%x",
				ctx->ctx_id, req->request_id, j,
				req_isp->fence_map_out[j].resource_handle);
			continue;
		}

		if (!bubble_handling) {
			/*
			 * Deferred acks are normally consumed via the bubble
			 * path; reaching here outside of it is unexpected,
			 * so warn but still signal the fence with @status.
			 */
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Req %llu, status=%d res=0x%x should never happen",
				ctx->ctx_id, req->request_id, status,
				req_isp->fence_map_out[j].resource_handle);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				status);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"ctx[%d] Sync failed Req %llu, sync_id %d status %d rc %d",
					ctx->ctx_id, req->request_id,
					req_isp->fence_map_out[j].sync_id,
					status, rc);
			} else {
				req_isp->num_acked++;
				/* mark fence as consumed */
				req_isp->fence_map_out[j].sync_id = -1;
			}
		} else {
			/*
			 * Bubble recovery: the request gets re-applied, so
			 * keep the fence alive and only account the ack.
			 */
			req_isp->num_acked++;
		}
	}

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handled %d deferred buf_dones num_acked=%d, map_out=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, req_isp->num_fence_map_out);

	/* all deferred entries consumed */
	req_isp->num_deferred_acks = 0;

	return rc;
}

/*
 * __cam_isp_ctx_handle_deferred_buf_done_in_bubble()
 *
 * @brief: Consume any deferred buf_done acks for a request that has
 *         entered bubble handling, signalling fences with ERROR
 *         status (unless bubble_report requests re-apply), and finish
 *         the request if every output fence has now been acked.
 *
 * @ctx_isp: ISP context the request belongs to
 * @req:     Bubble request to process
 *
 * @return: 0 on success, error code from the helpers otherwise
 */
static int __cam_isp_ctx_handle_deferred_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request  *req)
{
	int                     rc = 0;
	struct cam_context     *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;

	req_isp = (struct cam_isp_ctx_req *)req->req_priv;

	if (req_isp->num_deferred_acks)
		rc = __cam_isp_handle_deferred_buf_done(ctx_isp, req,
			req_isp->bubble_report,
			CAM_SYNC_STATE_SIGNALED_ERROR);

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING:req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	/* every output fence acked: move the request off the req list */
	if (req_isp->num_acked == req_isp->num_fence_map_out)
		rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);

	return rc;
}


static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request  *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	bool verify_consumed_addr)
	bool verify_consumed_addr,
	bool defer_buf_done)
{
	int rc = 0;
	int i, j;
@@ -1002,7 +1103,32 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			continue;
		}

		if (!req_isp->bubble_detected) {
		if (defer_buf_done) {
			uint32_t deferred_indx = req_isp->num_deferred_acks;

			/*
			 * If we are handling this BUF_DONE event for a request
			 * that is still in wait_list, do not signal now,
			 * instead mark it as done and handle it later -
			 * if this request is going into BUBBLE state later
			 * it will automatically be re-applied. If this is not
			 * going into BUBBLE, signal fences later.
			 * Note - we will come here only if the last consumed
			 * address matches with this ports buffer.
			 */
			req_isp->deferred_fence_map_index[deferred_indx] = j;
			req_isp->num_deferred_acks++;
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred buf done for %llu bubble state %d recovery %d",
				ctx->ctx_id, req->request_id, bubble_state,
				req_isp->bubble_report);
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred info: def_acks=%d, j=%d, hld=0x%x, sync_id=%d",
				ctx->ctx_id, req_isp->num_deferred_acks, j,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id);
			continue;
		} else if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
@@ -1015,6 +1141,13 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			if (rc)
				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
					 rc);

			/* Process deferred buf_done acks */
			if (req_isp->num_deferred_acks)
				__cam_isp_handle_deferred_buf_done(ctx_isp,
					req, false,
					CAM_SYNC_STATE_SIGNALED_SUCCESS);

		} else if (!req_isp->bubble_report) {
			CAM_ERR(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
@@ -1028,6 +1161,13 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			if (rc)
				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
					rc);

			/* Process deferred buf_done acks */
			if (req_isp->num_deferred_acks)
				__cam_isp_handle_deferred_buf_done(ctx_isp,
					req, false,
					CAM_SYNC_STATE_SIGNALED_ERROR);

		} else {
			/*
			 * Ignore the buffer done if bubble detect is on
@@ -1039,6 +1179,12 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			/* Process deferred buf_done acks */
			if (req_isp->num_deferred_acks)
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					true,
					CAM_SYNC_STATE_SIGNALED_ERROR);

			continue;
		}

@@ -1086,7 +1232,8 @@ static int __cam_isp_ctx_handle_buf_done(
	struct cam_isp_hw_done_event_data done_next_req;

	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP, "Buf done with no active request");
		CAM_WARN(CAM_ISP,
			"Buf done with no active request ctx %u", ctx->ctx_id);
		return 0;
	}

@@ -1186,7 +1333,51 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_context *ctx = ctx_isp->base;

	if (list_empty(&ctx->active_req_list)) {
		CAM_DBG(CAM_ISP, "Buf done with no active request");
		CAM_WARN(CAM_ISP,
			"Buf done with no active request bubble_state=%d",
			bubble_state);

		if (!list_empty(&ctx->wait_req_list)) {
			struct cam_isp_ctx_req *req_isp;

			req = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
			CAM_WARN(CAM_ISP,
				"Buf done with no active req, but with req in wait list, req %llu",
				req->request_id);
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;

			/*
			 * Verify consumed address for this request to make sure
			 * we are handling the buf_done for the correct
			 * buffer. Also defer actual buf_done handling, i.e
			 * do not signal the fence as this request may go into
			 * Bubble state eventully.
			 */
			rc =
			__cam_isp_ctx_handle_buf_done_for_request_verify_addr(
				ctx_isp, req, done, bubble_state, true, true);
		} else if (!list_empty(&ctx->pending_req_list)) {
			struct cam_isp_ctx_req *req_isp;

			req = list_first_entry(&ctx->pending_req_list,
				struct cam_ctx_request, list);

			CAM_WARN(CAM_ISP,
				"Buf done with no active req, but req in pending list, req %llu",
				req->request_id);

			req_isp = (struct cam_isp_ctx_req *) req->req_priv;

			/*
			 *  We saw the case that the hw config is blocked due
			 *  to some reason, then we get the reg upd and bufdone
			 *  before the req is added to wait req list.
			 */
			rc =
			__cam_isp_ctx_handle_buf_done_for_request_verify_addr(
				ctx_isp, req, done, bubble_state, true, true);
		}
		return 0;
	}

@@ -1214,7 +1405,7 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
		ctx_isp, req, done, bubble_state,
		!irq_delay_detected);
		!irq_delay_detected, false);

	/*
	 * Verify the consumed address for next req all the time,
@@ -1224,7 +1415,7 @@ static int __cam_isp_ctx_handle_buf_done_verify_addr(
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, next_req, done,
			bubble_state, true);
			bubble_state, true, false);

	return rc;
}
@@ -1518,6 +1709,16 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
				if (req_isp->num_fence_map_out ==
					req_isp->num_deferred_acks) {
					__cam_isp_handle_deferred_buf_done(
						ctx_isp,
						req,
						true,
						CAM_SYNC_STATE_SIGNALED_ERROR);
				__cam_isp_ctx_handle_buf_done_for_req_list(
						ctx_isp, req);
				}
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
@@ -1803,6 +2004,12 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	/*
	 * Handle the deferred buf done after moving
	 * the bubble req to active req list.
	 */
	__cam_isp_ctx_handle_deferred_buf_done_in_bubble(
			ctx_isp, req);

	if ((req->request_id > ctx_isp->reported_req_id)
		&& !req_isp->bubble_report) {
@@ -1856,6 +2063,9 @@ static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
		return -EINVAL;
	}

	if (atomic_read(&ctx_isp->apply_in_progress))
		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;
@@ -2823,6 +3033,8 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
	cfg.reapply = req_isp->reapply;
	cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;

	atomic_set(&ctx_isp->apply_in_progress, 1);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (!rc) {
		spin_lock_bh(&ctx->lock);
@@ -2857,6 +3069,7 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
			"ctx_id:%d ,Can not apply (req %lld) the configuration, rc %d",
			ctx->ctx_id, apply->request_id, rc);
	}
	atomic_set(&ctx_isp->apply_in_progress, 0);
end:
	return rc;
}
@@ -3441,8 +3654,8 @@ static int __cam_isp_ctx_rdi_only_sof_in_top_state(
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:%lld ctx %u",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id);

	/*
	 * notify reqmgr with sof signal. Note, due to scheduling delay
@@ -3685,6 +3898,17 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				if (req_isp->num_fence_map_out ==
					req_isp->num_deferred_acks) {
					__cam_isp_handle_deferred_buf_done(
						ctx_isp,
						req,
						true,
						CAM_SYNC_STATE_SIGNALED_ERROR);

				__cam_isp_ctx_handle_buf_done_for_req_list(
						ctx_isp, req);
				}
				goto end;
			} else {
				CAM_DBG(CAM_ISP,
@@ -4285,6 +4509,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
	req_isp->num_fence_map_out = cfg.num_out_map_entries;
	req_isp->num_fence_map_in = cfg.num_in_map_entries;
	req_isp->num_acked = 0;
	req_isp->num_deferred_acks = 0;
	req_isp->bubble_detected = false;
	req_isp->cdm_reset_before_apply = false;

+13 −1
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _CAM_ISP_CONTEXT_H_
@@ -144,6 +144,13 @@ struct cam_isp_ctx_irq_ops {
 * @num_acked:                 Count to track acked entried for output.
 *                             If count equals the number of fence out, it means
 *                             the request has been completed.
 * @num_deferred_acks:         Number of buf_dones/acks that are deferred to
 *                             handle or signalled in special scenarios.
 *                             Increment this count instead of num_acked and
 *                             handle the events later where eventually
 *                             increment num_acked.
 * @deferred_fence_map_index:  Saves the indices of fence_map_out for which
 *                             handling of buf_done is deferred.
 * @bubble_report:             Flag to track if bubble report is active on
 *                             current request
 * @hw_update_data:            HW update data for this request
@@ -164,6 +171,9 @@ struct cam_isp_ctx_req {
	struct cam_hw_fence_map_entry         fence_map_in[CAM_ISP_CTX_RES_MAX];
	uint32_t                              num_fence_map_in;
	uint32_t                              num_acked;
	uint32_t                              num_deferred_acks;
	uint32_t                              deferred_fence_map_index[
						CAM_ISP_CTX_RES_MAX];
	int32_t                               bubble_report;
	struct cam_isp_prepare_hw_update_data hw_update_data;
	ktime_t                               event_timestamp
@@ -257,6 +267,7 @@ struct cam_isp_context_event_record {
 * @custom_enabled:            Custom HW enabled for this ctx
 * @use_frame_header_ts:       Use frame header for qtimer ts
 * @support_consumed_addr:     Indicate whether HW has last consumed addr reg
 * @apply_in_progress:         Whether request apply is in progress
 * @init_timestamp:            Timestamp at which this context is initialized
 * @rxd_epoch:                 Indicate whether epoch has been received. Used to
 *                             decide whether to apply request in offline ctx
@@ -302,6 +313,7 @@ struct cam_isp_context {
	bool                                  custom_enabled;
	bool                                  use_frame_header_ts;
	bool                                  support_consumed_addr;
	atomic_t                              apply_in_progress;
	unsigned int                          init_timestamp;
	atomic_t                              rxd_epoch;
	struct cam_req_mgr_core_workq        *workq;