
Commit 2dc88593 authored by Haritha Chintalapati, committed by Gerrit - the friendly Code Review server

Merge "msm: camera: isp: Bubble re-apply with CDM callback detect" into camera-kernel.lnx.4.0

parents d9aa9104 9075af08
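
In outline, the change below teaches the ISP context to distinguish a merely delayed buf_done from a stuck CDM while a bubble request is outstanding: on each SOF it counts frames since the bubble, then asks the IFE hw manager (via the new CAM_ISP_HW_MGR_GET_LAST_CDM_DONE command) which request last completed a CDM callback, and either keeps waiting or moves the request back to the pending list with the new cdm_reset_before_apply flag set. The standalone sketch below models only that decision; the enum, function and variable names are illustrative, not the driver's.

/*
 * Standalone model (not driver code) of the per-SOF bubble check added to
 * __cam_isp_ctx_notify_sof_in_activated_state(). All identifiers below are
 * illustrative; the driver operates on struct cam_isp_context instead.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum sof_bubble_action {
	SOF_SKIP_CHECK,         /* duplicate SOF timestamp: tasklet delay      */
	SOF_COUNT_FRAME,        /* first frame after the bubble: just count it */
	SOF_WAIT_FOR_BUF_DONE,  /* CDM already ran the request: keep waiting   */
	SOF_REAPPLY_WITH_RESET  /* CDM never ran it: re-apply after CDM reset  */
};

static enum sof_bubble_action check_bubble_on_sof(uint64_t sof_ts,
		uint64_t last_sof_ts, uint32_t bubble_frame_cnt,
		bool bubble_detected, uint64_t last_cdm_done_req,
		uint64_t bubble_req_id)
{
	if (sof_ts == last_sof_ts)
		return SOF_SKIP_CHECK;
	if (!bubble_detected || bubble_frame_cnt < 1)
		return SOF_COUNT_FRAME;
	/* Same comparison the driver makes on the GET_LAST_CDM_DONE result */
	return (last_cdm_done_req >= bubble_req_id) ?
		SOF_WAIT_FOR_BUF_DONE : SOF_REAPPLY_WITH_RESET;
}

int main(void)
{
	/* Frame 1 after the bubble: only bump bubble_frame_cnt */
	printf("%d\n", check_bubble_on_sof(100, 90, 0, true, 11, 12));
	/* Frame 2, CDM finished req 12: buf_done is just delayed */
	printf("%d\n", check_bubble_on_sof(110, 100, 1, true, 12, 12));
	/* Frame 2, CDM stuck before req 12: move the request back to pending */
	printf("%d\n", check_bubble_on_sof(110, 100, 1, true, 11, 12));
	return 0;
}
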
+11 −8
@@ -250,7 +250,9 @@ struct cam_hw_stream_setttings {
 * @num_out_map_entries:       Number of out map entries
 * @priv:                      Private pointer
 * @request_id:                Request ID
 * @reapply                True if reapplying after bubble
 * @reapply:                   True if reapplying after bubble
 * @cdm_reset_before_apply:    True if CDM needs to be reset before the bubble
 *                             request is re-applied
 *
 */
struct cam_hw_config_args {
@@ -263,6 +265,7 @@ struct cam_hw_config_args {
	uint64_t                        request_id;
	bool                            init_packet;
	bool                            reapply;
	bool                            cdm_reset_before_apply;
};

/**
+112 −8
@@ -822,6 +822,8 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
		req_isp->bubble_detected = false;
		list_del_init(&req->list);
		atomic_set(&ctx_isp->process_bubble, 0);
		req_isp->cdm_reset_before_apply = false;
		ctx_isp->bubble_frame_cnt = 0;

		if (buf_done_req_id <= ctx->last_flush_req) {
			for (i = 0; i < req_isp->num_fence_map_out; i++)
@@ -854,6 +856,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
		req_isp->reapply = false;
		req_isp->cdm_reset_before_apply = false;

		CAM_DBG(CAM_REQ,
			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
@@ -1550,6 +1553,9 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request  *req;
	struct cam_isp_ctx_req  *req_isp;
	struct cam_hw_cmd_args   hw_cmd_args;
	struct cam_isp_hw_cmd_args  isp_hw_cmd_args;
	uint64_t last_cdm_done_req = 0;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
			(struct cam_isp_hw_epoch_event_data *)evt_data;

@@ -1560,6 +1566,82 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP,
				"No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			rc = -EINVAL;
			return rc;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
				ctx_isp->sof_timestamp_val);
			goto notify_only;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (ctx_isp->bubble_frame_cnt >= 1 &&
			req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %d",
				last_cdm_done_req);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				ctx_isp->bubble_frame_cnt = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
		} else if (req_isp->bubble_detected) {
			ctx_isp->bubble_frame_cnt++;
			CAM_DBG(CAM_ISP,
				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld",
				req->request_id,
				ctx_isp->bubble_frame_cnt);
		} else {
			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
				req->request_id);
		}
	}

notify_only:
	/*
	 * notify reqmgr with sof signal. Note, due to scheduling delay
	 * we can run into situation that two active requests has already
@@ -1610,7 +1692,7 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
			ctx->ctx_id);
		rc = -EFAULT;
	}

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}

@@ -1776,6 +1858,7 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	req_isp->reapply = true;
	req_isp->cdm_reset_before_apply = false;

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
@@ -1990,6 +2073,7 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply = true;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
@@ -2914,14 +2998,10 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
	cfg.priv  = &req_isp->hw_update_data;
	cfg.init_packet = 0;
	cfg.reapply = req_isp->reapply;
	cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv,
		&cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d ,Can not apply the configuration",
			ctx->ctx_id);
	} else {
	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (!rc) {
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = next_state;
		ctx_isp->last_applied_req_id = apply->request_id;
@@ -2937,6 +3017,22 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
			req->request_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_APPLY, req);
	} else if (rc == -EALREADY) {
		spin_lock_bh(&ctx->lock);
		req_isp->bubble_detected = true;
		req_isp->cdm_reset_before_apply = false;
		atomic_set(&ctx_isp->process_bubble, 1);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		spin_unlock_bh(&ctx->lock);
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d ,Can not apply (req %lld) the configuration, rc %d",
			ctx->ctx_id, apply->request_id, rc);
	}
end:
	return rc;
@@ -3274,6 +3370,7 @@ static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
			}
		}
		req_isp->reapply = false;
		req_isp->cdm_reset_before_apply = false;
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
	}
@@ -3373,6 +3470,7 @@ static int __cam_isp_ctx_flush_req_in_top_state(
	}

end:
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	return rc;
@@ -3638,6 +3736,7 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply = true;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
@@ -4251,6 +4350,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
	req_isp->num_fence_map_in = cfg.num_in_map_entries;
	req_isp->num_acked = 0;
	req_isp->bubble_detected = false;
	req_isp->cdm_reset_before_apply = false;
	req_isp->hw_update_data.packet = packet;

	for (i = 0; i < req_isp->num_fence_map_out; i++) {
@@ -5008,6 +5108,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	start_isp.hw_config.priv  = &req_isp->hw_update_data;
	start_isp.hw_config.init_packet = 1;
	start_isp.hw_config.reapply = 0;
	start_isp.hw_config.cdm_reset_before_apply = false;

	ctx_isp->last_applied_req_id = req->request_id;

@@ -5021,6 +5122,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
		CAM_ISP_CTX_ACTIVATED_APPLIED :
		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
@@ -5200,6 +5302,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
	ctx_isp->reported_req_id = 0;
	ctx_isp->last_applied_req_id = 0;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic64_set(&ctx_isp->state_monitor_head, -1);
@@ -5715,6 +5818,7 @@ int cam_isp_context_init(struct cam_isp_context *ctx,
	ctx->use_frame_header_ts = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;

	ctx->hw_ctx = NULL;
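
One point worth calling out from the apply hunk above: cam_ife_mgr_config_hw() can now come back with -EALREADY, meaning the missing CDM callback arrived between the SOF-time check and the re-apply. The context does not treat that as a failure; it re-marks the request as a bubble, parks it on the active list and waits for buf_done. A minimal, illustrative-only model of that three-way branch (names are made up, -EALREADY is the real return value):

/*
 * Illustrative-only model of the branch added to
 * __cam_isp_ctx_apply_req_in_activated_state(); apply_result/handle_apply_rc
 * are made-up names, -EALREADY is the value returned by the hw manager.
 */
#include <errno.h>
#include <stdio.h>

enum apply_result {
	APPLY_OK,              /* applied: wait for EPOCH/buf_done           */
	APPLY_TREAT_AS_BUBBLE, /* CDM caught up: park request on active list */
	APPLY_FAILED           /* genuine configuration failure              */
};

static enum apply_result handle_apply_rc(int hw_config_rc)
{
	if (!hw_config_rc)
		return APPLY_OK;
	if (hw_config_rc == -EALREADY)
		return APPLY_TREAT_AS_BUBBLE;
	return APPLY_FAILED;
}

int main(void)
{
	printf("rc 0         -> %d\n", handle_apply_rc(0));
	printf("rc -EALREADY -> %d\n", handle_apply_rc(-EALREADY));
	printf("rc -EINVAL   -> %d\n", handle_apply_rc(-EINVAL));
	return 0;
}
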
+22 −15
@@ -153,6 +153,8 @@ struct cam_isp_ctx_irq_ops {
 * @hw_update_data:            HW update data for this request
 * @event_timestamp:           Timestamp for different stage of request
 * @reapply:                   True if reapplying after bubble
 * @cdm_reset_before_apply:    Set to True for a bubble re-apply when buf done
 *                             has not been received
 *
 */
struct cam_isp_ctx_req {
@@ -171,6 +173,7 @@ struct cam_isp_ctx_req {
		[CAM_ISP_CTX_EVENT_MAX];
	bool                                  bubble_detected;
	bool                                  reapply;
	bool                                  cdm_reset_before_apply;
};

/**
@@ -242,6 +245,8 @@ struct cam_isp_context_event_record {
 * @subscribe_event:           The irq event mask that CRM subscribes to, IFE
 *                             will invoke CRM cb at those event.
 * @last_applied_req_id:       Last applied request id
 * @last_sof_timestamp:        SOF timestamp of the last frame
 * @bubble_frame_cnt:          Number of frames since the bubble was detected
 * @state_monitor_head:        Write index to the state monitoring array
 * @req_info                   Request id information about last buf done
 * @cam_isp_ctx_state_monitor: State monitoring array
@@ -284,6 +289,8 @@ struct cam_isp_context {
	int64_t                          reported_req_id;
	uint32_t                         subscribe_event;
	int64_t                          last_applied_req_id;
	uint64_t                         last_sof_timestamp;
	uint32_t                         bubble_frame_cnt;
	atomic64_t                       state_monitor_head;
	struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[
		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES];
+34 −0
@@ -2837,6 +2837,7 @@ void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
		complete_all(&ctx->config_done_complete);
		reg_dump_done = atomic_read(&ctx->cdm_done);
		atomic_set(&ctx->cdm_done, 1);
		ctx->last_cdm_done_req = cookie;
		if ((g_ife_hw_mgr.debug_cfg.per_req_reg_dump) &&
			(!reg_dump_done))
			cam_ife_mgr_handle_reg_dump(ctx,
@@ -3239,6 +3240,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
	if (cdm_acquire.id == CAM_CDM_IFE)
		ife_ctx->internal_cdm = true;
	atomic_set(&ife_ctx->cdm_done, 1);
	ife_ctx->last_cdm_done_req = 0;

	acquire_args->support_consumed_addr =
		g_ife_hw_mgr.support_consumed_addr;
@@ -3517,6 +3519,7 @@ static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
	ife_ctx->cdm_handle = cdm_acquire.handle;
	ife_ctx->cdm_id = cdm_acquire.id;
	atomic_set(&ife_ctx->cdm_done, 1);
	ife_ctx->last_cdm_done_req = 0;

	acquire_args->ctxt_to_hw_map = ife_ctx;
	ife_ctx->ctx_in_use = 1;
@@ -3853,6 +3856,7 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
	struct cam_ife_hw_mgr_ctx *ctx;
	struct cam_isp_prepare_hw_update_data *hw_update_data;
	unsigned long rem_jiffies = 0;
	bool cdm_hang_detect = false;

	if (!hw_mgr_priv || !config_hw_args) {
		CAM_ERR(CAM_ISP,
@@ -3888,6 +3892,31 @@ static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
	CAM_DBG(CAM_ISP, "Ctx[%pK][%d] : Applying Req %lld, init_packet=%d",
		ctx, ctx->ctx_index, cfg->request_id, cfg->init_packet);

	if (cfg->reapply && cfg->cdm_reset_before_apply) {
		if (ctx->last_cdm_done_req < cfg->request_id) {
			cdm_hang_detect =
				cam_cdm_detect_hang_error(ctx->cdm_handle);
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"CDM callback not received for req: %lld, last_cdm_done_req: %lld, cdm_hang_detect: %d",
				cfg->request_id, ctx->last_cdm_done_req,
				cdm_hang_detect);
			rc = cam_cdm_reset_hw(ctx->cdm_handle);
			if (rc) {
				CAM_ERR_RATE_LIMIT(CAM_ISP,
					"CDM reset unsuccessful for req: %lld. ctx: %d, rc: %d",
					cfg->request_id, ctx->ctx_index, rc);
				ctx->last_cdm_done_req = 0;
				return rc;
			}
		} else {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"CDM callback received, should wait for buf done for req: %lld",
				cfg->request_id);
			return -EALREADY;
		}
		ctx->last_cdm_done_req = 0;
	}

	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
		if (hw_update_data->bw_config_valid[i] == true) {

@@ -4786,6 +4815,7 @@ static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
	ctx->is_fe_enabled = false;
	ctx->is_offline = false;
	ctx->pf_mid_found = false;
	ctx->last_cdm_done_req = 0;
	atomic_set(&ctx->overflow_pending, 0);
	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
		ctx->sof_cnt[i] = 0;
@@ -7014,6 +7044,10 @@ static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
				isp_hw_cmd_args->u.packet_op_code =
				CAM_ISP_PACKET_UPDATE_DEV;
			break;
		case CAM_ISP_HW_MGR_GET_LAST_CDM_DONE:
			isp_hw_cmd_args->u.last_cdm_done =
				ctx->last_cdm_done_req;
			break;
		default:
			CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
				hw_cmd_args->cmd_type);
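
On the hw-manager side, the pre-apply check added to cam_ife_mgr_config_hw() is the mirror image of the context-side SOF check: for a bubble re-apply with cdm_reset_before_apply set, it compares last_cdm_done_req against the request id, resets the CDM (cam_cdm_reset_hw) when the callback never came, and returns -EALREADY when it did. A minimal sketch under those assumptions, with reset_cdm() standing in for the real reset and the hang diagnostics omitted:

/*
 * Minimal model (illustrative names) of the pre-apply CDM check added to
 * cam_ife_mgr_config_hw(); reset_cdm() stands in for cam_cdm_reset_hw() and
 * the cam_cdm_detect_hang_error() diagnostics are left out.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int reset_cdm(void) { return 0; } /* pretend the reset succeeds */

static int pre_apply_cdm_check(bool reapply, bool cdm_reset_before_apply,
			       uint64_t last_cdm_done_req, uint64_t req_id)
{
	if (!(reapply && cdm_reset_before_apply))
		return 0;            /* normal apply, nothing to do           */
	if (last_cdm_done_req >= req_id)
		return -EALREADY;    /* callback arrived: wait for buf_done   */
	return reset_cdm();          /* CDM looks stuck: reset it, then apply */
}

int main(void)
{
	printf("plain apply             -> %d\n",
	       pre_apply_cdm_check(false, false, 0, 12));
	printf("re-apply, CDM stuck     -> %d\n",
	       pre_apply_cdm_check(true, true, 11, 12));
	printf("re-apply, CDM caught up -> %d\n",
	       pre_apply_cdm_check(true, true, 12, 12));
	return 0;
}
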
+2 −0
@@ -85,6 +85,7 @@ struct cam_ife_hw_mgr_debug {
 *                          context
 * @cdm_done                flag to indicate cdm has finished writing shadow
 *                          registers
 * @last_cdm_done_req:      Request id of the last CDM done callback
 * @is_rdi_only_context     flag to specify the context has only rdi resource
 * @config_done_complete    indicator for configuration complete
 * @reg_dump_buf_desc:      cmd buffer descriptors for reg dump
@@ -138,6 +139,7 @@ struct cam_ife_hw_mgr_ctx {
	uint32_t                        eof_cnt[CAM_IFE_HW_NUM_MAX];
	atomic_t                        overflow_pending;
	atomic_t                        cdm_done;
	uint64_t                        last_cdm_done_req;
	uint32_t                        is_rdi_only_context;
	struct completion               config_done_complete;
	uint32_t                        hw_version;