Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 47c185f9 authored by Venkat Chinta's avatar Venkat Chinta Committed by Gerrit - the friendly Code Review server
Browse files

msm: camera: isp: Add support for offline IFE



This change adds a new IRQ state machine and updates the acquire
logic to enable offline IFE. This change also adds the fixes
necessary to enable bus read for various buffer formats, and
appends a GO command at the end of each packet for offline
contexts.

CRs-Fixed: 2513939
Change-Id: Ie068670ed11aa6713e8f0cb817e4b5d4c209e696
Signed-off-by: default avatarVenkat Chinta <vchinta@codeaurora.org>
Signed-off-by: default avatarVishalsingh Hajeri <vhajeri@codeaurora.org>
parent 2dad6bd0
Loading
Loading
Loading
Loading
+199 −8
Original line number Diff line number Diff line
@@ -965,6 +965,96 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	return rc;
}

/*
 * Program the oldest pending request into the HW for an offline IFE
 * context and move it to the wait list.
 *
 * Returns 0 when a request was applied or when applying is simply not
 * possible right now (context not activated, or two requests already
 * active); -EFAULT when there is nothing pending; otherwise the error
 * from hw_config().
 */
static int __cam_isp_ctx_apply_req_offline(
	struct cam_context *ctx, uint32_t next_state,
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct cam_ctx_request          *pending;
	struct cam_isp_ctx_req          *pending_isp;
	struct cam_hw_config_args        hw_cfg;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP, "No pending requests to apply");
		rc = -EFAULT;
		goto end;
	}

	/* Only apply while streaming and with room in the HW pipeline. */
	if ((ctx->state != CAM_CTX_ACTIVATED) ||
		(ctx_isp->active_req_cnt >= 2))
		goto end;

	pending = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx %u",
		pending->request_id, ctx_isp->substate_activated, ctx->ctx_id);
	pending_isp = (struct cam_isp_ctx_req *) pending->req_priv;

	memset(&hw_cfg, 0, sizeof(hw_cfg));
	hw_cfg.ctxt_to_hw_map        = ctx_isp->hw_ctx;
	hw_cfg.request_id            = pending->request_id;
	hw_cfg.hw_update_entries     = pending_isp->cfg;
	hw_cfg.num_hw_update_entries = pending_isp->num_cfg;
	hw_cfg.priv                  = &pending_isp->hw_update_data;
	hw_cfg.init_packet           = 0;

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv,
		&hw_cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
		goto end;
	}

	/* Applied: arm for the next epoch and park the request on wait. */
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated  = next_state;
	ctx_isp->last_applied_req_id = pending->request_id;
	list_del_init(&pending->list);
	list_add_tail(&pending->list, &ctx->wait_req_list);
	CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
		next_state, ctx_isp->last_applied_req_id);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
		pending->request_id);
end:
	return rc;
}

/*
 * EPOCH handler for the offline IFE context: mark the epoch as received,
 * report the oldest not-yet-reported active request via the SOF timestamp
 * event, and immediately try to apply the next pending request.
 */
static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context     *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *tmp;
	uint64_t                notify_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u", ctx_isp->frame_id,
		ctx->ctx_id);

	/* First active request newer than what was already reported. */
	list_for_each_entry_safe(req, tmp, &ctx->active_req_list, list) {
		if (req->request_id <= ctx_isp->reported_req_id)
			continue;
		notify_id = req->request_id;
		ctx_isp->reported_req_id = notify_id;
		break;
	}

	__cam_isp_ctx_apply_req_offline(ctx, CAM_ISP_CTX_ACTIVATED_APPLIED,
		ctx_isp);

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, notify_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		notify_id);

	return 0;
}

static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
@@ -1738,6 +1828,9 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,

	} while (req->request_id < ctx_isp->last_applied_req_id);

	if (ctx_isp->offline_context)
		goto exit;

	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
@@ -1788,6 +1881,7 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,

	CAM_DBG(CAM_ISP, "Exit");

exit:
	return rc;
}

@@ -2171,6 +2265,63 @@ static struct cam_isp_ctx_irq_ops
	},
};

/*
 * Per-substate IRQ dispatch table for offline IFE contexts, indexed first
 * by activated substate (SOF, APPLIED, EPOCH, BUBBLE, BUBBLE_APPLIED,
 * HW_ERROR, HALT) and then by HW event. From the handler names the event
 * slot order appears to be [ERROR, SOF, REG_UPDATE, EPOCH, EOF, BUF_DONE]
 * -- NOTE(review): confirm against the cam_isp HW event enum. A NULL slot
 * means the event is ignored in that substate; an all-empty substate
 * (BUBBLE, HALT, ...) ignores every event.
 */
static struct cam_isp_ctx_irq_ops
	cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
	/* SOF */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			NULL,
			NULL,
			NULL,
			NULL,
			NULL,
		},
	},
	/* APPLIED */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_applied_state,
			NULL,
			NULL,
			__cam_isp_ctx_buf_done_in_applied,
		},
	},
	/* EPOCH */
	{
		.irq_ops = {
			__cam_isp_ctx_handle_error,
			NULL,
			NULL,
			__cam_isp_ctx_offline_epoch_in_activated_state,
			NULL,
			__cam_isp_ctx_buf_done_in_epoch,
		},
	},
	/* BUBBLE */
	{
	},
	/* Bubble Applied */
	{
	},
	/* HW ERROR */
	{
		.irq_ops = {
			NULL,
			__cam_isp_ctx_sof_in_activated_state,
			__cam_isp_ctx_reg_upd_in_hw_error,
			NULL,
			NULL,
			NULL,
		},
	},
	/* HALT */
	{
	},
};

static int __cam_isp_ctx_apply_req_in_activated_state(
	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
	enum cam_isp_ctx_activated_substate next_state)
@@ -2722,6 +2873,7 @@ static int __cam_isp_ctx_flush_req_in_top_state(
end:
	ctx_isp->bubble_frame_cnt = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	return rc;
}

@@ -3606,8 +3758,11 @@ static int __cam_isp_ctx_config_dev_in_top_state(
				ctx->state);
		}
	} else {
		if (ctx->state != CAM_CTX_FLUSHED && ctx->state >= CAM_CTX_READY
			&& ctx->ctx_crm_intf->add_req) {
		if (ctx_isp->offline_context) {
			__cam_isp_ctx_enqueue_request_in_order(ctx, req);
		} else if ((ctx->state != CAM_CTX_FLUSHED) &&
			(ctx->state >= CAM_CTX_READY) &&
			ctx->ctx_crm_intf->add_req) {
			add_req.link_hdl = ctx->link_hdl;
			add_req.dev_hdl  = ctx->dev_hdl;
			add_req.req_id   = req->request_id;
@@ -3634,6 +3789,13 @@ static int __cam_isp_ctx_config_dev_in_top_state(
		"Preprocessing Config req_id %lld successful on ctx %u",
		req->request_id, ctx->ctx_id);

	if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch)) {
		spin_lock_bh(&ctx->lock);
		__cam_isp_ctx_apply_req_offline(ctx,
			CAM_ISP_CTX_ACTIVATED_APPLIED, ctx_isp);
		spin_unlock_bh(&ctx->lock);
	}

	return rc;

put_ref:
@@ -3753,6 +3915,10 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
@@ -3911,6 +4077,11 @@ static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
@@ -4062,6 +4233,12 @@ static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
			cam_isp_ctx_fs2_state_machine_irq;
		ctx_isp->substate_machine =
			cam_isp_ctx_fs2_state_machine;
	} else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
		CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources");
		ctx_isp->substate_machine_irq =
			cam_isp_ctx_offline_state_machine_irq;
		ctx_isp->substate_machine = NULL;
		ctx_isp->offline_context = true;
	} else {
		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
		ctx_isp->substate_machine_irq =
@@ -4128,11 +4305,10 @@ static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,

	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);

	if (!rc && (ctx->link_hdl >= 0)) {
	if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) {
		ctx->state = CAM_CTX_READY;
		trace_cam_context_state("ISP", ctx);
	}

	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
	return rc;
}
@@ -4289,6 +4465,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
		start_isp.start_only = false;

	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->frame_id = 0;
	ctx_isp->active_req_cnt = 0;
	ctx_isp->reported_req_id = 0;
@@ -4311,11 +4488,24 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	 */
	list_del_init(&req->list);

	if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
	if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->free_req_list);
		atomic_set(&ctx_isp->rxd_epoch, 1);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to free list(cnt: %d) offline ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
		list_add_tail(&req->list, &ctx->wait_req_list);
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to wait list(cnt: %d) ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		CAM_DBG(CAM_REQ,
			"Move pending req: %lld to active list(cnt: %d) ctx %u offline %d",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id,
			ctx_isp->offline_context);
	}

	/*
@@ -4460,6 +4650,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
	ctx_isp->last_applied_req_id = 0;
	ctx_isp->req_info.last_bufdone_req_id = 0;
	atomic_set(&ctx_isp->process_bubble, 0);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	atomic64_set(&ctx_isp->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
+5 −0
Original line number Diff line number Diff line
@@ -245,12 +245,15 @@ struct cam_isp_context_event_record {
 * @event_record:              Event record array
 * @rdi_only_context:          Get context type information.
 *                             true, if context is rdi only context
 * @offline_context:           Indicate whether context is for offline IFE
 * @hw_acquired:               Indicate whether HW resources are acquired
 * @init_received:             Indicate whether init config packet is received
 * @split_acquire:             Indicate whether a separate acquire is expected
 * @custom_enabled:            Custom HW enabled for this ctx
 * @use_frame_header_ts:       Use frame header for qtimer ts
 * @init_timestamp:            Timestamp at which this context is initialized
 * @rxd_epoch:                 Indicate whether epoch has been received. Used to
 *                             decide whether to apply request in offline ctx
 *
 */
struct cam_isp_context {
@@ -283,12 +286,14 @@ struct cam_isp_context {
	struct cam_isp_context_event_record   event_record[
		CAM_ISP_CTX_EVENT_MAX][CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES];
	bool                                  rdi_only_context;
	bool                                  offline_context;
	bool                                  hw_acquired;
	bool                                  init_received;
	bool                                  split_acquire;
	bool                                  custom_enabled;
	bool                                  use_frame_header_ts;
	unsigned int                          init_timestamp;
	atomic_t                              rxd_epoch;
};

/**
+414 −296

File changed.

Preview size limit exceeded, changes collapsed.

+4 −2
Original line number Diff line number Diff line
@@ -137,11 +137,12 @@ struct cam_ife_hw_mgr_debug {
 * @last_dump_flush_req_id  Last req id for which reg dump on flush was called
 * @last_dump_err_req_id    Last req id for which reg dump on error was called
 * @init_done               indicate whether init hw is done
 * @is_fe_enable            indicate whether fetch engine\read path is enabled
 * @is_fe_enabled           Indicate whether fetch engine\read path is enabled
 * @is_dual                 indicate whether context is in dual VFE mode
 * @custom_enabled          update the flag if context is connected to custom HW
 * @use_frame_header_ts     obtain qtimer ts using frame header
 * @ts                      captured timestamp when the ctx is acquired
 * @is_offline              Indicate whether context is for offline IFE
 */
struct cam_ife_hw_mgr_ctx {
	struct list_head                list;
@@ -186,11 +187,12 @@ struct cam_ife_hw_mgr_ctx {
	uint64_t                        last_dump_flush_req_id;
	uint64_t                        last_dump_err_req_id;
	bool                            init_done;
	bool                            is_fe_enable;
	bool                            is_fe_enabled;
	bool                            is_dual;
	bool                            custom_enabled;
	bool                            use_frame_header_ts;
	struct timespec64               ts;
	bool                            is_offline;
};

/**
+98 −1
Original line number Diff line number Diff line
@@ -926,7 +926,7 @@ int cam_isp_add_reg_update(
				return rc;

			CAM_DBG(CAM_ISP, "Reg update added for res %d hw_id %d",
				res->res_id, res->hw_intf->hw_idx);
				res->res_type, res->hw_intf->hw_idx);
			reg_update_size += get_regup.cmd.used_bytes;
		}
	}
@@ -960,3 +960,100 @@ int cam_isp_add_reg_update(

	return rc;
}

/*
 * cam_isp_add_go_cmd() - Append the fetch-engine GO (trigger) command for
 * offline IFE to the packet being prepared.
 *
 * Walks the bus-read (RD) resources on @res_list_isp_rd that belong to HW
 * index @base_idx, asks each to program its FE trigger command into the
 * KMD buffer, then records the programmed region as one additional HW
 * update entry on @prepare.
 *
 * @prepare:         prepare args holding the HW update entry table
 * @res_list_isp_rd: list of cam_ife_hw_mgr_res RD resources
 * @base_idx:        HW index the GO command is generated for
 * @kmd_buf_info:    KMD command buffer receiving the command payload
 *
 * Return: 0 on success; -EINVAL when the entry table or KMD buffer is
 * full, or when no matching RD resource produced a command; otherwise
 * the error returned by the resource's process_cmd().
 *
 * Fix vs. previous version: the local @hw_entry was assigned but never
 * used (unused-but-set-variable warning); the entry-table writes now go
 * through it.
 */
int cam_isp_add_go_cmd(
	struct cam_hw_prepare_update_args    *prepare,
	struct list_head                     *res_list_isp_rd,
	uint32_t                              base_idx,
	struct cam_kmd_buf_info              *kmd_buf_info)
{
	int rc = -EINVAL;
	struct cam_isp_resource_node         *res;
	struct cam_ife_hw_mgr_res            *hw_mgr_res;
	struct cam_hw_update_entry           *hw_entry;
	struct cam_isp_hw_get_cmd_update      get_regup;
	uint32_t kmd_buf_remain_size, num_ent, i, reg_update_size;

	hw_entry = prepare->hw_update_entries;
	/* One new entry will be appended; make sure the table has room. */
	if (prepare->num_hw_update_entries + 1 >=
		prepare->max_hw_update_entries) {
		CAM_ERR(CAM_ISP, "Insufficient HW entries current: %d max: %d",
			prepare->num_hw_update_entries,
			prepare->max_hw_update_entries);
		return -EINVAL;
	}

	reg_update_size = 0;
	list_for_each_entry(hw_mgr_res, res_list_isp_rd, list) {
		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
			continue;

		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			res = hw_mgr_res->hw_res[i];
			if (res->hw_intf->hw_idx != base_idx)
				continue;

			/* Space left in the KMD buffer for this command. */
			if (kmd_buf_info->size > (kmd_buf_info->used_bytes +
				reg_update_size)) {
				kmd_buf_remain_size = kmd_buf_info->size -
					(kmd_buf_info->used_bytes +
					reg_update_size);
			} else {
				CAM_ERR(CAM_ISP, "no free mem %d %d %d",
					base_idx, kmd_buf_info->size,
					kmd_buf_info->used_bytes +
					reg_update_size);
				return -EINVAL;
			}

			/* cpu_addr is a u32 pointer, hence the /4 offsets. */
			get_regup.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
				kmd_buf_info->used_bytes / 4 +
				reg_update_size / 4;
			get_regup.cmd.size = kmd_buf_remain_size;
			get_regup.cmd_type = CAM_ISP_HW_CMD_FE_TRIGGER_CMD;
			get_regup.res = res;

			rc = res->process_cmd(res->res_priv,
				CAM_ISP_HW_CMD_FE_TRIGGER_CMD, &get_regup,
				sizeof(struct cam_isp_hw_get_cmd_update));
			if (rc)
				return rc;

			CAM_DBG(CAM_ISP, "GO_CMD added for RD res %d hw_id %d",
				res->res_type, res->hw_intf->hw_idx);
			reg_update_size += get_regup.cmd.used_bytes;
		}
	}

	if (reg_update_size) {
		/* Publish the programmed region as one HW update entry. */
		num_ent = prepare->num_hw_update_entries;
		hw_entry[num_ent].handle = kmd_buf_info->handle;
		hw_entry[num_ent].len    = reg_update_size;
		hw_entry[num_ent].offset = kmd_buf_info->offset;
		hw_entry[num_ent].flags  = CAM_ISP_IOCFG_BL;
		CAM_DBG(CAM_ISP,
			"num_ent=%d handle=0x%x, len=%u, offset=%u",
			num_ent,
			hw_entry[num_ent].handle,
			hw_entry[num_ent].len,
			hw_entry[num_ent].offset);
		num_ent++;

		kmd_buf_info->used_bytes += reg_update_size;
		kmd_buf_info->offset     += reg_update_size;
		prepare->num_hw_update_entries = num_ent;
		rc = 0;
	}

	return rc;
}
Loading