Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3b0c9e07 authored by Ravikishore Pampana's avatar Ravikishore Pampana
Browse files

msm: camera: isp: Add support for initial frame drop



Initial frame drop is required for some sensors. User space
provides the number of frames to drop through the blob
command buffer.
If an initial frame drop is provided, the CSID driver enables the SOF
IRQ at CSID path start. On every SOF IRQ the count is incremented,
and once it reaches the init frame drop number the path control
registers are configured to start the path at the frame boundary.

For the dual IFE use case, the first RDI frame needs to be dropped,
since a PIX frame will be dropped for CSID synchronization.

Change-Id: I615c9f03a7387329214fa6d2fc12e8578a665593
Signed-off-by: default avatarRavikishore Pampana <rpampana@codeaurora.org>
parent f08671a6
Loading
Loading
Loading
Loading
+47 −0
Original line number Diff line number Diff line
@@ -3575,6 +3575,38 @@ void fill_res_bitmap(uint32_t resource_type, unsigned long *res_bitmap)
	}
}

/**
 * cam_isp_blob_init_frame_drop() - apply the user-supplied initial frame
 * drop configuration to every acquired CSID hardware block.
 *
 * @frame_drop_cfg: frame drop configuration parsed from the generic blob
 * @prepare:        prepare-update args; ctxt_to_hw_map carries the hw mgr ctx
 *
 * Walks res_list_ife_csid and issues CAM_IFE_CSID_SET_INIT_FRAME_DROP via
 * process_cmd once per distinct CSID hw index (consecutive duplicates of the
 * same hw_idx are skipped).
 *
 * Return: 0 on success, otherwise the last non-zero rc from process_cmd.
 */
static int cam_isp_blob_init_frame_drop(
	struct cam_isp_init_frame_drop_config  *frame_drop_cfg,
	struct cam_hw_prepare_update_args      *prepare)
{
	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
	struct cam_ife_hw_mgr_res             *hw_mgr_res;
	struct cam_hw_intf                    *hw_intf;
	uint32_t hw_idx = UINT_MAX;
	uint32_t  i;
	int rc = 0;

	ctx = prepare->ctxt_to_hw_map;
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
			/* Skip the hw index we just configured */
			if (hw_intf->hw_idx == hw_idx)
				continue;

			/*
			 * BUGFIX: pass the size of the structure itself,
			 * not sizeof a pointer to it, so the receiving
			 * driver sees the correct payload size.
			 */
			rc = hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
				CAM_IFE_CSID_SET_INIT_FRAME_DROP,
				frame_drop_cfg,
				sizeof(struct cam_isp_init_frame_drop_config));
			hw_idx = hw_intf->hw_idx;
		}
	}
	return rc;
}

static int cam_isp_packet_generic_blob_handler(void *user_data,
	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
{
@@ -3820,7 +3852,22 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
			CAM_ERR(CAM_ISP, "FS Update Failed rc: %d", rc);
	}
		break;
	case CAM_ISP_GENERIC_BLOB_TYPE_INIT_FRAME_DROP: {
		struct cam_isp_init_frame_drop_config  *frame_drop_cfg =
			(struct cam_isp_init_frame_drop_config *)blob_data;

		if (blob_size < sizeof(struct cam_isp_init_frame_drop_config)) {
			CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
				blob_size,
				sizeof(struct cam_isp_init_frame_drop_config));
			return -EINVAL;
		}

		rc = cam_isp_blob_init_frame_drop(frame_drop_cfg, prepare);
		if (rc)
			CAM_ERR(CAM_ISP, "Init Frame drop Update Failed");
	}
		break;
	default:
		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
		break;
+263 −18
Original line number Diff line number Diff line
@@ -468,6 +468,9 @@ static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
			csid_hw->hw_intf->hw_idx, val);
	csid_hw->error_irq_count = 0;

	for (i = 0 ; i < CAM_IFE_PIX_PATH_RES_MAX; i++)
		csid_hw->res_sof_cnt[i] = 0;

	return rc;
}

@@ -838,7 +841,6 @@ static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
	return rc;
}


static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
	struct cam_csid_hw_reserve_resource_args  *reserve)
{
@@ -953,7 +955,7 @@ static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
	path_data->height  = reserve->in_port->height;
	path_data->start_line = reserve->in_port->line_start;
	path_data->end_line = reserve->in_port->line_stop;

	path_data->usage_type = reserve->in_port->usage_type;
	/* Enable RDI crop for single ife use case only */
	switch (reserve->res_id) {
	case CAM_IFE_PIX_PATH_RES_RDI_0:
@@ -1120,6 +1122,7 @@ static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw *csid_hw)
static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
{
	int rc = -EINVAL;
	uint32_t i;
	struct cam_hw_soc_info                   *soc_info;
	const struct cam_ife_csid_reg_offset     *csid_reg;
	unsigned long                             flags;
@@ -1160,12 +1163,95 @@ static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
	spin_lock_irqsave(&csid_hw->lock_state, flags);
	csid_hw->device_enabled = 0;
	spin_unlock_irqrestore(&csid_hw->lock_state, flags);
	for (i = 0; i < CAM_IFE_PIX_PATH_RES_MAX; i++)
		csid_hw->res_sof_cnt[i] = 0;

	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
	csid_hw->error_irq_count = 0;

	return rc;
}

/*
 * cam_ife_csid_check_path_active() - probe the CSID path status registers.
 *
 * Reads the IPP, PPP and RDI path status registers (where the hw variant
 * has them, per cmn_reg counts) and stops at the first active path.
 *
 * Returns 0 if at least one path is currently active (hardware reports a
 * status value of 0 for an active path), non-zero if every visible path
 * is idle.  NOTE(review): return type is int but path_status is uint32_t
 * read straight from the register — callers only test it for zero.
 */
static int cam_ife_csid_check_path_active(struct cam_ife_csid_hw   *csid_hw)
{
	struct cam_hw_soc_info          *soc_info;
	const struct cam_ife_csid_reg_offset  *csid_reg;
	uint32_t i, path_status = 1;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	/* check the IPP path status */
	if (csid_reg->cmn_reg->num_pix) {
		path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
				csid_reg->ipp_reg->csid_pxl_status_addr);
		CAM_DBG(CAM_ISP, "CSID:%d IPP path status:%d",
			csid_hw->hw_intf->hw_idx, path_status);
		/* if status is 0 then it is active */
		if (!path_status)
			goto end;
	}

	/* check the PPP path status */
	if (csid_reg->cmn_reg->num_ppp) {
		path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
				csid_reg->ppp_reg->csid_pxl_status_addr);
		CAM_DBG(CAM_ISP, "CSID:%d PPP path status:%d",
			csid_hw->hw_intf->hw_idx, path_status);
		/* if status is 0 then it is active */
		if (!path_status)
			goto end;
	}

	/* Check the RDI path status */
	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
		path_status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
				csid_reg->rdi_reg[i]->csid_rdi_status_addr);
		CAM_DBG(CAM_ISP, "CSID:%d RDI:%d path status:%d",
			csid_hw->hw_intf->hw_idx, i,  path_status);
		/* if status is 0 then it is active */
		if (!path_status)
			goto end;
	}

end:
	return path_status;
}

/*
 * cam_ife_csid_reset_init_frame_drop() - clear the CSID init frame drop.
 *
 * The counter is reset only when every path resource the hw variant
 * exposes (IPP, PPP, all RDIs) has been released back to AVAILABLE;
 * if any resource is still held the value is left untouched.
 */
static void cam_ife_csid_reset_init_frame_drop(
	struct cam_ife_csid_hw   *csid_hw)
{
	const struct cam_ife_csid_reg_offset  *csid_reg =
		csid_hw->csid_info->csid_reg;
	uint32_t i;

	if (csid_reg->cmn_reg->num_pix &&
		(csid_hw->ipp_res.res_state !=
			CAM_ISP_RESOURCE_STATE_AVAILABLE))
		return;

	if (csid_reg->cmn_reg->num_ppp &&
		(csid_hw->ppp_res.res_state !=
			CAM_ISP_RESOURCE_STATE_AVAILABLE))
		return;

	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
		if (csid_hw->rdi_res[i].res_state !=
			CAM_ISP_RESOURCE_STATE_AVAILABLE)
			return;
	}

	/* All CSID resources are available reset the init frame drop */
	csid_hw->init_frame_drop = 0;
}

static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
	struct cam_isp_resource_node       *res)
@@ -1725,7 +1811,7 @@ static int cam_ife_csid_enable_pxl_path(
	struct cam_ife_csid_path_cfg             *path_data;
	const struct cam_ife_csid_pxl_reg_offset *pxl_reg = NULL;
	bool                                      is_ipp;
	uint32_t                                  val = 0;
	uint32_t                                  val = 0, path_status;

	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
	csid_reg = csid_hw->csid_info->csid_reg;
@@ -1768,14 +1854,15 @@ static int cam_ife_csid_enable_pxl_path(
		/* Default is internal halt mode */
		val = 0;

	/*
	 * Resume at frame boundary if Master or No Sync.
	 * Slave will get resume command from Master.
	 */
	/* Resume at frame boundary */
	path_status = cam_ife_csid_check_path_active(csid_hw);
	if (!csid_hw->init_frame_drop ||
		(csid_hw->init_frame_drop && !path_status)) {
		CAM_DBG(CAM_ISP, "start pixel path");
		if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
			path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
			val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;

	}
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		pxl_reg->csid_pxl_ctrl_addr);

@@ -1789,8 +1876,10 @@ static int cam_ife_csid_enable_pxl_path(
	if (pxl_reg->ccif_violation_en)
		val |= CSID_PATH_ERROR_CCIF_VIOLATION;

	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ) ||
		(csid_hw->init_frame_drop && path_status))
		val |= CSID_PATH_INFO_INPUT_SOF;

	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
		val |= CSID_PATH_INFO_INPUT_EOF;

@@ -2087,8 +2176,10 @@ static int cam_ife_csid_enable_rdi_path(
{
	const struct cam_ife_csid_reg_offset      *csid_reg;
	struct cam_hw_soc_info                    *soc_info;
	uint32_t id, val;
	struct cam_ife_csid_path_cfg              *path_data;
	uint32_t id, val, path_status;

	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;
	id = res->res_id;
@@ -2103,19 +2194,28 @@ static int cam_ife_csid_enable_rdi_path(
		return -EINVAL;
	}

	if (path_data->usage_type)
		path_data->init_frame_drop = csid_hw->init_frame_drop + 1;

	/*resume at frame boundary */
	path_status = cam_ife_csid_check_path_active(csid_hw);
	if (!path_data->init_frame_drop ||
		(path_data->init_frame_drop && !path_status)) {
		CAM_DBG(CAM_ISP, "Start RDI:%d path", id);
		cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
				soc_info->reg_map[0].mem_base +
				csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);

	}
	/* Enable the required RDI interrupts */
	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;

	if (csid_reg->rdi_reg[id]->ccif_violation_en)
		val |= CSID_PATH_ERROR_CCIF_VIOLATION;

	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ) ||
		(path_data->init_frame_drop && path_status))
		val |= CSID_PATH_INFO_INPUT_SOF;

	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
		val |= CSID_PATH_INFO_INPUT_EOF;

@@ -2375,6 +2475,19 @@ static int cam_ife_csid_set_csid_debug(struct cam_ife_csid_hw *csid_hw,
	return 0;
}

/*
 * cam_ife_csid_set_init_frame_drop() - latch the initial frame drop count
 * requested by user space onto this CSID hw instance.  Always returns 0.
 */
static int cam_ife_csid_set_init_frame_drop(struct cam_ife_csid_hw   *csid_hw,
	void *cmd_args)
{
	struct cam_isp_init_frame_drop_config *cfg =
		(struct cam_isp_init_frame_drop_config *) cmd_args;

	csid_hw->init_frame_drop = cfg->init_frame_drop;
	CAM_DBG(CAM_ISP, "CSID:%d set init frame drop:%d",
		csid_hw->hw_intf->hw_idx, csid_hw->init_frame_drop);

	return 0;
}

static int cam_ife_csid_get_hw_caps(void *hw_priv,
	void *get_hw_cap_args, uint32_t arg_size)
{
@@ -2551,6 +2664,7 @@ static int cam_ife_csid_release(void *hw_priv,
		break;
	case CAM_ISP_RESOURCE_PIX_PATH:
		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
		cam_ife_csid_reset_init_frame_drop(csid_hw);
		break;
	default:
		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
@@ -3016,6 +3130,9 @@ static int cam_ife_csid_process_cmd(void *hw_priv,
	case CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE:
		rc = cam_ife_csid_set_csid_clock(csid_hw, cmd_args);
		break;
	case CAM_IFE_CSID_SET_INIT_FRAME_DROP:
		rc = cam_ife_csid_set_init_frame_drop(csid_hw, cmd_args);
		break;
	default:
		CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
			csid_hw->hw_intf->hw_idx, cmd_type);
@@ -3033,6 +3150,9 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
	struct cam_hw_soc_info                         *soc_info;
	const struct cam_ife_csid_reg_offset           *csid_reg;
	const struct cam_ife_csid_csi2_rx_reg_offset   *csi2_reg;
	struct cam_ife_csid_path_cfg                   *path_data;
	const struct cam_ife_csid_pxl_reg_offset       *pxl_reg;
	const struct cam_ife_csid_rdi_reg_offset       *rdi_reg;
	uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0;
	uint32_t irq_status_rdi[4] = {0, 0, 0, 0};
	uint32_t val, irq_status_ppp = 0;
@@ -3264,6 +3384,53 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
				csid_hw->irq_debug_cnt++;
		}

		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
			(csid_hw->init_frame_drop) &&
			(csid_hw->ipp_res.res_state ==
			CAM_ISP_RESOURCE_STATE_STREAMING)) {
			csid_hw->res_sof_cnt[CAM_IFE_PIX_PATH_RES_IPP]++;
			CAM_DBG(CAM_ISP,
				"CSID:%d IPP SOF cnt:%d init_frame_drop:%d",
				csid_hw->hw_intf->hw_idx,
				csid_hw->res_sof_cnt[CAM_IFE_PIX_PATH_RES_IPP],
				csid_hw->init_frame_drop);
			if (csid_hw->res_sof_cnt[CAM_IFE_PIX_PATH_RES_IPP] ==
				csid_hw->init_frame_drop) {
				pxl_reg = csid_reg->ipp_reg;
				path_data = csid_hw->ipp_res.res_priv;
				if (path_data->sync_mode ==
					CAM_ISP_HW_SYNC_MASTER) {
					val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_ctrl_addr);

					val |=
					CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
					cam_io_w_mb(val,
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_ctrl_addr);

				} else if (path_data->sync_mode ==
					CAM_ISP_HW_SYNC_NONE) {
					cam_io_w_mb(
					CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_ctrl_addr);
				}

				if (!(csid_hw->csid_debug &
					CSID_DEBUG_ENABLE_SOF_IRQ)) {
					val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_irq_mask_addr);
					val &= ~(CSID_PATH_INFO_INPUT_SOF);
					cam_io_w_mb(val,
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_irq_mask_addr);
				}
			}
		}

		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
@@ -3299,6 +3466,52 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
				csid_hw->irq_debug_cnt++;
		}

		if ((irq_status_ppp & CSID_PATH_INFO_INPUT_SOF) &&
			(csid_hw->init_frame_drop) &&
			(csid_hw->ppp_res.res_state ==
			CAM_ISP_RESOURCE_STATE_STREAMING)) {
			csid_hw->res_sof_cnt[CAM_IFE_PIX_PATH_RES_PPP]++;
			CAM_DBG(CAM_ISP,
				"CSID:%d PPP SOF cnt:%d init_frame_drop:%d",
				csid_hw->hw_intf->hw_idx,
				csid_hw->res_sof_cnt[CAM_IFE_PIX_PATH_RES_PPP],
				csid_hw->init_frame_drop);
			if (csid_hw->res_sof_cnt[CAM_IFE_PIX_PATH_RES_PPP] ==
				csid_hw->init_frame_drop) {
				path_data = csid_hw->ppp_res.res_priv;
				pxl_reg = csid_reg->ppp_reg;
				if (path_data->sync_mode ==
					CAM_ISP_HW_SYNC_MASTER) {
					val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_ctrl_addr);

					val |=
					CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
					cam_io_w_mb(val,
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_ctrl_addr);
				} else if (path_data->sync_mode ==
					CAM_ISP_HW_SYNC_NONE) {
					cam_io_w_mb(
					CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
						soc_info->reg_map[0].mem_base +
						pxl_reg->csid_pxl_ctrl_addr);
				}

				if (!(csid_hw->csid_debug &
					CSID_DEBUG_ENABLE_SOF_IRQ)) {
					val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_irq_mask_addr);
					val &= ~(CSID_PATH_INFO_INPUT_SOF);
					cam_io_w_mb(val,
					soc_info->reg_map[0].mem_base +
					pxl_reg->csid_pxl_irq_mask_addr);
				}
			}
		}

		if ((irq_status_ppp & CSID_PATH_INFO_INPUT_EOF) &&
			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received",
@@ -3319,6 +3532,9 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
	}

	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
		path_data = (struct cam_ife_csid_path_cfg *)
			csid_hw->rdi_res[i].res_priv;
		rdi_reg = csid_reg->rdi_reg[i];
		if (irq_status_rdi[i] &
			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
			complete(&csid_hw->csid_rdin_complete[i]);
@@ -3332,6 +3548,35 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
				csid_hw->irq_debug_cnt++;
		}

		if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
			(path_data->init_frame_drop) &&
			(csid_hw->rdi_res[i].res_state ==
			CAM_ISP_RESOURCE_STATE_STREAMING)) {
			csid_hw->res_sof_cnt[i]++;
			CAM_DBG(CAM_ISP,
				"CSID:%d RDI:%d SOF cnt:%d init_frame_drop:%d",
				csid_hw->hw_intf->hw_idx, i,
				csid_hw->res_sof_cnt[i],
				path_data->init_frame_drop);
			if (csid_hw->res_sof_cnt[i] ==
				path_data->init_frame_drop) {
				cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
					soc_info->reg_map[0].mem_base +
					rdi_reg->csid_rdi_ctrl_addr);

				if (!(csid_hw->csid_debug &
					CSID_DEBUG_ENABLE_SOF_IRQ)) {
					val = cam_io_r_mb(
					soc_info->reg_map[0].mem_base +
					rdi_reg->csid_rdi_irq_mask_addr);
					val &= ~(CSID_PATH_INFO_INPUT_SOF);
					cam_io_w_mb(val,
					soc_info->reg_map[0].mem_base +
					rdi_reg->csid_rdi_irq_mask_addr);
				}
			}
		}

		if ((irq_status_rdi[i]  & CSID_PATH_INFO_INPUT_EOF) &&
			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
			CAM_INFO_RATE_LIMIT(CAM_ISP,
+15 −0
Original line number Diff line number Diff line
@@ -419,6 +419,9 @@ struct cam_ife_csid_cid_data {
 * @master_idx:     For Slave reservation, Give master IFE instance Index.
 *                  Slave will synchronize with master Start and stop operations
 * @clk_rate        Clock rate
 * @usage_type      Usage type, i.e. dual or single IFE use case
 * @init_frame_drop Init frame drop value. In the dual IFE case the RDI needs
 *                  to drop one more frame than PIX.
 *
 */
struct cam_ife_csid_path_cfg {
@@ -437,6 +440,8 @@ struct cam_ife_csid_path_cfg {
	enum cam_isp_hw_sync_mode       sync_mode;
	uint32_t                        master_idx;
	uint64_t                        clk_rate;
	uint32_t                        usage_type;
	uint32_t                        init_frame_drop;
};

/**
@@ -468,6 +473,13 @@ struct cam_ife_csid_path_cfg {
 * @irq_debug_cnt:            Counter to track sof irq's when above flag is set.
 * @error_irq_count           Error IRQ count, if continuous error irq comes
 *                            need to stop the CSID and mask interrupts.
 * @device_enabled            Device enabled will set once CSID powered on and
 *                            initial configuration are done.
 * @lock_state                csid spin lock
 * @dual_usage                usage type, dual ife or single ife
 * @init_frame_drop           Initial frame drop number
 * @res_sof_cnt               path resource sof count value. it used for initial
 *                            frame drop
 *
 */
struct cam_ife_csid_hw {
@@ -496,6 +508,9 @@ struct cam_ife_csid_hw {
	uint32_t                         error_irq_count;
	uint32_t                         device_enabled;
	spinlock_t                       lock_state;
	uint32_t                         dual_usage;
	uint32_t                         init_frame_drop;
	uint32_t                         res_sof_cnt[CAM_IFE_PIX_PATH_RES_MAX];
};

int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+2 −1
Original line number Diff line number Diff line
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -157,6 +157,7 @@ enum cam_ife_csid_cmd_type {
	CAM_IFE_CSID_CMD_GET_TIME_STAMP,
	CAM_IFE_CSID_SET_CSID_DEBUG,
	CAM_IFE_CSID_SOF_IRQ_DEBUG,
	CAM_IFE_CSID_SET_INIT_FRAME_DROP,
	CAM_IFE_CSID_CMD_MAX,
};

+11 −0
Original line number Diff line number Diff line
@@ -91,6 +91,7 @@
#define CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG   4
#define CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG           5
#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG_V2        6
#define CAM_ISP_GENERIC_BLOB_TYPE_INIT_FRAME_DROP     10

/* Per Path Usage Data */
#define CAM_ISP_USAGE_INVALID     0
@@ -500,4 +501,14 @@ struct cam_isp_acquire_hw_info {

#define CAM_ISP_ACQUIRE_OUT_SIZE_VER0       sizeof(struct cam_isp_out_port_info)

/**
 * struct cam_isp_init_frame_drop_config - init frame drop configuration
 *
 * @init_frame_drop:            Initial number of frames to drop
 */
struct cam_isp_init_frame_drop_config {
	uint32_t                       init_frame_drop;
} __attribute__((packed));

#endif /* __UAPI_CAM_ISP_H__ */