Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8ebe79c0 authored by Venkat Chinta
Browse files

msm: camera: isp: Multiple fixes in ISP driver



Adds bandwidth control and optimization changes in the IFE manager. Removes
an unnecessary hardware pause from the IFE stop-hardware sequence. Fixes
a refcount that was not being released during initialization. Adds a CSID
global reset to the CSID disable-hardware sequence.

Change-Id: Ie99bbcb6c309bd3ce4ca1df2e3eead37cacf4fb7
Signed-off-by: Venkat Chinta <vchinta@codeaurora.org>
Signed-off-by: Vishalsingh Hajeri <vhajeri@codeaurora.org>
parent 3a2bdc26
Loading
Loading
Loading
Loading
+134 −126
Original line number Diff line number Diff line
@@ -242,6 +242,7 @@ static void cam_ife_hw_mgr_stop_hw_res(
{
	int i;
	struct cam_hw_intf      *hw_intf;
	uint32_t dummy_args;

	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
		if (!isp_hw_res->hw_res[i])
@@ -253,6 +254,12 @@ static void cam_ife_hw_mgr_stop_hw_res(
				sizeof(struct cam_isp_resource_node));
		else
			CAM_ERR(CAM_ISP, "stop null");
		if (hw_intf->hw_ops.process_cmd &&
			isp_hw_res->res_type == CAM_IFE_HW_MGR_RES_IFE_OUT) {
			hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
				CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
				&dummy_args, sizeof(dummy_args));
		}
	}
}

@@ -1100,11 +1107,13 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
			continue;

			/* get cid resource */
		rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
			rc = cam_ife_mgr_acquire_cid_res(ife_ctx,
				in_port, &cid_res_id,
				cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
				out_port->res_type));
			if (rc) {
			CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
				CAM_ERR(CAM_ISP,
					"Acquire IFE CID resource Failed");
				goto err;
		}

@@ -1679,6 +1688,48 @@ static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args)
	return rc;
}

static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx,
	enum cam_vfe_bw_control_action action)
{
	struct cam_ife_hw_mgr_res             *hw_mgr_res;
	struct cam_hw_intf                    *hw_intf;
	struct cam_vfe_bw_control_args         bw_ctrl_args;
	int                                    rc = -EINVAL;
	uint32_t                               i;

	CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);

	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
			if (hw_intf && hw_intf->hw_ops.process_cmd) {
				bw_ctrl_args.node_res =
					hw_mgr_res->hw_res[i];
				bw_ctrl_args.action = action;

				rc = hw_intf->hw_ops.process_cmd(
					hw_intf->hw_priv,
					CAM_ISP_HW_CMD_BW_CONTROL,
					&bw_ctrl_args,
					sizeof(struct cam_vfe_bw_control_args));
				if (rc)
					CAM_ERR(CAM_ISP, "BW Update failed");
			} else
				CAM_WARN(CAM_ISP, "NULL hw_intf!");
		}
	}

	return rc;
}

/*
 * "Pause" the hardware for this context by sending the EXCLUDE bandwidth
 * control action to all of its IFE source resources.
 * NOTE(review): pausing here appears to affect only bandwidth voting at
 * the HW layer, not stream state — confirm against the VFE BW handler.
 */
static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx)
{
	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_EXCLUDE);
}

/* entry function: stop_hw */
static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
{
@@ -1699,8 +1750,7 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
		return -EPERM;
	}

	CAM_DBG(CAM_ISP, " Enter...ctx id:%d",
		ctx->ctx_index);
	CAM_DBG(CAM_ISP, " Enter...ctx id:%d", ctx->ctx_index);

	/* Set the csid halt command */
	if (!stop_args->args)
@@ -1715,6 +1765,25 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Halting CSIDs");

	if (cam_cdm_stream_off(ctx->cdm_handle))
		CAM_ERR(CAM_ISP, "CDM stream off failed %d",
			ctx->cdm_handle);
	cam_tasklet_stop(ctx->common.tasklet_info);

	CAM_DBG(CAM_ISP, "Going to stop IFE Mux");

	/* IFE mux in resources */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
	}

	CAM_DBG(CAM_ISP, "Going to stop IFE Out");

	/* IFE out resources */
	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
	/* get master base index first */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
@@ -1730,57 +1799,41 @@ static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
	if (i == ctx->num_base)
		master_base_idx = ctx->base[0].idx;

	/* Stop the master CSID path first */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
	/* Stop the master CIDs first */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
			master_base_idx, csid_halt_type);

	/* stop rest of the CSID paths  */
	/* stop rest of the CIDs  */
	for (i = 0; i < ctx->num_base; i++) {
		if (i == master_base_idx)
			continue;

		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
			ctx->base[i].idx, csid_halt_type);
	}

	/* Stop the master CIDs first */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
	/* Stop the master CSID path first */
	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
			master_base_idx, csid_halt_type);

	/* stop rest of the CIDs  */
	/* stop rest of the CSID paths  */
	for (i = 0; i < ctx->num_base; i++) {
		if (i == master_base_idx)
			continue;
		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
			ctx->base[i].idx, csid_halt_type);
	}

	if (cam_cdm_stream_off(ctx->cdm_handle))
		CAM_ERR(CAM_ISP, "CDM stream off failed %d",
			ctx->cdm_handle);

	/* IFE mux in resources */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
			ctx->base[i].idx, csid_halt_type);
	}

	/* IFE out resources */
	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);

	/* Update vote bandwidth should be done at the HW layer */

	cam_tasklet_stop(ctx->common.tasklet_info);

	/* Deinit IFE root node: do nothing */

	/* Deinit IFE CID */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
		CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CID\n", __func__);
		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
	}

	/* Deinit IFE CSID */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
		CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CSID\n", __func__);
		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
	}

@@ -2529,48 +2582,6 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
	return rc;
}

/*
 * cam_ife_mgr_bw_control() - send a bandwidth control command to all
 * IFE source resources of the context.
 *
 * @ctx:    IFE HW manager context whose src resource list is walked
 * @action: include/exclude action forwarded to the VFE through the
 *          CAM_ISP_HW_CMD_BW_CONTROL process command
 *
 * Returns the process_cmd result of the last resource visited, or
 * -EINVAL if no resource with a valid hw_intf was found.
 * NOTE(review): an error from an early resource is overwritten by a
 * later success, so only the last rc is reported — confirm intended.
 */
static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx,
	enum cam_vfe_bw_control_action action)
{
	struct cam_ife_hw_mgr_res             *hw_mgr_res;
	struct cam_hw_intf                    *hw_intf;
	struct cam_vfe_bw_control_args         bw_ctrl_args;
	int                                    rc = -EINVAL;
	uint32_t                               i;

	CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);

	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
		/* Each source resource may be split across left/right VFEs */
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
			if (hw_intf && hw_intf->hw_ops.process_cmd) {
				bw_ctrl_args.node_res =
					hw_mgr_res->hw_res[i];
				bw_ctrl_args.action = action;

				rc = hw_intf->hw_ops.process_cmd(
					hw_intf->hw_priv,
					CAM_ISP_HW_CMD_BW_CONTROL,
					&bw_ctrl_args,
					sizeof(struct cam_vfe_bw_control_args));
				if (rc)
					CAM_ERR(CAM_ISP, "BW Update failed");
			} else
				CAM_WARN(CAM_ISP, "NULL hw_intf!");
		}
	}

	return rc;
}

/*
 * Pause the IFE for this context: issue the EXCLUDE bandwidth control
 * action to every IFE source resource so the HW layer can drop this
 * context's bandwidth vote.
 */
static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx)
{
	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_EXCLUDE);
}

static int cam_ife_mgr_resume_hw(struct cam_ife_hw_mgr_ctx *ctx)
{
	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_INCLUDE);
@@ -2996,7 +3007,7 @@ static int cam_ife_hw_mgr_handle_camif_error(
	void                              *handler_priv,
	void                              *payload)
{
	int32_t  error_status = CAM_ISP_HW_ERROR_NONE;
	int32_t  error_status;
	uint32_t core_idx;
	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload      *evt_payload;
@@ -3014,9 +3025,9 @@ static int cam_ife_hw_mgr_handle_camif_error(
		return error_status;

	switch (error_status) {
	case CAM_ISP_HW_ERROR_OVERFLOW:
	case CAM_ISP_HW_ERROR_P2I_ERROR:
	case CAM_ISP_HW_ERROR_VIOLATION:
	case CAM_VFE_IRQ_STATUS_OVERFLOW:
	case CAM_VFE_IRQ_STATUS_P2I_ERROR:
	case CAM_VFE_IRQ_STATUS_VIOLATION:
		CAM_DBG(CAM_ISP, "Enter: error_type (%d)", error_status);

		error_event_data.error_type =
@@ -3100,6 +3111,15 @@ static int cam_ife_hw_mgr_handle_reg_update(
				rup_status = hw_res->bottom_half_handler(
					hw_res, evt_payload);
			}

			if (ife_src_res->is_dual_vfe) {
				hw_res = ife_src_res->hw_res[0];
				if (core_idx == hw_res->hw_intf->hw_idx) {
					hw_res->bottom_half_handler(
						hw_res, evt_payload);
				}
			}

			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;

@@ -3115,13 +3135,6 @@ static int cam_ife_hw_mgr_handle_reg_update(
		case CAM_ISP_HW_VFE_IN_RDI1:
		case CAM_ISP_HW_VFE_IN_RDI2:
		case CAM_ISP_HW_VFE_IN_RDI3:
			if (!ife_hwr_mgr_ctx->is_rdi_only_context)
				continue;

			/*
			 * This is RDI only context, send Reg update and epoch
			 * HW event to cam context
			 */
			hw_res = ife_src_res->hw_res[0];

			if (!hw_res) {
@@ -3133,6 +3146,9 @@ static int cam_ife_hw_mgr_handle_reg_update(
				rup_status = hw_res->bottom_half_handler(
					hw_res, evt_payload);

			if (!ife_hwr_mgr_ctx->is_rdi_only_context)
				continue;

			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;
			if (!rup_status) {
@@ -3334,8 +3350,6 @@ static int cam_ife_hw_mgr_process_camif_sof(
	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx,
	struct cam_vfe_top_irq_evt_payload   *evt_payload)
{
	struct cam_isp_hw_sof_event_data      sof_done_event_data;
	cam_hw_event_cb_func                  ife_hwr_irq_sof_cb;
	struct cam_isp_resource_node         *hw_res_l = NULL;
	struct cam_isp_resource_node         *hw_res_r = NULL;
	int32_t rc = -EINVAL;
@@ -3351,9 +3365,6 @@ static int cam_ife_hw_mgr_process_camif_sof(
	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
		isp_ife_camif_res->is_dual_vfe);

	ife_hwr_irq_sof_cb =
		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];

	switch (isp_ife_camif_res->is_dual_vfe) {
	/* Handling Single VFE Scenario */
	case 0:
@@ -3370,16 +3381,8 @@ static int cam_ife_hw_mgr_process_camif_sof(
				evt_payload);
			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
				break;
			if (!sof_status) {
				cam_ife_mgr_cmd_get_sof_timestamp(
					ife_hwr_mgr_ctx,
					&sof_done_event_data.timestamp);

				ife_hwr_irq_sof_cb(
					ife_hwr_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_SOF,
					&sof_done_event_data);
			}
			if (!sof_status)
				rc = 0;
		}

		break;
@@ -3431,15 +3434,6 @@ static int cam_ife_hw_mgr_process_camif_sof(
		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
			core_index0, core_index1, evt_payload->evt_id);

		if (!rc) {
			cam_ife_mgr_cmd_get_sof_timestamp(
					ife_hwr_mgr_ctx,
					&sof_done_event_data.timestamp);

			ife_hwr_irq_sof_cb(ife_hwr_mgr_ctx->common.cb_priv,
				CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
		}

		break;

	default:
@@ -3449,14 +3443,13 @@ static int cam_ife_hw_mgr_process_camif_sof(

	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);

	return 0;
	return rc;
}

static int cam_ife_hw_mgr_handle_sof(
	void                              *handler_priv,
	void                              *payload)
{
	int32_t rc = -EINVAL;
	struct cam_isp_resource_node         *hw_res = NULL;
	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx;
	struct cam_vfe_top_irq_evt_payload   *evt_payload;
@@ -3464,6 +3457,7 @@ static int cam_ife_hw_mgr_handle_sof(
	cam_hw_event_cb_func                  ife_hw_irq_sof_cb;
	struct cam_isp_hw_sof_event_data      sof_done_event_data;
	uint32_t  sof_status = 0;
	bool sof_sent = false;

	CAM_DBG(CAM_ISP, "Enter");

@@ -3489,13 +3483,13 @@ static int cam_ife_hw_mgr_handle_sof(
		case CAM_ISP_HW_VFE_IN_RDI1:
		case CAM_ISP_HW_VFE_IN_RDI2:
		case CAM_ISP_HW_VFE_IN_RDI3:
			/* check if it is rdi only context */
			if (ife_hw_mgr_ctx->is_rdi_only_context) {
			hw_res = ife_src_res->hw_res[0];
			sof_status = hw_res->bottom_half_handler(
				hw_res, evt_payload);

				if (!sof_status) {
			/* check if it is rdi only context */
			if (ife_hw_mgr_ctx->is_rdi_only_context) {
				if (!sof_status && !sof_sent) {
					cam_ife_mgr_cmd_get_sof_timestamp(
						ife_hw_mgr_ctx,
						&sof_done_event_data.timestamp);
@@ -3506,16 +3500,30 @@ static int cam_ife_hw_mgr_handle_sof(
						&sof_done_event_data);
					CAM_DBG(CAM_ISP, "sof_status = %d",
						sof_status);

					sof_sent = true;
				}

				/* this is RDI only context so exit from here */
				return 0;
			}
			break;

		case CAM_ISP_HW_VFE_IN_CAMIF:
			rc = cam_ife_hw_mgr_process_camif_sof(ife_src_res,
				ife_hw_mgr_ctx, evt_payload);
			sof_status = cam_ife_hw_mgr_process_camif_sof(
				ife_src_res, ife_hw_mgr_ctx, evt_payload);
			if (!sof_status && !sof_sent) {
				cam_ife_mgr_cmd_get_sof_timestamp(
					ife_hw_mgr_ctx,
					&sof_done_event_data.timestamp);

				ife_hw_irq_sof_cb(
					ife_hw_mgr_ctx->common.cb_priv,
					CAM_ISP_HW_EVENT_SOF,
					&sof_done_event_data);
				CAM_DBG(CAM_ISP, "sof_status = %d",
					sof_status);

				sof_sent = true;
			}
			break;
		default:
			CAM_ERR(CAM_ISP, "Invalid resource id :%d",
@@ -3832,7 +3840,7 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)

	evt_payload = evt_payload_priv;
	if (!handler_priv)
		goto put_payload;
		return rc;

	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;

+7 −1
Original line number Diff line number Diff line
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -431,11 +431,17 @@ int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
		irq_mask = cam_io_r_mb(controller->mem_base +
			controller->irq_register_arr[i].
			mask_reg_offset);
		CAM_DBG(CAM_ISP, "irq_mask 0x%x before disable 0x%x",
			controller->irq_register_arr[i].mask_reg_offset,
			irq_mask);
		irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);

		cam_io_w_mb(irq_mask, controller->mem_base +
			controller->irq_register_arr[i].
			mask_reg_offset);
		CAM_DBG(CAM_ISP, "irq_mask 0x%x after disable 0x%x",
			controller->irq_register_arr[i].mask_reg_offset,
			irq_mask);

		/* Clear the IRQ bits of this handler */
		cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+19 −0
Original line number Diff line number Diff line
@@ -50,6 +50,25 @@ enum cam_isp_hw_err_type {
	CAM_ISP_HW_ERROR_MAX,
};

/**
 *  enum cam_isp_hw_stop_cmd - Specify the stop command type
 *
 *  @CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY: halt after the current frame
 *                                      finishes processing
 *  @CAM_ISP_HW_STOP_IMMEDIATELY:       halt right away, even mid-frame
 *  @CAM_ISP_HW_STOP_MAX:               sentinel; number of valid commands
 */
enum cam_isp_hw_stop_cmd {
	CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY,
	CAM_ISP_HW_STOP_IMMEDIATELY,
	CAM_ISP_HW_STOP_MAX,
};

/**
 * struct cam_isp_stop_hw_method - hardware stop method
 *
 * Wrapper carrying the stop command type, passed through the stop-HW
 * call chain (e.g. as the args payload of a stop request).
 *
 * @hw_stop_cmd: Hardware stop command type (frame boundary vs immediate)
 */
struct cam_isp_stop_hw_method {
	enum cam_isp_hw_stop_cmd      hw_stop_cmd;
};

/**
 * struct cam_isp_bw_config_internal - Internal Bandwidth configuration
 *
+23 −197
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@

/* Timeout values in usec */
#define CAM_IFE_CSID_TIMEOUT_SLEEP_US                  1000
#define CAM_IFE_CSID_TIMEOUT_ALL_US                    1000000
#define CAM_IFE_CSID_TIMEOUT_ALL_US                    100000

/*
 * Constant Factors needed to change QTimer ticks to nanoseconds
@@ -355,8 +355,7 @@ static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
	struct cam_hw_soc_info          *soc_info;
	struct cam_ife_csid_reg_offset  *csid_reg;
	int rc = 0;
	uint32_t i, irq_mask_rx, irq_mask_ipp = 0,
		irq_mask_rdi[CAM_IFE_CSID_RDI_MAX];
	uint32_t val = 0, i;

	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = csid_hw->csid_info->csid_reg;
@@ -373,19 +372,6 @@ static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)

	init_completion(&csid_hw->csid_top_complete);

	/* Save interrupt mask registers values*/
	irq_mask_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		irq_mask_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
		irq_mask_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
	}

	/* Mask all interrupts */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
@@ -448,17 +434,11 @@ static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
		rc = 0;
	}

	/*restore all interrupt masks */
	cam_io_w_mb(irq_mask_rx, soc_info->reg_map[0].mem_base +
	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);

	if (csid_reg->cmn_reg->no_pix)
		cam_io_w_mb(irq_mask_ipp, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++)
		cam_io_w_mb(irq_mask_rdi[i], soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
	if (val != 0)
		CAM_ERR(CAM_ISP, "CSID:%d IRQ value after reset rc = %d",
			csid_hw->hw_intf->hw_idx, val);

	return rc;
}
@@ -1058,6 +1038,10 @@ static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
	soc_info = &csid_hw->hw_info->soc_info;
	csid_reg = csid_hw->csid_info->csid_reg;

	CAM_DBG(CAM_ISP, "%s:Calling Global Reset\n", __func__);
	cam_ife_csid_global_reset(csid_hw);
	CAM_DBG(CAM_ISP, "%s:Global Reset Done\n", __func__);

	CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW",
		csid_hw->hw_intf->hw_idx);

@@ -1516,7 +1500,6 @@ static int cam_ife_csid_deinit_ipp_path(
	struct cam_isp_resource_node    *res)
{
	int rc = 0;
	uint32_t val = 0;
	struct cam_ife_csid_reg_offset      *csid_reg;
	struct cam_hw_soc_info              *soc_info;

@@ -1538,13 +1521,6 @@ static int cam_ife_csid_deinit_ipp_path(
		rc = -EINVAL;
	}

	/* Disable the IPP path */
	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->ipp_reg->csid_ipp_cfg0_addr);

	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	return rc;
}
@@ -1615,7 +1591,6 @@ static int cam_ife_csid_disable_ipp_path(
	enum cam_ife_csid_halt_cmd       stop_cmd)
{
	int rc = 0;
	uint32_t val = 0;
	struct cam_ife_csid_reg_offset       *csid_reg;
	struct cam_hw_soc_info               *soc_info;
	struct cam_ife_csid_path_cfg         *path_data;
@@ -1661,19 +1636,8 @@ static int cam_ife_csid_disable_ipp_path(
	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
		csid_hw->hw_intf->hw_idx, res->res_id);

	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
		/* configure Halt */
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
		val &= ~0x3;
		val |= stop_cmd;
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);
	} else if (path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
		cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_ctrl_addr);

	/* For slave mode, halt command should take it from master */
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);

	return rc;
}
@@ -1815,7 +1779,7 @@ static int cam_ife_csid_deinit_rdi_path(
	struct cam_isp_resource_node    *res)
{
	int rc = 0;
	uint32_t val = 0, id;
	uint32_t id;
	struct cam_ife_csid_reg_offset      *csid_reg;
	struct cam_hw_soc_info              *soc_info;

@@ -1832,13 +1796,6 @@ static int cam_ife_csid_deinit_rdi_path(
		return -EINVAL;
	}

	/* Disable the RDI path */
	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
	val &= ~(1 << csid_reg->cmn_reg->path_en_shift_val);
	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);

	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
	return rc;
}
@@ -1935,9 +1892,8 @@ static int cam_ife_csid_disable_rdi_path(
	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
		csid_hw->hw_intf->hw_idx, res->res_id);

	/*Halt the RDI path */
	cam_io_w_mb(stop_cmd, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);

	return rc;
}
@@ -2014,79 +1970,6 @@ static int cam_ife_csid_set_csid_debug(struct cam_ife_csid_hw *csid_hw,
	return 0;
}

/*
 * cam_ife_csid_res_wait_for_halt() - wait for a CSID pixel/RDI path to
 * finish halting, then mask its info/error interrupts.
 *
 * @csid_hw: CSID hardware instance
 * @res:     IPP or RDI path resource being stopped
 *
 * Polls the path status register until it reads 1 (presumably the
 * halt-done indication — confirm against the CSID HW spec), with
 * CAM_IFE_CSID_TIMEOUT_SLEEP_US between reads for at most
 * CAM_IFE_CSID_TIMEOUT_ALL_US total.
 *
 * Returns 0 on success, -EINVAL on invalid resource id or state,
 * -ETIMEDOUT if the path never reports halted. The resource state is
 * set to INIT_HW even on timeout.
 */
static int cam_ife_csid_res_wait_for_halt(
	struct cam_ife_csid_hw          *csid_hw,
	struct cam_isp_resource_node    *res)
{
	int rc = 0;
	uint32_t val = 0, id, status, path_status_reg;
	struct cam_ife_csid_reg_offset      *csid_reg;
	struct cam_hw_soc_info              *soc_info;

	csid_reg = csid_hw->csid_info->csid_reg;
	soc_info = &csid_hw->hw_info->soc_info;

	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
			csid_hw->hw_intf->hw_idx, res->res_id);
		return -EINVAL;
	}

	/* Already stopped: report it but return success (rc is still 0) */
	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"CSID:%d Res:%d already in stopped state:%d",
			csid_hw->hw_intf->hw_idx,
			res->res_id, res->res_state);
		return rc;
	}

	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"CSID:%d Res:%d Invalid state%d",
			csid_hw->hw_intf->hw_idx, res->res_id,
			res->res_state);
		return -EINVAL;
	}

	/* Pick the status register for the IPP path or the matching RDI */
	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
		path_status_reg = csid_reg->ipp_reg->csid_ipp_status_addr;
	else
		path_status_reg = csid_reg->rdi_reg[res->res_id]->
			csid_rdi_status_addr;

	/* Poll until the path reports halted (status == 1) or timeout */
	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
		path_status_reg, status,
		(status == 1),
		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
	if (rc < 0) {
		CAM_ERR(CAM_ISP, "Time out: Res id:%d Path has not halted",
			res->res_id);
		rc = -ETIMEDOUT;
	}

	/* Disable the interrupt */
	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
				csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
				CSID_PATH_ERROR_FIFO_OVERFLOW);
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
	} else {
		id = res->res_id;
		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
		val &= ~(CSID_PATH_INFO_INPUT_EOF | CSID_PATH_INFO_RST_DONE |
			CSID_PATH_ERROR_FIFO_OVERFLOW);
		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
	}
	/* set state to init HW */
	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
	return rc;
}

static int cam_ife_csid_get_hw_caps(void *hw_priv,
	void *get_hw_cap_args, uint32_t arg_size)
{
@@ -2307,7 +2190,6 @@ static int cam_ife_csid_reset_retain_sw_reg(
	return rc;
}


static int cam_ife_csid_init_hw(void *hw_priv,
	void *init_args, uint32_t arg_size)
{
@@ -2379,7 +2261,6 @@ static int cam_ife_csid_init_hw(void *hw_priv,
	rc = cam_ife_csid_reset_retain_sw_reg(csid_hw);
	if (rc < 0) {
		CAM_ERR(CAM_ISP, "CSID: Failed in SW reset");
		return rc;
	}

	if (rc)
@@ -2403,6 +2284,7 @@ static int cam_ife_csid_deinit_hw(void *hw_priv,
		return -EINVAL;
	}

	CAM_DBG(CAM_ISP, "Enter");
	res = (struct cam_isp_resource_node *)deinit_args;
	csid_hw_info = (struct cam_hw_info  *)hw_priv;
	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
@@ -2417,9 +2299,11 @@ static int cam_ife_csid_deinit_hw(void *hw_priv,

	switch (res->res_type) {
	case CAM_ISP_RESOURCE_CID:
		CAM_DBG(CAM_ISP, "De-Init ife_csid");
		rc = cam_ife_csid_disable_csi2(csid_hw, res);
		break;
	case CAM_ISP_RESOURCE_PIX_PATH:
		CAM_DBG(CAM_ISP, "De-Init Pix Path: %d\n", res->res_id);
		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP)
			rc = cam_ife_csid_deinit_ipp_path(csid_hw, res);
		else
@@ -2434,7 +2318,9 @@ static int cam_ife_csid_deinit_hw(void *hw_priv,
	}

	/* Disable CSID HW */
	CAM_DBG(CAM_ISP, "Disabling CSID Hw\n");
	cam_ife_csid_disable_hw(csid_hw);
	CAM_DBG(CAM_ISP, "%s: Exit\n", __func__);

end:
	mutex_unlock(&csid_hw->hw_info->hw_mutex);
@@ -2540,16 +2426,13 @@ static int cam_ife_csid_stop(void *hw_priv,
		}
	}

	/*wait for the path to halt */
	for (i = 0; i < csid_stop->num_res; i++) {
		res = csid_stop->node_res[i];
		if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
			csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
			rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
		else
		res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
	}

	CAM_DBG(CAM_ISP,  "%s: Exit\n", __func__);

	return rc;

}
@@ -2602,35 +2485,8 @@ static int cam_ife_csid_process_cmd(void *hw_priv,

}

/*
 * cam_ife_csid_halt_device() - immediately halt every streaming CSID
 * path (IPP plus all RDIs) on this hardware instance. Called from the
 * CSID IRQ handler on fatal RX lane/TG FIFO overflow errors.
 *
 * Each halted resource is moved to the INIT_HW state.
 * NOTE(review): rc holds only the last disable call's result; an
 * earlier failure followed by a success is silently lost — confirm.
 */
static int cam_ife_csid_halt_device(
	struct cam_ife_csid_hw *csid_hw)
{
	uint32_t  i;
	int rc = 0;
	struct cam_isp_resource_node *res_node;

	/* Halt the pixel (IPP) path if it is streaming */
	res_node = &csid_hw->ipp_res;
	if (res_node->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
		rc = cam_ife_csid_disable_ipp_path(csid_hw,
			res_node, CAM_CSID_HALT_IMMEDIATELY);
		res_node->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
	}

	/* Halt every streaming RDI path */
	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++) {
		res_node = &csid_hw->rdi_res[i];
		if (res_node->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
			rc = cam_ife_csid_disable_rdi_path(csid_hw,
				res_node, CAM_CSID_HALT_IMMEDIATELY);
			res_node->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
		}
	}
	return rc;
}


irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
{
	int rc = 0;
	struct cam_ife_csid_hw          *csid_hw;
	struct cam_hw_soc_info          *soc_info;
	struct cam_ife_csid_reg_offset  *csid_reg;
@@ -2704,52 +2560,22 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
			 csid_hw->hw_intf->hw_idx);
		rc = cam_ife_csid_halt_device(csid_hw);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"CSID:%d csid halt device fail rc = %d",
				csid_hw->hw_intf->hw_idx, rc);
		}
	}
	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
			 csid_hw->hw_intf->hw_idx);
		rc = cam_ife_csid_halt_device(csid_hw);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"CSID:%d csid halt device fail rc = %d",
				csid_hw->hw_intf->hw_idx, rc);
		}
	}
	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
			 csid_hw->hw_intf->hw_idx);
		rc = cam_ife_csid_halt_device(csid_hw);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"CSID:%d csid halt device fail rc = %d",
				csid_hw->hw_intf->hw_idx, rc);
		}
	}
	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
			 csid_hw->hw_intf->hw_idx);
		rc = cam_ife_csid_halt_device(csid_hw);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"CSID:%d csid halt device fail rc = %d",
				csid_hw->hw_intf->hw_idx, rc);
		}
	}
	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER  FLOW",
			 csid_hw->hw_intf->hw_idx);
		rc = cam_ife_csid_halt_device(csid_hw);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"CSID:%d csid halt device fail rc = %d",
				csid_hw->hw_intf->hw_idx, rc);
		}
	}
	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_EOT_RECEPTION",
+0 −1
Original line number Diff line number Diff line
@@ -360,7 +360,6 @@ struct cam_ife_csid_tpg_cfg {
 * @dt:          Data type
 * @cnt:         Cid resource reference count.
 * @tpg_set:     Tpg used for this cid resource
 * @pixel_count: Pixel resource connected
 *
 */
struct cam_ife_csid_cid_data {
Loading