Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 71fc3c55 authored by Vishalsingh Hajeri's avatar Vishalsingh Hajeri Committed by vhajeri
Browse files

msm: camera: icp: Fix for ICP init failures



Add barriers to ensure read and write accesses
have no contention, and add retry logic to counter
ICP init failures.

Change-Id: I5c947fec42dc6787cce10a9602a53beaf3a2b3f0
Signed-off-by: vhajeri <vhajeri@codeaurora.org>
parent eeeb1c3a
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -110,7 +110,7 @@ void cam_hfi_disable_cpu(void __iomem *icp_base);
/**
 * cam_hfi_deinit() - cleanup HFI
 */
void cam_hfi_deinit(void);
void cam_hfi_deinit(void __iomem *icp_base);
/**
 * hfi_set_debug_level() - set debug level
 * @lvl: FW debug message level
+25 −1
Original line number Diff line number Diff line
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,30 @@
#define HFI_IPEBPS_HANDLE_TYPE_IPE_RT                   0x2
#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT               0x3

/**
 * struct abort_data - identifies the requests a session abort applies to
 * @num_req_ids: number of valid entries in @num_req_id
 * @num_req_id: list of request ids to abort; declared with a single
 *              element — presumably extra entries are carried by
 *              over-allocating the enclosing command packet (the caller
 *              sizes the packet with sizeof arithmetic) — TODO confirm
 *              against the firmware interface
 *
 * create abort data
 */
struct abort_data {
	uint32_t num_req_ids;
	uint32_t num_req_id[1];
};

/**
 * struct hfi_cmd_abort - session abort command payload sent to firmware
 * @abort: abort data describing which requests to cancel
 * @user_data: user supplied data
 *
 * create session abort data
 *
 * Packed because the layout must match the firmware's wire format.
 */
struct hfi_cmd_abort {
	struct abort_data abort;
	uint64_t user_data;
} __packed;

/**
 * struct hfi_cmd_abort_destroy
 * @user_data: user supplied data
+95 −40
Original line number Diff line number Diff line
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -124,7 +124,7 @@ int hfi_write_cmd(void *cmd_ptr)
	 * firmware to process
	 */
	wmb();
	cam_io_w((uint32_t)INTR_ENABLE,
	cam_io_w_mb((uint32_t)INTR_ENABLE,
		g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
err:
	mutex_unlock(&hfi_cmd_q_mutex);
@@ -222,6 +222,10 @@ int hfi_read_message(uint32_t *pmsg, uint8_t q_id,

	q->qhdr_read_idx = new_read_idx;
	*words_read = size_in_words;
	/* Memory Barrier to make sure message
	 * queue parameters are updated after read
	 */
	wmb();
err:
	mutex_unlock(&hfi_msg_q_mutex);
	return rc;
@@ -445,17 +449,17 @@ void cam_hfi_disable_cpu(void __iomem *icp_base)

	val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
	cam_io_w(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);

	val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
	cam_io_w(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
}

void cam_hfi_enable_cpu(void __iomem *icp_base)
{
	cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN,
	cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN,
			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	cam_io_w((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
	cam_io_w_mb((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
}

int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
@@ -464,23 +468,11 @@ int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
	int rc = 0;
	uint32_t data;
	uint32_t fw_version, status = 0;
	uint32_t retry_cnt = 0;

	cam_hfi_enable_cpu(icp_base);
	g_hfi->csr_base = icp_base;

	rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
		status, status != ICP_INIT_RESP_SUCCESS, 15, 200);

	if (rc) {
		CAM_ERR(CAM_HFI, "timed out , status = %u", status);
		return -EINVAL;
	}

	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
	CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);

	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);

	if (debug) {
		cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
			(icp_base + HFI_REG_A5_CSR_A5_CONTROL));
@@ -499,20 +491,54 @@ int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
	}

	while (retry_cnt < HFI_MAX_POLL_TRY) {
		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);

		CAM_DBG(CAM_HFI, "1: status = %u", status);
		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
		CAM_DBG(CAM_HFI, "2: status = %u", status);
		if (status == ICP_INIT_RESP_SUCCESS)
			break;

		if (status == ICP_INIT_RESP_FAILED) {
			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
				status);
			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
			return -EINVAL;
		}
		retry_cnt++;
	}

	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
		(status == ICP_INIT_RESP_RESET)) {
		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
				status);
		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
		CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
		return -EINVAL;
	}

	cam_io_w_mb((uint32_t)INTR_ENABLE,
		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);

	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
	CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);

	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
	CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);

	cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
	cam_io_w((uint32_t)hfi_mem->shmem.iova,
	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
		icp_base + HFI_REG_SHARED_MEM_PTR);
	cam_io_w((uint32_t)hfi_mem->shmem.len,
	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
		icp_base + HFI_REG_SHARED_MEM_SIZE);
	cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
	cam_io_w((uint32_t)hfi_mem->sec_heap.len,
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);

	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
	return rc;
}

@@ -524,6 +550,7 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
	struct hfi_qtbl_hdr *qtbl_hdr;
	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
	uint32_t hw_version, soc_version, fw_version, status = 0;
	uint32_t retry_cnt = 0;

	mutex_lock(&hfi_cmd_q_mutex);
	mutex_lock(&hfi_msg_q_mutex);
@@ -560,7 +587,7 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
		 * disabling the clock gating on both V1 and V2 until the
		 * hardware team root causes this
		 */
		cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN |
		cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN |
			ICP_FLAG_CSR_WAKE_UP_EN |
			ICP_CSR_EN_CLKGATE_WFI,
			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
@@ -677,24 +704,48 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
		break;
	}

	cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
	cam_io_w((uint32_t)hfi_mem->shmem.iova,
	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
		icp_base + HFI_REG_SHARED_MEM_PTR);
	cam_io_w((uint32_t)hfi_mem->shmem.len,
	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
		icp_base + HFI_REG_SHARED_MEM_SIZE);
	cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
	cam_io_w((uint32_t)hfi_mem->sec_heap.len,
	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
	cam_io_w((uint32_t)ICP_INIT_REQUEST_SET,
	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET,
		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);

	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);

	rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
		status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
	if (rc) {
		CAM_ERR(CAM_HFI, "timed out , status = %u", status);
	while (retry_cnt < HFI_MAX_POLL_TRY) {
		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);

		CAM_DBG(CAM_HFI, "1: status = %u rc = %d", status, rc);
		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
		CAM_DBG(CAM_HFI, "2: status = %u rc = %d", status, rc);
		if (status == ICP_INIT_RESP_SUCCESS)
			break;

		if (status == ICP_INIT_RESP_FAILED) {
			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
				status);
			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
			goto regions_fail;
		}
		retry_cnt++;
	}

	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
		(status == ICP_INIT_RESP_RESET)) {
		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
				status);
		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
		CAM_ERR(CAM_HFI,
			"hw version : : [%x], fw version : [%x]",
			hw_version, fw_version);
		goto regions_fail;
	}

@@ -706,7 +757,8 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
	g_hfi->hfi_state = HFI_READY;
	g_hfi->cmd_q_state = true;
	g_hfi->msg_q_state = true;
	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
	cam_io_w_mb((uint32_t)INTR_ENABLE,
		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);

	mutex_unlock(&hfi_cmd_q_mutex);
	mutex_unlock(&hfi_msg_q_mutex);
@@ -714,14 +766,14 @@ int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
	return rc;
regions_fail:
	kfree(g_hfi);
	g_hfi = NULL;
alloc_fail:
	mutex_unlock(&hfi_cmd_q_mutex);
	mutex_unlock(&hfi_msg_q_mutex);
	return rc;
}


void cam_hfi_deinit(void)
void cam_hfi_deinit(void __iomem *icp_base)
{
	mutex_lock(&hfi_cmd_q_mutex);
	mutex_lock(&hfi_msg_q_mutex);
@@ -734,7 +786,10 @@ void cam_hfi_deinit(void)
	g_hfi->cmd_q_state = false;
	g_hfi->msg_q_state = false;

	cam_io_w((uint32_t)INTR_DISABLE,
	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);

	cam_io_w_mb((uint32_t)INTR_DISABLE,
		g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
	kzfree(g_hfi);
	g_hfi = NULL;
+114 −40
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -1070,7 +1071,11 @@ static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
		if (hw_mgr->bps_ctxt_cnt++)
			goto end;
		bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
		if (!hw_mgr->bps_clk_state) {
			bps_dev_intf->hw_ops.init(
				bps_dev_intf->hw_priv, NULL, 0);
			hw_mgr->bps_clk_state = true;
		}
		if (icp_hw_mgr.ipe_bps_pc_flag) {
			bps_dev_intf->hw_ops.process_cmd(
				bps_dev_intf->hw_priv,
@@ -1080,15 +1085,18 @@ static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
	} else {
		if (hw_mgr->ipe_ctxt_cnt++)
			goto end;

		ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
		if (!hw_mgr->ipe_clk_state)
			ipe0_dev_intf->hw_ops.init(
				ipe0_dev_intf->hw_priv, NULL, 0);
		if (icp_hw_mgr.ipe_bps_pc_flag) {
			ipe0_dev_intf->hw_ops.process_cmd(
				ipe0_dev_intf->hw_priv,
				CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
		}

		if ((icp_hw_mgr.ipe1_enable) && (ipe1_dev_intf)) {
		if ((icp_hw_mgr.ipe1_enable) &&
			(ipe1_dev_intf) &&
			(!hw_mgr->ipe_clk_state)) {
			ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
				NULL, 0);

@@ -1099,6 +1107,7 @@ static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
					NULL, 0);
			}
		}
		hw_mgr->ipe_clk_state = true;
		if (icp_hw_mgr.ipe_bps_pc_flag) {
			hw_mgr->core_info = hw_mgr->core_info |
				(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1);
@@ -1108,6 +1117,8 @@ static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
	CAM_DBG(CAM_ICP, "core_info %X",  hw_mgr->core_info);
	if (icp_hw_mgr.ipe_bps_pc_flag)
		rc = hfi_enable_ipe_bps_pc(true, hw_mgr->core_info);
	else if (icp_hw_mgr.icp_pc_flag)
		rc = hfi_enable_ipe_bps_pc(false, hw_mgr->core_info);
	else
		rc = hfi_enable_ipe_bps_pc(false, hw_mgr->core_info);
end:
@@ -1153,7 +1164,11 @@ static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
				hw_mgr->core_info & (~ICP_PWR_CLP_BPS);
		}

		bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
		if (hw_mgr->bps_clk_state) {
			bps_dev_intf->hw_ops.deinit
				(bps_dev_intf->hw_priv, NULL, 0);
			hw_mgr->bps_clk_state = false;
		}
	} else {
		CAM_DBG(CAM_ICP, "ipe ctx cnt %d", hw_mgr->ipe_ctxt_cnt);
		if (ctx_data)
@@ -1168,7 +1183,10 @@ static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
				CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);

		}
		ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);

		if (hw_mgr->ipe_clk_state)
			ipe0_dev_intf->hw_ops.deinit(
				ipe0_dev_intf->hw_priv, NULL, 0);

		if (ipe1_dev_intf) {
			if (icp_hw_mgr.ipe_bps_pc_flag) {
@@ -1178,9 +1196,12 @@ static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
					NULL, 0);
			}

		if (hw_mgr->ipe_clk_state)
			ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
				NULL, 0);
		}

		hw_mgr->ipe_clk_state = false;
		if (icp_hw_mgr.ipe_bps_pc_flag) {
			hw_mgr->core_info = hw_mgr->core_info &
				(~(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1));
@@ -1331,7 +1352,7 @@ static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
				ctx_data->hfi_frame_process.in_resource[i]);
			cam_sync_destroy(
				ctx_data->hfi_frame_process.in_resource[i]);
			ctx_data->hfi_frame_process.in_resource[i] = 0;
			ctx_data->hfi_frame_process.in_free_resource[i] = 0;
		}
		hfi_frame_process->fw_process_flag[i] = false;
		clear_bit(i, ctx_data->hfi_frame_process.bitmap);
@@ -1341,7 +1362,7 @@ static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
		if (!hfi_frame_process->in_free_resource[i])
			continue;

		CAM_INFO(CAM_ICP, "Delete merged sync in object: %d",
		CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
			ctx_data->hfi_frame_process.in_free_resource[i]);
		cam_sync_destroy(
			ctx_data->hfi_frame_process.in_free_resource[i]);
@@ -1502,6 +1523,7 @@ static int cam_icp_mgr_process_msg_create_handle(uint32_t *msg_ptr)
{
	struct hfi_msg_create_handle_ack *create_handle_ack = NULL;
	struct cam_icp_hw_ctx_data *ctx_data = NULL;
	int rc = 0;

	create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
	if (!create_handle_ack) {
@@ -1518,11 +1540,15 @@ static int cam_icp_mgr_process_msg_create_handle(uint32_t *msg_ptr)
	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE) {
		ctx_data->fw_handle = create_handle_ack->fw_handle;
		CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
	} else {
		CAM_WARN(CAM_ICP,
			"This ctx is no longer in use current state: %d",
			ctx_data->state);
		ctx_data->fw_handle = 0;
		rc = -EPERM;
	}
	complete(&ctx_data->wait_complete);
	} else
		CAM_WARN(CAM_ICP, "Timeout failed to create fw handle");

	return 0;
	return rc;
}

static int cam_icp_mgr_process_msg_ping_ack(uint32_t *msg_ptr)
@@ -1586,20 +1612,38 @@ static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
{
	struct cam_icp_hw_ctx_data *ctx_data = NULL;
	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
	struct cam_hw_intf *a5_dev_intf = NULL;
	struct cam_hw_info *a5_dev = NULL;
	int rc = 0;

	a5_dev_intf = icp_hw_mgr.a5_dev_intf;
	if (!a5_dev_intf) {
		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
		return -EINVAL;
	}
	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
	switch (msg_ptr[ICP_PACKET_OPCODE]) {
	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
		CAM_DBG(CAM_ICP, "received IPE/BPS_DESTROY/ABORT:");
		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
		ctx_data =
			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
		if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
			complete(&ctx_data->wait_complete);

		CAM_DBG(CAM_ICP, "received IPE/BPS/ ABORT: ctx_state =%d",
			ctx_data->state);
		break;
	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
		ctx_data =
			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
		if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
			(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)) {
			complete(&ctx_data->wait_complete);
		}
		CAM_DBG(CAM_ICP, "received IPE/BPS/ DESTROY: ctx_state =%d",
			ctx_data->state);
		break;
	default:
		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
@@ -1607,7 +1651,6 @@ static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
		rc = -EINVAL;
		break;
	}

	return rc;
}

@@ -2008,12 +2051,20 @@ static int cam_ipe_bps_deint(struct cam_icp_hw_mgr *hw_mgr)
		return 0;
	}

	if (ipe1_dev_intf) {
	if (ipe1_dev_intf && hw_mgr->ipe_clk_state) {
		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
				NULL, 0);
	}

	if (hw_mgr->ipe_clk_state)
		ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
	if (hw_mgr->bps_clk_state)
		bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);


	hw_mgr->bps_clk_state = false;
	hw_mgr->ipe_clk_state = false;

	return 0;
}
static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
@@ -2031,12 +2082,15 @@ static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
	}
	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;

	if (!hw_mgr->icp_pc_flag)
	if (!hw_mgr->icp_pc_flag) {
		cam_hfi_disable_cpu(
			a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
		rc = cam_icp_mgr_hw_close(hw_mgr, NULL);
	else
	} else {
		rc = cam_icp_mgr_send_pc_prep(hw_mgr);

	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
		cam_hfi_disable_cpu(
			a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
	}
	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
	CAM_DBG(CAM_ICP, "EXIT");

@@ -2108,9 +2162,10 @@ static int cam_icp_mgr_abort_handle(

	packet_size =
		sizeof(struct hfi_cmd_ipebps_async) +
		sizeof(struct hfi_cmd_abort_destroy) -
		sizeof(struct hfi_cmd_abort) -
		sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
	abort_cmd = kzalloc(packet_size, GFP_KERNEL);
	CAM_DBG(CAM_ICP, "abort pkt size = %d", (int) packet_size);
	if (!abort_cmd) {
		rc = -ENOMEM;
		return rc;
@@ -2128,8 +2183,6 @@ static int cam_icp_mgr_abort_handle(
	abort_cmd->fw_handles[0] = ctx_data->fw_handle;
	abort_cmd->user_data1 = (uint64_t)ctx_data;
	abort_cmd->user_data2 = (uint64_t)0x0;
	memcpy(abort_cmd->payload.direct, &ctx_data->temp_payload,
		sizeof(uint64_t));

	task_data = (struct hfi_cmd_work_data *)task->payload;
	task_data->data = (void *)abort_cmd;
@@ -2247,6 +2300,7 @@ static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
		&hw_mgr->ctx_data[ctx_id], 0);
	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_RELEASE;
	CAM_DBG(CAM_ICP, "E: ctx_id = %d", ctx_id);
	cam_icp_mgr_abort_handle(&hw_mgr->ctx_data[ctx_id]);
	cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
	cam_icp_mgr_cleanup_ctx(&hw_mgr->ctx_data[ctx_id]);

@@ -2292,12 +2346,15 @@ static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
	hw_mgr->bps_clk_state = false;
	hw_mgr->ipe_clk_state = false;
}

static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
{
	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
	struct cam_hw_intf *a5_dev_intf = NULL;
	struct cam_hw_info *a5_dev = NULL;
	struct cam_icp_a5_set_irq_cb irq_cb;
	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
	int rc = 0;
@@ -2309,14 +2366,13 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
		return 0;
	}

	a5_dev_intf = hw_mgr->a5_dev_intf;
	if (!a5_dev_intf) {
		CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
		return -EINVAL;
	}

	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
	fw_buf_info.kva = 0;
	fw_buf_info.iova = 0;
	fw_buf_info.len = 0;
@@ -2327,9 +2383,8 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
		sizeof(fw_buf_info));
	if (rc)
		CAM_ERR(CAM_ICP, "nullify the fw buf failed");

	cam_hfi_deinit();

	cam_hfi_deinit(
		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
	irq_cb.icp_hw_mgr_cb = NULL;
	irq_cb.data = NULL;
	rc = a5_dev_intf->hw_ops.process_cmd(
@@ -2343,7 +2398,6 @@ static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
	hw_mgr->fw_download = false;
	hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	CAM_DBG(CAM_ICP, "Exit");
	return rc;
}
@@ -2385,11 +2439,16 @@ static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
			goto ipe1_dev_init_failed;
	}

	hw_mgr->bps_clk_state = true;
	hw_mgr->ipe_clk_state = true;

	return rc;
ipe1_dev_init_failed:
	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
	hw_mgr->ipe_clk_state = false;
ipe0_dev_init_failed:
	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
	hw_mgr->bps_clk_state = false;
bps_dev_init_failed:
	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
a5_dev_init_failed:
@@ -2619,17 +2678,26 @@ static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
	if (download_fw_args)
		icp_pc = *((bool *)download_fw_args);

	if (download_fw_args && icp_pc == true && hw_mgr->icp_pc_flag) {
		rc = cam_ipe_bps_deint(hw_mgr);
		CAM_DBG(CAM_ICP, "deinit all clocks");
	}

	if (download_fw_args && icp_pc == true)
		return rc;

	rc = cam_ipe_bps_deint(hw_mgr);
	rc = cam_icp_mgr_icp_power_collapse(hw_mgr);
	CAM_DBG(CAM_ICP, "deinit all clocks at boot up");

	return rc;

fw_init_failed:
	cam_hfi_deinit();
	cam_hfi_deinit(
		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
hfi_init_failed:
	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
	cam_hfi_disable_cpu(
		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
fw_download_failed:
	cam_icp_mgr_device_deinit(hw_mgr);
dev_init_fail:
@@ -3365,6 +3433,7 @@ static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
		mutex_lock(&hw_mgr->hw_mgr_mutex);
		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
		hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
		rc = cam_ipe_bps_deint(hw_mgr);
	}
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

@@ -3458,6 +3527,11 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
	}

	if (ctx_data->fw_handle == 0) {
		CAM_ERR(CAM_ICP, "Invalid handle created");
		rc = -EINVAL;
	}

	return rc;
}

@@ -3621,12 +3695,13 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
	}
	ctx_data = &hw_mgr->ctx_data[ctx_id];
	ctx_data->ctx_id = ctx_id;
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	mutex_lock(&ctx_data->ctx_mutex);
	rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
	if (rc)
	if (rc) {
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
		goto acquire_info_failed;
	}
	icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;

	rc = cam_mem_get_io_buf(
@@ -3635,6 +3710,7 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
		&io_buf_addr, &io_buf_size);
	if (rc) {
		CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
		goto get_io_buf_failed;
	}

@@ -3642,7 +3718,6 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
		icp_dev_acquire_info->io_config_cmd_handle,
		(void *)io_buf_addr, io_buf_size);

	mutex_lock(&hw_mgr->hw_mgr_mutex);
	if (!hw_mgr->ctxt_cnt) {
		rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
		if (rc) {
@@ -4041,7 +4116,6 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
		goto icp_wq_create_failed;

	init_completion(&icp_hw_mgr.a5_complete);

	return rc;

icp_wq_create_failed:
+8 −0
Original line number Diff line number Diff line
@@ -277,6 +277,12 @@ struct cam_icp_clk_info {
 * @ipe1_enable: Flag for IPE1
 * @bps_enable: Flag for BPS
 * @core_info: 32 bit value , tells IPE0/1 and BPS
 * @a5_dev_intf : Device interface for A5
 * @ipe0_dev_intf: Device interface for IPE0
 * @ipe1_dev_intf: Device interface for IPE1
 * @bps_dev_intf: Device interface for BPS
 * @ipe_clk_state: IPE clock state flag
 * @bps_clk_state: BPS clock state flag
 */
struct cam_icp_hw_mgr {
	struct mutex hw_mgr_mutex;
@@ -319,6 +325,8 @@ struct cam_icp_hw_mgr {
	struct cam_hw_intf *ipe0_dev_intf;
	struct cam_hw_intf *ipe1_dev_intf;
	struct cam_hw_intf *bps_dev_intf;
	bool ipe_clk_state;
	bool bps_clk_state;
};

static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
Loading