Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 20fc6c64 authored by Jeyaprakash Soundrapandian, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: camera: icp: Properly handle context release" into dev/msm-4.9-camx

parents 1837a20a 8e06cfbc
Loading
Loading
Loading
Loading
+47 −31
Original line number Diff line number Diff line
@@ -29,6 +29,7 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
	void *done_event_data, uint32_t bubble_state)
{
	int j;
	int result;
	struct cam_ctx_request *req;
	struct cam_hw_done_event_data *done =
		(struct cam_hw_done_event_data *)done_event_data;
@@ -53,9 +54,13 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
	}

	list_del_init(&req->list);
	if (!bubble_state)
		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
	else
		result = CAM_SYNC_STATE_SIGNALED_ERROR;

	for (j = 0; j < req->num_out_map_entries; j++) {
		cam_sync_signal(req->out_map_entries[j].sync_id,
			CAM_SYNC_STATE_SIGNALED_SUCCESS);
		cam_sync_signal(req->out_map_entries[j].sync_id, result);
		req->out_map_entries[j].sync_id = -1;
	}

@@ -106,6 +111,30 @@ int cam_context_apply_req_to_hw(struct cam_context *ctx,
	return rc;
}

/*
 * cam_context_sync_callback - input-fence signal callback for a context
 * @sync_obj: sync object that signaled (not used here)
 * @status:   signal status from the sync layer (not used here)
 * @data:     opaque callback cookie, the owning struct cam_context
 *
 * Peeks at the first request on the context's pending list and counts the
 * fence ack; once every input fence has acked (num_in_acked reaches
 * num_in_map_entries) the request is submitted to the HW via
 * cam_context_apply_req_to_hw().
 *
 * NOTE(review): req is dereferenced and num_in_acked is incremented after
 * ctx->lock is dropped, and only apply.request_id is initialized before the
 * apply call — confirm the request cannot be freed/moved concurrently (e.g.
 * by cam_context_release_dev_to_hw()) and that the other apply fields are
 * unused by the HW-apply path.
 */
static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
{
	struct cam_context *ctx = data;
	struct cam_ctx_request *req = NULL;
	struct cam_req_mgr_apply_request apply;

	/* Peek only; the request stays on the pending list here. */
	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->pending_req_list))
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	spin_unlock(&ctx->lock);

	if (!req) {
		pr_err("No more request obj free\n");
		return;
	}

	/* Count this ack; apply to HW once all input fences signaled. */
	req->num_in_acked++;
	if (req->num_in_acked == req->num_in_map_entries) {
		apply.request_id = req->request_id;
		cam_context_apply_req_to_hw(ctx, &apply);
	}
}

int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
@@ -116,12 +145,18 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,

	if (!ctx->hw_mgr_intf) {
		pr_err("HW interface is not ready\n");
		rc = -EFAULT;
		rc = -EINVAL;
		goto end;
	}

	if (ctx->ctxt_to_hw_map) {
		arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
		if ((list_empty(&ctx->active_req_list)) &&
			(list_empty(&ctx->pending_req_list)))
			arg.active_req = false;
		else
			arg.active_req = true;

		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
			&arg);
		ctx->ctxt_to_hw_map = NULL;
@@ -135,10 +170,10 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		pr_warn("signal fence in active list. fence num %d\n",
		pr_debug("signal fence in active list. fence num %d\n",
			req->num_out_map_entries);
		for (i = 0; i < req->num_out_map_entries; i++) {
			if (req->out_map_entries[i].sync_id != -1)
			if (req->out_map_entries[i].sync_id > 0)
				cam_sync_signal(req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
		}
@@ -150,10 +185,15 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		pr_debug("signal fence in pending list. fence num %d\n",
		for (i = 0; i < req->num_in_map_entries; i++)
			if (req->in_map_entries[i].sync_id > 0)
				cam_sync_deregister_callback(
					cam_context_sync_callback, ctx,
					req->in_map_entries[i].sync_id);
		pr_debug("signal out fence in pending list. fence num %d\n",
			req->num_out_map_entries);
		for (i = 0; i < req->num_out_map_entries; i++)
			if (req->out_map_entries[i].sync_id != -1)
			if (req->out_map_entries[i].sync_id > 0)
				cam_sync_signal(req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
		list_add_tail(&req->list, &ctx->free_req_list);
@@ -163,30 +203,6 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
	return rc;
}

/*
 * cam_context_sync_callback - input-fence signal callback for a context
 * @sync_obj: sync object that signaled (not used here)
 * @status:   signal status from the sync layer (not used here)
 * @data:     opaque callback cookie, the owning struct cam_context
 *
 * Pre-patch (externally visible) copy of this callback, shown as removed
 * in this diff. Peeks at the first pending request, counts the fence ack,
 * and applies the request to HW once all input fences have signaled.
 *
 * NOTE(review): req is used and num_in_acked incremented after ctx->lock
 * is released — same concurrency caveat as the static replacement.
 */
void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
{
	struct cam_context *ctx = data;
	struct cam_ctx_request *req = NULL;
	struct cam_req_mgr_apply_request apply;

	/* Peek only; the request stays on the pending list here. */
	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->pending_req_list))
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	spin_unlock(&ctx->lock);

	if (!req) {
		pr_err("No more request obj free\n");
		return;
	}

	/* Count this ack; apply to HW once all input fences signaled. */
	req->num_in_acked++;
	if (req->num_in_acked == req->num_in_map_entries) {
		apply.request_id = req->request_id;
		cam_context_apply_req_to_hw(ctx, &apply);
	}
}

int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
	struct cam_config_dev_cmd *cmd)
{
+2 −0
Original line number Diff line number Diff line
@@ -97,10 +97,12 @@ struct cam_hw_acquire_args {
 * struct cam_hw_release_args - Payload for release command
 *
 * @ctxt_to_hw_map:        HW context from the acquire
 * @active_req:            Active request flag
 *
 */
struct cam_hw_release_args {
	void              *ctxt_to_hw_map;	/* HW context handle obtained at acquire */
	bool               active_req;		/* true if active/pending requests remain at release */
};

/**
+1 −1
Original line number Diff line number Diff line
@@ -107,7 +107,7 @@ static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
/*
 * __cam_icp_handle_buf_done_in_ready - buf-done hook for the READY state
 * @ctx:    context pointer (struct cam_context, opaque in this signature)
 * @evt_id: event id from the HW manager, forwarded as the bubble/error
 *          state — the HW manager passes false (0) on normal completion
 *          and true on abort, so non-zero signals fences as error
 * @done:   HW done-event payload (struct cam_hw_done_event_data)
 *
 * Thin forwarder to the generic context-layer buf-done handler. The diff
 * residue left both the old hard-coded-0 return and the new evt_id return
 * in sequence; only the post-patch statement is kept — the second return
 * was unreachable dead code.
 */
static int __cam_icp_handle_buf_done_in_ready(void *ctx,
	uint32_t evt_id, void *done)
{
	return cam_context_buf_done_from_hw(ctx, done, evt_id);
}

static struct cam_ctx_ops
+106 −26
Original line number Diff line number Diff line
@@ -136,7 +136,7 @@ static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
	idx = i;

	buf_data.request_id = hfi_frame_process->request_id[idx];
	ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
	ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);

	/* now release memory for hfi frame process command */
	ICP_DBG("matching request id: %lld\n",
@@ -245,7 +245,7 @@ static int cam_icp_mgr_process_indirect_ack_msg(uint32_t *msg_ptr)
{
	int rc;

	switch (msg_ptr[ICP_PACKET_IPCODE]) {
	switch (msg_ptr[ICP_PACKET_OPCODE]) {
	case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
	case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
		ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_CONFIG_IO:\n");
@@ -267,7 +267,7 @@ static int cam_icp_mgr_process_indirect_ack_msg(uint32_t *msg_ptr)
		break;
	default:
		pr_err("Invalid opcode : %u\n",
			msg_ptr[ICP_PACKET_IPCODE]);
			msg_ptr[ICP_PACKET_OPCODE]);
		break;
	}

@@ -278,23 +278,27 @@ static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
{
	struct cam_icp_hw_ctx_data *ctx_data = NULL;
	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
	int rc = 0;

	if (msg_ptr[ICP_PACKET_IPCODE] ==
		HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY ||
		msg_ptr[ICP_PACKET_IPCODE] ==
		HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY) {
		ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_DESTROY:\n");
	switch (msg_ptr[ICP_PACKET_OPCODE]) {
	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
		ICP_DBG("received IPE/BPS_DESTROY/ABORT:\n");
		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
		ctx_data =
			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
		complete(&ctx_data->wait_complete);

	} else {
		pr_err("Invalid opcode : %u\n", msg_ptr[ICP_PACKET_IPCODE]);
		return -EINVAL;
		break;
	default:
		pr_err("Invalid opcode : %u\n",
			msg_ptr[ICP_PACKET_OPCODE]);
		rc = -EINVAL;
		break;
	}

	return 0;
	return rc;
}

static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
@@ -578,6 +582,58 @@ static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
	return i;
}

/*
 * cam_icp_mgr_abort_handle - send an ABORT command to the ICP firmware
 * @ctx_data: ICP context whose fw_handle should be aborted
 *
 * Builds a direct-mode HFI_IPEBPS_CMD_OPCODE_{IPE,BPS}_ABORT packet
 * (opcode chosen by the acquired device type), queues it on the command
 * workq and blocks up to 5s for the firmware ack, which is delivered via
 * complete(&ctx_data->wait_complete) in the direct-ack message handler.
 *
 * Returns 0 on ack, -ENOMEM if no workq task is free, or -ETIMEDOUT if
 * the firmware does not respond in time.
 *
 * NOTE(review): abort_cmd lives on this function's stack and its address
 * is handed to the workq through task_data->data; this is only safe if
 * the worker consumes the packet before this function returns — including
 * the timeout path. Confirm against cam_icp_mgr_process_cmd().
 */
static int cam_icp_mgr_abort_handle(
	struct cam_icp_hw_ctx_data *ctx_data)
{
	int rc = 0;
	int timeout = 5000;	/* ms to wait for the firmware abort ack */
	struct hfi_cmd_work_data *task_data;
	struct hfi_cmd_ipebps_async abort_cmd;
	struct crm_workq_task *task;
	unsigned long rem_jiffies;

	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
	if (!task) {
		pr_err("No free tasks available in command queue\n");
		return -ENOMEM;
	}

	/*
	 * Packet size: async header plus abort payload, minus the inline
	 * 'direct' placeholder already counted inside the header struct.
	 */
	abort_cmd.size =
		sizeof(struct hfi_cmd_ipebps_async) +
		sizeof(struct hfi_cmd_abort_destroy) -
		sizeof(abort_cmd.payload.direct);
	abort_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
	if (ctx_data->icp_dev_acquire_info.dev_type == CAM_ICP_RES_TYPE_BPS)
		abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT;
	else
		abort_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT;

	/* Re-arm the completion before queuing so the ack cannot be lost. */
	reinit_completion(&ctx_data->wait_complete);
	abort_cmd.num_fw_handles = 1;
	abort_cmd.fw_handles[0] = ctx_data->fw_handle;
	abort_cmd.user_data1 = (uint64_t)ctx_data;	/* echoed back in the FW ack */
	abort_cmd.user_data2 = (uint64_t)0x0;
	memcpy(abort_cmd.payload.direct, &ctx_data->temp_payload,
						sizeof(uint32_t));

	task_data = (struct hfi_cmd_work_data *)task->payload;
	task_data->data = (void *)&abort_cmd;
	task_data->request_id = 0;
	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
	task->process_cb = cam_icp_mgr_process_cmd;
	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
	ICP_DBG("fw_handle = %x ctx_data = %pK\n",
		ctx_data->fw_handle, ctx_data);
	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
			msecs_to_jiffies((timeout)));
	if (!rem_jiffies) {
		rc = -ETIMEDOUT;
		pr_err("FW timeout/err in abort handle command\n");
	}

	return rc;
}

static int cam_icp_mgr_destroy_handle(
	struct cam_icp_hw_ctx_data *ctx_data)
{
@@ -1209,6 +1265,29 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
	return rc;
}

/*
 * cam_icp_mgr_send_abort_status - flush all in-flight frame requests
 * @ctx_data: ICP context being aborted/released
 *
 * Walks every occupied slot of the context's hfi_frame_process table,
 * reports each outstanding request to the context layer with the abort
 * flag set ('true' second argument to ctxt_event_cb), then clears the
 * slot and its bitmap bit.
 *
 * Always returns 0.
 *
 * NOTE(review): request_id[idx] is tested outside hfi_frame_process.lock
 * while the clear below is done under it — confirm no producer can race
 * with this release-time walk.
 */
static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
{
	struct hfi_frame_process_info *hfi_frame_process;
	int idx;

	hfi_frame_process = &ctx_data->hfi_frame_process;
	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
		/* A zero request id marks the slot as unused — skip it. */
		if (!hfi_frame_process->request_id[idx])
			continue;

		/* 'true' flags this completion as an abort to the ctx layer. */
		ctx_data->ctxt_event_cb(ctx_data->context_priv, true,
			&hfi_frame_process->request_id[idx]);

		/* now release memory for hfi frame process command */
		mutex_lock(&ctx_data->hfi_frame_process.lock);
		hfi_frame_process->request_id[idx] = 0;
		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
		mutex_unlock(&ctx_data->hfi_frame_process.lock);
	}

	return 0;
}

static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
{
	int rc = 0;
@@ -1219,7 +1298,7 @@ static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
	struct cam_icp_hw_ctx_data *ctx_data = NULL;

	if (!release_hw || !hw_mgr) {
		pr_err("Invalid args\n");
		pr_err("Invalid args: %pK %pK\n", release_hw, hw_mgr);
		return -EINVAL;
	}

@@ -1229,15 +1308,17 @@ static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
			ctx_data->in_use, ctx_data->fw_handle);
	}

	mutex_lock(&hw_mgr->hw_mgr_mutex);
	ctx_data = release_hw->ctxt_to_hw_map;
	ctx_id = ctx_data->ctx_id;
	if (ctx_id < 0) {
		pr_err("Invalid ctx id\n");
		mutex_unlock(&hw_mgr->hw_mgr_mutex);
		pr_err("Invalid ctx id: %d\n", ctx_id);
		return -EINVAL;
	}
	mutex_unlock(&hw_mgr->hw_mgr_mutex);

	if (release_hw->active_req) {
		cam_icp_mgr_abort_handle(ctx_data);
		cam_icp_mgr_send_abort_status(ctx_data);
	}

	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);

@@ -1524,9 +1605,8 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
			kzalloc(bitmap_size, GFP_KERNEL);
	ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
	mutex_init(&ctx_data->hfi_frame_process.lock);
	mutex_unlock(&ctx_data->ctx_mutex);

	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
	mutex_unlock(&ctx_data->ctx_mutex);

	icp_dev_acquire_info.scratch_mem_size = ctx_data->scratch_mem_size;
	if (copy_to_user((void __user *)args->acquire_info,
@@ -1596,7 +1676,6 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
{
	int count, i, rc = 0;
	uint32_t num_dev;
	uint32_t num_ipe_dev;
	const char *name = NULL;
	struct device_node *child_node = NULL;
	struct platform_device *child_pdev = NULL;
@@ -1649,14 +1728,14 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
	}

	/* Get number of ipe device nodes and ipe mem allocation */
	rc = of_property_read_u32(of_node, "num-ipe", &num_ipe_dev);
	rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
	if (rc < 0) {
		pr_err("getting number of ipe dev nodes failed\n");
		goto num_ipe_failed;
	}

	icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kzalloc(
		sizeof(struct cam_hw_intf *) * num_ipe_dev, GFP_KERNEL);
		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
	if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
		rc = -ENOMEM;
		goto num_ipe_failed;
@@ -1718,7 +1797,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)

	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
	if (rc < 0) {
		pr_err("icp get iommu handle failed\n");
		pr_err("icp get iommu handle failed: %d\n", rc);
		goto compat_hw_name_failed;
	}

@@ -1779,6 +1858,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
cmd_work_failed:
	cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
icp_attach_failed:
	cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
	icp_hw_mgr.iommu_hdl = 0;
compat_hw_name_failed:
	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+1 −1
Original line number Diff line number Diff line
@@ -39,7 +39,7 @@

#define ICP_PACKET_SIZE         0
#define ICP_PACKET_TYPE         1
#define ICP_PACKET_IPCODE       2
#define ICP_PACKET_OPCODE       2
#define ICP_IPE_MAX_OUTPUT_SUPPORTED 6

/**