Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b6472f3e authored by Venkat Chinta, committed by Pavan Kumar Chilamkurthi
Browse files

msm: camera: core: Multiple fixes in NRT flush



Removes a potential double add to the context free list.
When flushing a specific request, clears the request from
the context lists immediately. When flushing the whole
context, clears the context active list only after the
drivers have completed the flush.

Change-Id: I5a1edd1f5eaf7787d81f90a9afe7c3bf6c01e57d
Signed-off-by: Venkat Chinta <vchinta@codeaurora.org>
Signed-off-by: Pavan Kumar Chilamkurthi <pchilamk@codeaurora.org>
parent c092e59f
Loading
Loading
Loading
Loading
+3 −4
Original line number Diff line number Diff line
@@ -281,10 +281,10 @@ int cam_context_handle_release_dev(struct cam_context *ctx,
int cam_context_handle_flush_dev(struct cam_context *ctx,
	struct cam_flush_dev_cmd *cmd)
{
	int rc;
	int rc = 0;

	if (!ctx->state_machine) {
		CAM_ERR(CAM_CORE, "context is not ready");
		CAM_ERR(CAM_CORE, "Context is not ready");
		return -EINVAL;
	}

@@ -298,9 +298,8 @@ int cam_context_handle_flush_dev(struct cam_context *ctx,
		rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
			ctx, cmd);
	} else {
		CAM_ERR(CAM_CORE, "No flush device in dev %d, state %d",
		CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d",
			ctx->dev_hdl, ctx->state);
		rc = -EPROTO;
	}
	mutex_unlock(&ctx->ctx_mutex);

+65 −18
Original line number Diff line number Diff line
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -430,6 +430,8 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
	uint32_t i;
	int rc = 0;

	CAM_DBG(CAM_CTXT, "E: NRT flush ctx");

	/*
	 * flush pending requests, take the sync lock to synchronize with the
	 * sync callback thread so that the sync cb thread does not try to
@@ -444,23 +446,33 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
	while (!list_empty(&temp_list)) {
		req = list_first_entry(&temp_list,
				struct cam_ctx_request, list);

		list_del_init(&req->list);
		req->flushed = 1;

		flush_args.flush_req_pending[flush_args.num_req_pending++] =
			req->req_priv;
		for (i = 0; i < req->num_out_map_entries; i++)
			if (req->out_map_entries[i].sync_id != -1)
				cam_sync_signal(req->out_map_entries[i].sync_id,
			if (req->out_map_entries[i].sync_id != -1) {
				rc = cam_sync_signal(
					req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc == -EALREADY) {
					CAM_ERR(CAM_CTXT,
						"Req: %llu already signalled, sync_id:%d",
						req->request_id,
						req->out_map_entries[i].
						sync_id);
					break;
				}
			}
	}
	mutex_unlock(&ctx->sync_mutex);

	if (ctx->hw_mgr_intf->hw_flush) {
		flush_args.num_req_active = 0;
		spin_lock(&ctx->lock);
		INIT_LIST_HEAD(&temp_list);
		list_splice_init(&ctx->active_req_list, &temp_list);
		list_for_each_entry(req, &temp_list, list) {
		list_for_each_entry(req, &ctx->active_req_list, list) {
			flush_args.flush_req_active[flush_args.num_req_active++]
				= req->req_priv;
		}
@@ -474,14 +486,31 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
		}
	}

	INIT_LIST_HEAD(&temp_list);
	spin_lock(&ctx->lock);
	list_splice_init(&ctx->active_req_list, &temp_list);
	INIT_LIST_HEAD(&ctx->active_req_list);
	spin_unlock(&ctx->lock);

	while (!list_empty(&temp_list)) {
		req = list_first_entry(&temp_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		for (i = 0; i < req->num_out_map_entries; i++)
		for (i = 0; i < req->num_out_map_entries; i++) {
			if (req->out_map_entries[i].sync_id != -1) {
				cam_sync_signal(req->out_map_entries[i].sync_id,
				rc = cam_sync_signal(
					req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc == -EALREADY) {
					CAM_ERR(CAM_CTXT,
						"Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d",
						req->request_id, req->ctx,
						req->ctx->dev_name,
						req->ctx->dev_hdl,
						req->ctx->state);
					break;
				}
			}
		}

		spin_lock(&ctx->lock);
@@ -489,9 +518,10 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
		spin_unlock(&ctx->lock);
		req->ctx = NULL;
	}
	INIT_LIST_HEAD(&ctx->active_req_list);

	return rc;
	CAM_DBG(CAM_CTXT, "X: NRT flush ctx");

	return 0;
}

int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
@@ -502,6 +532,8 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
	uint32_t i;
	int rc = 0;

	CAM_DBG(CAM_CTXT, "E: NRT flush req");

	flush_args.num_req_pending = 0;
	flush_args.num_req_active = 0;
	mutex_lock(&ctx->sync_mutex);
@@ -510,7 +542,9 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
		if (req->request_id != cmd->req_id)
			continue;

		list_del_init(&req->list);
		req->flushed = 1;

		flush_args.flush_req_pending[flush_args.num_req_pending++] =
			req->req_priv;
		break;
@@ -525,6 +559,8 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
				if (req->request_id != cmd->req_id)
					continue;

				list_del_init(&req->list);

				flush_args.flush_req_active[
					flush_args.num_req_active++] =
					req->req_priv;
@@ -543,20 +579,31 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,

	if (req) {
		if (flush_args.num_req_pending || flush_args.num_req_active) {
			list_del_init(&req->list);
			for (i = 0; i < req->num_out_map_entries; i++)
				if (req->out_map_entries[i].sync_id != -1)
					cam_sync_signal(
				if (req->out_map_entries[i].sync_id != -1) {
					rc = cam_sync_signal(
						req->out_map_entries[i].sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR);
					if (rc == -EALREADY) {
						CAM_ERR(CAM_CTXT,
							"Req: %llu already signalled, sync_id:%d",
							req->request_id,
							req->out_map_entries[i].
							sync_id);
						break;
					}
				}
			if (flush_args.num_req_active) {
				spin_lock(&ctx->lock);
				list_add_tail(&req->list, &ctx->free_req_list);
				spin_unlock(&ctx->lock);
				req->ctx = NULL;
			}
		}
	}
	CAM_DBG(CAM_CTXT, "X: NRT flush req");

	return rc;
	return 0;
}

int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+1 −1
Original line number Diff line number Diff line
@@ -220,7 +220,7 @@ static int __cam_node_handle_flush_dev(struct cam_node *node,

	rc = cam_context_handle_flush_dev(ctx, flush);
	if (rc)
		CAM_ERR(CAM_CORE, "FLush failure for node %s", node->name);
		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);

	return rc;
}