Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 018d8dae authored by Depeng Shao
Browse files

msm: camera: core: Allocate memory for flush req array dynamically



Allocate memory for the flush request array dynamically based
on the list count, in case the request count is bigger than
the fixed size of the request array.

CRs-Fixed: 2835525
Change-Id: I7e48fb9dc7815cb991c84195e761a3c3f1c26b48
Signed-off-by: Depeng Shao <depengs@codeaurora.org>
parent 85d260ec
Loading
Loading
Loading
Loading
+93 −11
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
@@ -602,7 +602,7 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
	struct cam_hw_flush_args flush_args;
	struct list_head temp_list;
	struct cam_ctx_request *req;
	uint32_t i;
	uint32_t i, j;
	int rc = 0;
	bool free_req;

@@ -618,15 +618,31 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
	INIT_LIST_HEAD(&temp_list);
	spin_lock(&ctx->lock);
	list_splice_init(&ctx->pending_req_list, &temp_list);
	flush_args.num_req_pending = 0;
	list_for_each_entry(req, &temp_list, list) {
		flush_args.num_req_pending++;
	}
	spin_unlock(&ctx->lock);

	flush_args.flush_req_pending = kcalloc(flush_args.num_req_pending,
		sizeof(void *), GFP_KERNEL);
	if (!flush_args.flush_req_pending) {
		rc = -ENOMEM;
		CAM_ERR(CAM_CTXT,
			"[%s][%d] : Failed to malloc memory for flush_req_pending",
			ctx->dev_name, ctx->ctx_id);
		mutex_unlock(&ctx->sync_mutex);
		goto err;
	}

	if (cam_debug_ctx_req_list & ctx->dev_id)
		CAM_INFO(CAM_CTXT,
			"[%s][%d] : Moving all pending requests from pending_list to temp_list",
			ctx->dev_name, ctx->ctx_id);

	flush_args.num_req_pending = 0;
	flush_args.last_flush_req = ctx->last_flush_req;

	j = 0;
	while (true) {
		spin_lock(&ctx->lock);
		if (list_empty(&temp_list)) {
@@ -641,7 +657,7 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
		spin_unlock(&ctx->lock);
		req->flushed = 1;

		flush_args.flush_req_pending[flush_args.num_req_pending++] =
		flush_args.flush_req_pending[j++] =
			req->req_priv;

		free_req = false;
@@ -692,11 +708,31 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
	}
	mutex_unlock(&ctx->sync_mutex);

	kfree(flush_args.flush_req_pending);
	flush_args.flush_req_pending = NULL;

	if (ctx->hw_mgr_intf->hw_flush) {
		spin_lock(&ctx->lock);
		flush_args.num_req_active = 0;
		list_for_each_entry(req, &ctx->active_req_list, list) {
			flush_args.num_req_active++;
		}
		spin_unlock(&ctx->lock);

		flush_args.flush_req_active = kcalloc(flush_args.num_req_active,
			sizeof(void *), GFP_KERNEL);
		if (!flush_args.flush_req_active) {
			rc = -ENOMEM;
			CAM_ERR(CAM_CTXT,
				"[%s][%d] : Failed to malloc memory for flush_req_active",
				ctx->dev_name, ctx->ctx_id);
			goto err;
		}

		spin_lock(&ctx->lock);
		j = 0;
		list_for_each_entry(req, &ctx->active_req_list, list) {
			flush_args.flush_req_active[flush_args.num_req_active++]
			flush_args.flush_req_active[j++]
				= req->req_priv;
		}
		spin_unlock(&ctx->lock);
@@ -709,6 +745,9 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
		}
	}

	kfree(flush_args.flush_req_active);
	flush_args.flush_req_active = NULL;

	INIT_LIST_HEAD(&temp_list);
	spin_lock(&ctx->lock);
	list_splice_init(&ctx->active_req_list, &temp_list);
@@ -762,7 +801,8 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)

	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name);

	return 0;
err:
	return rc;
}

int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
@@ -782,6 +822,25 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
	flush_args.num_req_active = 0;
	mutex_lock(&ctx->sync_mutex);
	spin_lock(&ctx->lock);
	list_for_each_entry(req, &ctx->pending_req_list, list) {
		if (req->request_id != cmd->req_id)
			continue;
		flush_args.num_req_pending++;
	}
	spin_unlock(&ctx->lock);

	flush_args.flush_req_pending = kcalloc(flush_args.num_req_pending,
		sizeof(void *), GFP_KERNEL);
	if (!flush_args.flush_req_pending) {
		rc = -ENOMEM;
		CAM_ERR(CAM_CTXT,
			"[%s][%d] : Failed to malloc memory for flush_req_pending",
			ctx->dev_name, ctx->ctx_id);
		goto err;
	}

	spin_lock(&ctx->lock);
	i = 0;
	list_for_each_entry(req, &ctx->pending_req_list, list) {
		if (req->request_id != cmd->req_id)
			continue;
@@ -794,24 +853,41 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
		list_del_init(&req->list);
		req->flushed = 1;

		flush_args.flush_req_pending[flush_args.num_req_pending++] =
		flush_args.flush_req_pending[i++] =
			req->req_priv;
		break;
	}
	spin_unlock(&ctx->lock);
	mutex_unlock(&ctx->sync_mutex);

	if (ctx->hw_mgr_intf->hw_flush) {
		if (!flush_args.num_req_pending) {
			spin_lock(&ctx->lock);
			list_for_each_entry(req, &ctx->active_req_list, list) {
				if (req->request_id != cmd->req_id)
					continue;
				flush_args.num_req_active++;
			}
			spin_unlock(&ctx->lock);

			flush_args.flush_req_active = kcalloc(flush_args.num_req_active,
				sizeof(void *), GFP_KERNEL);
			if (!flush_args.flush_req_pending) {
				rc = -ENOMEM;
				CAM_ERR(CAM_CTXT,
					"[%s][%d] : Failed to malloc memory for flush_req_pending",
					ctx->dev_name, ctx->ctx_id);
				goto err;
			}

			spin_lock(&ctx->lock);
			i = 0;
			list_for_each_entry(req, &ctx->active_req_list, list) {
				if (req->request_id != cmd->req_id)
					continue;

				list_del_init(&req->list);

				flush_args.flush_req_active[
					flush_args.num_req_active++] =
				flush_args.flush_req_active[i++] =
					req->req_priv;
				break;
			}
@@ -823,6 +899,10 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
			flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
			ctx->hw_mgr_intf->hw_flush(
				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
			kfree(flush_args.flush_req_pending);
			flush_args.flush_req_pending = NULL;
			kfree(flush_args.flush_req_active);
			flush_args.flush_req_active = NULL;
		}
	}

@@ -878,7 +958,9 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
	}
	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name);

	return 0;
err:
	mutex_unlock(&ctx->sync_mutex);
	return rc;
}

int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+3 −3
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

#ifndef _CAM_HW_MGR_INTF_H_
@@ -289,9 +289,9 @@ struct cam_hw_config_args {
/*
 * struct cam_hw_flush_args - arguments passed to the HW manager's hw_flush()
 *
 * @ctxt_to_hw_map:    opaque context-to-HW mapping handle
 * @num_req_pending:   number of entries in @flush_req_pending
 * @flush_req_pending: dynamically allocated array (kcalloc'd by the caller,
 *                     sized from the pending request list) of per-request
 *                     private data to flush; caller frees after hw_flush()
 * @num_req_active:    number of entries in @flush_req_active
 * @flush_req_active:  dynamically allocated array of active requests'
 *                     private data to flush; same ownership as above
 * @flush_type:        kind of flush being requested (ALL vs single REQ)
 * @last_flush_req:    id of the most recent flush request on the context
 *
 * NOTE(review): this commit replaces the former fixed-size [20] arrays with
 * heap-allocated pointer arrays so more than 20 requests can be flushed.
 */
struct cam_hw_flush_args {
	void                           *ctxt_to_hw_map;
	uint32_t                        num_req_pending;
	void                          **flush_req_pending;
	uint32_t                        num_req_active;
	void                          **flush_req_active;
	enum flush_type_t               flush_type;
	uint32_t                        last_flush_req;
};