Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 03d41402 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "UPSTREAM: Merge commit '5851a268' into msm-4.9 (comp dev)- 11/09"

parents cd624425 b8c64d7d
Loading
Loading
Loading
Loading
+20 −0
Original line number Diff line number Diff line
@@ -14,6 +14,7 @@
#include <linux/uaccess.h>
#include "cam_context.h"
#include "cam_debug_util.h"
#include "cam_node.h"

static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
	void *evt_data)
@@ -360,6 +361,7 @@ int cam_context_init(struct cam_context *ctx,
	ctx->session_hdl = -1;
	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->ctx_mutex);
	mutex_init(&ctx->sync_mutex);
	spin_lock_init(&ctx->lock);

	ctx->dev_name = dev_name;
@@ -377,6 +379,7 @@ int cam_context_init(struct cam_context *ctx,
	for (i = 0; i < req_size; i++) {
		INIT_LIST_HEAD(&ctx->req_list[i].list);
		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
		ctx->req_list[i].ctx = ctx;
	}
	ctx->state = CAM_CTX_AVAILABLE;
	ctx->state_machine = NULL;
@@ -402,3 +405,20 @@ int cam_context_deinit(struct cam_context *ctx)

	return 0;
}

/*
 * cam_context_putref() - drop one reference on a context object.
 *
 * @ctx: context whose reference is being returned
 *
 * When the last reference is dropped, cam_node_put_ctxt_to_free_list()
 * returns the context to its owning node's free list, after which another
 * thread may immediately reacquire and re-initialize it. The debug log
 * therefore reads @ctx BEFORE kref_put(): touching the object after the
 * final put races with reuse (standard kref rule — no access after the
 * release callback may have run). The logged count is the pre-put value.
 */
void cam_context_putref(struct cam_context *ctx)
{
	CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
	kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
}

/*
 * cam_context_getref() - take an additional reference on a context object.
 *
 * @ctx: context whose reference count is incremented
 *
 * The caller is expected to already hold a valid reference, so the count
 * can never be observed at zero here; a zero count indicates a refcount
 * bug and triggers a WARN. Logging after the get is safe — we hold a ref.
 */
void cam_context_getref(struct cam_context *ctx)
{
	/* should never happen */
	if (!kref_get_unless_zero(&ctx->refcount))
		WARN(1, "cam_context_getref fail\n");

	CAM_DBG(CAM_CORE, "ctx device hdl %ld, ref count %d",
		ctx->dev_hdl, atomic_read(&(ctx->refcount.refcount)));
}
+31 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include "cam_req_mgr_interface.h"
#include "cam_hw_mgr_intf.h"

@@ -54,6 +55,8 @@ enum cam_context_state {
 * @num_out_map_entries:   Number of out map entries
 * @num_in_acked:          Number of in fence acked
 * @num_out_acked:         Number of out fence acked
 * @flushed:               Request is flushed
 * @ctx:                   The context to which this request belongs
 *
 */
struct cam_ctx_request {
@@ -69,6 +72,8 @@ struct cam_ctx_request {
	uint32_t                      num_out_map_entries;
	uint32_t                      num_in_acked;
	uint32_t                      num_out_acked;
	int                           flushed;
	struct cam_context           *ctx;
};

/**
@@ -156,6 +161,9 @@ struct cam_ctx_ops {
 * @state_machine:         Top level state machine
 * @ctx_priv:              Private context pointer
 * @ctxt_to_hw_map:        Context to hardware mapping pointer
 * @refcount:              Context object refcount
 * @node:                  The main node to which this context belongs
 * @sync_mutex:            mutex to sync with sync cb thread
 *
 */
struct cam_context {
@@ -185,6 +193,10 @@ struct cam_context {

	void                        *ctx_priv;
	void                        *ctxt_to_hw_map;

	struct kref                  refcount;
	void                        *node;
	struct mutex                 sync_mutex;
};

/**
@@ -347,5 +359,24 @@ int cam_context_init(struct cam_context *ctx,
		struct cam_ctx_request *req_list,
		uint32_t req_size);

/**
 * cam_context_putref()
 *
 * @brief:       Put back context reference.
 *
 * @ctx:                  Context for which ref is returned
 *
 */
void cam_context_putref(struct cam_context *ctx);

/**
 * cam_context_getref()
 *
 * @brief:       Get back context reference.
 *
 * @ctx:                  Context for which ref is taken
 *
 */
void cam_context_getref(struct cam_context *ctx);

#endif  /* _CAM_CONTEXT_H_ */
+126 −89
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@
#include <media/cam_defs.h>

#include "cam_context.h"
#include "cam_context_utils.h"
#include "cam_mem_mgr.h"
#include "cam_node.h"
#include "cam_req_mgr_util.h"
@@ -25,6 +26,15 @@
#include "cam_trace.h"
#include "cam_debug_util.h"

/*
 * cam_context_validate_thread() - guard against running in atomic context.
 *
 * Several context operations sleep (mutexes, HW calls) and must only run
 * from process context. Returns 0 when called from process context,
 * -EINVAL (with a WARN) when called from interrupt/softirq context.
 */
static inline int cam_context_validate_thread(void)
{
	int rc = 0;

	if (in_interrupt()) {
		WARN(1, "Invalid execution context\n");
		rc = -EINVAL;
	}

	return rc;
}

int cam_context_buf_done_from_hw(struct cam_context *ctx,
	void *done_event_data, uint32_t bubble_state)
{
@@ -33,17 +43,23 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
	struct cam_ctx_request *req;
	struct cam_hw_done_event_data *done =
		(struct cam_hw_done_event_data *)done_event_data;
	int rc;

	if (!ctx || !done) {
		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
		return -EINVAL;
	}

	rc = cam_context_validate_thread();
	if (rc)
		return rc;

	spin_lock(&ctx->lock);
	if (list_empty(&ctx->active_req_list)) {
		CAM_ERR(CAM_CTXT, "no active request");
		spin_unlock(&ctx->lock);
		return -EIO;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

@@ -52,15 +68,22 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
	if (done->request_id != req->request_id) {
		CAM_ERR(CAM_CTXT, "mismatch: done req[%lld], active req[%lld]",
			done->request_id, req->request_id);
		spin_unlock(&ctx->lock);
		return -EIO;
	}

	if (!req->num_out_map_entries) {
		CAM_ERR(CAM_CTXT, "no output fence to signal");
		spin_unlock(&ctx->lock);
		return -EIO;
	}

	/*
	 * since another thread may be adding/removing from active
	 * list, so hold the lock
	 */
	list_del_init(&req->list);
	spin_unlock(&ctx->lock);
	if (!bubble_state)
		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
	else
@@ -71,41 +94,34 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
		req->out_map_entries[j].sync_id = -1;
	}

	/*
	 * another thread may be adding/removing from free list,
	 * so hold the lock
	 */
	spin_lock(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	req->ctx = NULL;
	spin_unlock(&ctx->lock);

	return 0;
}

int cam_context_apply_req_to_hw(struct cam_context *ctx,
static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
	struct cam_req_mgr_apply_request *apply)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = req->ctx;
	struct cam_hw_config_args cfg;

	if (!ctx || !apply) {
		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, apply);
		rc = -EINVAL;
		goto end;
	}

	if (!ctx->hw_mgr_intf) {
		CAM_ERR(CAM_CTXT, "HW interface is not ready");
		rc = -EFAULT;
		goto end;
	}

	if (list_empty(&ctx->pending_req_list)) {
		CAM_ERR(CAM_CTXT, "No available request for Apply id %lld",
			apply->request_id);
		rc = -EFAULT;
		goto end;
	}

	spin_lock(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	spin_unlock(&ctx->lock);

	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
@@ -114,11 +130,13 @@ int cam_context_apply_req_to_hw(struct cam_context *ctx,
	cfg.out_map_entries = req->out_map_entries;
	cfg.num_out_map_entries = req->num_out_map_entries;
	cfg.priv = req->req_priv;
	list_add_tail(&req->list, &ctx->active_req_list);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc)
	if (rc) {
		spin_lock(&ctx->lock);
		list_del_init(&req->list);
		spin_unlock(&ctx->lock);
	}

end:
	return rc;
@@ -126,39 +144,51 @@ int cam_context_apply_req_to_hw(struct cam_context *ctx,

static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
{
	struct cam_context *ctx = data;
	struct cam_ctx_request *req = NULL;
	struct cam_ctx_request *req = data;
	struct cam_context *ctx = NULL;
	struct cam_req_mgr_apply_request apply;
	int rc;

	if (!ctx) {
	if (!req) {
		CAM_ERR(CAM_CTXT, "Invalid input param");
		return;
	}

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->pending_req_list))
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
	spin_unlock(&ctx->lock);

	if (!req) {
		CAM_ERR(CAM_CTXT, "No more request obj free");
	rc = cam_context_validate_thread();
	if (rc)
		return;
	}

	ctx = req->ctx;
	req->num_in_acked++;
	if (req->num_in_acked == req->num_in_map_entries) {
		apply.request_id = req->request_id;
		cam_context_apply_req_to_hw(ctx, &apply);
		/*
		 * take mutex to ensure that another thread does
		 * not flush the request while this
		 * thread is submitting it to h/w. The submit to
		 * h/w and adding to the active list should happen
		 * in a critical section which is provided by this
		 * mutex.
		 */
		mutex_lock(&ctx->sync_mutex);
		if (!req->flushed) {
			cam_context_apply_req_to_hw(req, &apply);
			mutex_unlock(&ctx->sync_mutex);
		} else {
			mutex_unlock(&ctx->sync_mutex);
			req->ctx = NULL;
			req->flushed = 0;
			spin_lock(&ctx->lock);
			list_add_tail(&req->list, &ctx->free_req_list);
			spin_unlock(&ctx->lock);
		}
	}
	cam_context_putref(ctx);
}

int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd)
{
	int i;
	struct cam_hw_release_args arg;
	struct cam_ctx_request *req;

	if (!ctx) {
		CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -170,12 +200,9 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
		return -EINVAL;
	}

	cam_context_stop_dev_to_hw(ctx);
	arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	if ((list_empty(&ctx->active_req_list)) &&
		(list_empty(&ctx->pending_req_list)))
	arg.active_req = false;
	else
		arg.active_req = true;

	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
	ctx->ctxt_to_hw_map = NULL;
@@ -184,38 +211,6 @@ int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
	ctx->dev_hdl = -1;
	ctx->link_hdl = -1;

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		CAM_DBG(CAM_CTXT, "signal fence in active list, num %d",
			req->num_out_map_entries);
		for (i = 0; i < req->num_out_map_entries; i++) {
			if (req->out_map_entries[i].sync_id > 0)
				cam_sync_signal(req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
		}
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		for (i = 0; i < req->num_in_map_entries; i++)
			if (req->in_map_entries[i].sync_id > 0)
				cam_sync_deregister_callback(
					cam_context_sync_callback, ctx,
					req->in_map_entries[i].sync_id);
		CAM_DBG(CAM_CTXT, "signal fence in pending list, num %d",
			req->num_out_map_entries);
		for (i = 0; i < req->num_out_map_entries; i++)
			if (req->out_map_entries[i].sync_id > 0)
				cam_sync_signal(req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
		list_add_tail(&req->list, &ctx->free_req_list);
	}

	return 0;
}

@@ -241,6 +236,9 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
		rc = -EFAULT;
		goto end;
	}
	rc = cam_context_validate_thread();
	if (rc)
		return rc;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->free_req_list)) {
@@ -258,6 +256,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,

	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	req->ctx = ctx;

	/* for config dev, only memory handle is supported */
	/* map packet from the memhandle */
@@ -303,10 +302,18 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
		list_add_tail(&req->list, &ctx->pending_req_list);
		spin_unlock(&ctx->lock);
		for (i = 0; i < req->num_in_map_entries; i++) {
			cam_context_getref(ctx);
			rc = cam_sync_register_callback(
					cam_context_sync_callback,
					(void *)ctx,
					(void *)req,
					req->in_map_entries[i].sync_id);
			if (rc) {
				CAM_ERR(CAM_CTXT,
					"Failed register fence cb: %d ret = %d",
					req->in_map_entries[i].sync_id, rc);
				cam_context_putref(ctx);
				goto free_req;
			}
			CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
				req->in_map_entries[i].sync_id, rc);
		}
@@ -318,6 +325,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
free_req:
	spin_lock(&ctx->lock);
	list_add_tail(&req->list, &ctx->free_req_list);
	req->ctx = NULL;
	spin_unlock(&ctx->lock);
end:
	return rc;
@@ -452,6 +460,7 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
	uint32_t i;
	struct cam_hw_stop_args stop;
	struct cam_ctx_request *req;
	struct list_head temp_list;

	if (!ctx) {
		CAM_ERR(CAM_CTXT, "Invalid input param");
@@ -465,30 +474,51 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
		goto end;
	}

	/* stop hw first */
	if (ctx->ctxt_to_hw_map) {
		stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
		if (ctx->hw_mgr_intf->hw_stop)
			ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
				&stop);
	}
	rc = cam_context_validate_thread();
	if (rc)
		goto end;

	/* flush pending and active queue */
	while (!list_empty(&ctx->pending_req_list)) {
		req = list_first_entry(&ctx->pending_req_list,
	/*
	 * flush pending requests, take the sync lock to synchronize with the
	 * sync callback thread so that the sync cb thread does not try to
	 * submit request to h/w while the request is being flushed
	 */
	mutex_lock(&ctx->sync_mutex);
	INIT_LIST_HEAD(&temp_list);
	spin_lock(&ctx->lock);
	list_splice_init(&ctx->pending_req_list, &temp_list);
	spin_unlock(&ctx->lock);
	while (!list_empty(&temp_list)) {
		req = list_first_entry(&temp_list,
				struct cam_ctx_request, list);
		list_del_init(&req->list);
		CAM_DBG(CAM_CTXT, "signal fence in pending list. fence num %d",
			req->num_out_map_entries);
		req->flushed = 1;
		for (i = 0; i < req->num_out_map_entries; i++)
			if (req->out_map_entries[i].sync_id != -1)
				cam_sync_signal(req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
		list_add_tail(&req->list, &ctx->free_req_list);
	}
	mutex_unlock(&ctx->sync_mutex);

	while (!list_empty(&ctx->active_req_list)) {
		req = list_first_entry(&ctx->active_req_list,
	/* stop hw first */
	if (ctx->ctxt_to_hw_map) {
		stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
		if (ctx->hw_mgr_intf->hw_stop)
			ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
				&stop);
	}

	/*
	 * flush active queue, at this point h/w layer below does not have any
	 * reference to requests in active queue.
	 */
	INIT_LIST_HEAD(&temp_list);
	spin_lock(&ctx->lock);
	list_splice_init(&ctx->active_req_list, &temp_list);
	spin_unlock(&ctx->lock);

	while (!list_empty(&temp_list)) {
		req = list_first_entry(&temp_list,
				struct cam_ctx_request, list);
		list_del_init(&req->list);
		CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
@@ -497,7 +527,14 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
			if (req->out_map_entries[i].sync_id != -1)
				cam_sync_signal(req->out_map_entries[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR);
		/*
		 * The spin lock should be taken here to guard the free list,
		 * as sync cb thread could be adding a pending req to free list
		 */
		spin_lock(&ctx->lock);
		list_add_tail(&req->list, &ctx->free_req_list);
		req->ctx = NULL;
		spin_unlock(&ctx->lock);
	}

end:
+0 −2
Original line number Diff line number Diff line
@@ -17,8 +17,6 @@

int cam_context_buf_done_from_hw(struct cam_context *ctx,
	void *done_event_data, uint32_t bubble_state);
int cam_context_apply_req_to_hw(struct cam_context *ctx,
	struct cam_req_mgr_apply_request *apply);
int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
	struct cam_release_dev_cmd *cmd);
int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+33 −15
Original line number Diff line number Diff line
@@ -18,6 +18,34 @@
#include "cam_trace.h"
#include "cam_debug_util.h"

/*
 * cam_node_get_ctxt_from_free_list() - pop an available context from a node.
 *
 * @node: node whose free_ctx_list is consulted
 *
 * Removes and returns the first context on the node's free list, or NULL
 * when none is available. The list is guarded by node->list_mutex; the
 * returned context is handed to the caller with a fresh single reference
 * (kref_init), which is later dropped via cam_context_putref().
 */
static struct cam_context *cam_node_get_ctxt_from_free_list(
		struct cam_node *node)
{
	struct cam_context *ctx;

	mutex_lock(&node->list_mutex);
	if (list_empty(&node->free_ctx_list)) {
		mutex_unlock(&node->list_mutex);
		return NULL;
	}

	ctx = list_first_entry(&node->free_ctx_list,
		struct cam_context, list);
	list_del_init(&ctx->list);
	mutex_unlock(&node->list_mutex);

	/* Caller now owns the initial reference. */
	kref_init(&ctx->refcount);

	return ctx;
}

/*
 * cam_node_put_ctxt_to_free_list() - kref release callback for a context.
 *
 * @ref: embedded refcount of the context whose last reference was dropped
 *
 * Invoked by kref_put() when the context's reference count reaches zero.
 * Recovers the enclosing cam_context via container_of() and returns it to
 * its owning node's free list (guarded by node->list_mutex) for reuse by
 * a later acquire; the context memory itself is not freed here.
 */
void cam_node_put_ctxt_to_free_list(struct kref *ref)
{
	struct cam_context *ctx =
		container_of(ref, struct cam_context, refcount);
	/* ctx->node was set at node init time — assumed non-NULL here;
	 * NOTE(review): no guard, verify all contexts get a node. */
	struct cam_node *node = ctx->node;

	mutex_lock(&node->list_mutex);
	list_add_tail(&ctx->list, &node->free_ctx_list);
	mutex_unlock(&node->list_mutex);
}

static int __cam_node_handle_query_cap(struct cam_node *node,
	struct cam_query_cap_cmd *query)
{
@@ -45,13 +73,7 @@ static int __cam_node_handle_acquire_dev(struct cam_node *node,
	if (!acquire)
		return -EINVAL;

	mutex_lock(&node->list_mutex);
	if (!list_empty(&node->free_ctx_list)) {
		ctx = list_first_entry(&node->free_ctx_list,
			struct cam_context, list);
		list_del_init(&ctx->list);
	}
	mutex_unlock(&node->list_mutex);
	ctx = cam_node_get_ctxt_from_free_list(node);
	if (!ctx) {
		rc = -ENOMEM;
		goto err;
@@ -66,9 +88,7 @@ static int __cam_node_handle_acquire_dev(struct cam_node *node,

	return 0;
free_ctx:
	mutex_lock(&node->list_mutex);
	list_add_tail(&ctx->list, &node->free_ctx_list);
	mutex_unlock(&node->list_mutex);
	cam_context_putref(ctx);
err:
	return rc;
}
@@ -207,9 +227,7 @@ static int __cam_node_handle_release_dev(struct cam_node *node,
		CAM_ERR(CAM_CORE, "destroy device handle is failed node %s",
			node->name);

	mutex_lock(&node->list_mutex);
	list_add_tail(&ctx->list, &node->free_ctx_list);
	mutex_unlock(&node->list_mutex);
	cam_context_putref(ctx);
	return rc;
}

@@ -312,8 +330,7 @@ int cam_node_shutdown(struct cam_node *node)
		if (node->ctx_list[i].dev_hdl >= 0) {
			cam_context_shutdown(&(node->ctx_list[i]));
			cam_destroy_device_hdl(node->ctx_list[i].dev_hdl);
			list_add_tail(&(node->ctx_list[i].list),
				&node->free_ctx_list);
			cam_context_putref(&(node->ctx_list[i]));
		}
	}

@@ -358,6 +375,7 @@ int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
		}
		INIT_LIST_HEAD(&ctx_list[i].list);
		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
		ctx_list[i].node = node;
	}

	node->state = CAM_NODE_STATE_INIT;
Loading