Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 35456660 authored by Abhijit Trivedi
Browse files

UPSTREAM- Merge commit '68be51a5' into msm-4.14



* commit '68be51a5':
  msm: camera: Update camera driver modules
  ARM: dts: msm: Add new memory region in icp ctx for sm8150 target

Change-Id: Ic03563010082ba9296d5fad1dd47f0a1e06b2dd0
Signed-off-by: Abhijit Trivedi <abhijitt@codeaurora.org>
parents 74454aa8 68be51a5
Loading
Loading
Loading
Loading
+12 −2
Original line number Diff line number Diff line
@@ -403,11 +403,21 @@
				iova-mem-region-io {
					/* IO region is approximately 3.3 GB */
					iova-region-name = "io";
					iova-region-start = <0xd900000>;
					iova-region-len = <0xd2700000>;
					iova-region-start = <0xd911000>;
					iova-region-len = <0xd26ef000>;
					iova-region-id = <0x3>;
					status = "ok";
				};

				iova-mem-qdss-region {
					/* QDSS region is appropriate 64K */
					iova-region-name = "qdss";
					iova-region-start = <0xd900000>;
					iova-region-len = <0x10000>;
					iova-region-id = <0x5>;
					qdss-phy-addr = <0x16790000>;
					status = "ok";
				};
			};
		};

+2 −2
Original line number Diff line number Diff line
@@ -81,7 +81,7 @@ struct cam_cdm_acquire_data {
	enum cam_cdm_id id;
	void *userdata;
	void (*cam_cdm_callback)(uint32_t handle, void *userdata,
		enum cam_cdm_cb_status status, uint32_t cookie);
		enum cam_cdm_cb_status status, uint64_t cookie);
	uint32_t base_array_cnt;
	struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
	struct cam_hw_version cdm_version;
@@ -128,7 +128,7 @@ struct cam_cdm_bl_cmd {
struct cam_cdm_bl_request {
	int flag;
	void *userdata;
	uint32_t cookie;
	uint64_t cookie;
	enum cam_cdm_bl_cmd_addr_type type;
	uint32_t cmd_arrary_count;
	struct cam_cdm_bl_cmd cmd[1];
+3 −0
Original line number Diff line number Diff line
@@ -42,6 +42,7 @@ static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
int cam_context_shutdown(struct cam_context *ctx)
{
	int rc = 0;
	int32_t ctx_hdl = ctx->dev_hdl;

	if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev) {
		rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
@@ -56,6 +57,8 @@ int cam_context_shutdown(struct cam_context *ctx)
			CAM_ERR(CAM_CORE, "Error while dev release %d", rc);
	}

	if (!rc)
		cam_destroy_device_hdl(ctx_hdl);
	return rc;
}

+1 −1
Original line number Diff line number Diff line
@@ -70,7 +70,7 @@ struct cam_ctx_request {
	uint32_t                      num_in_map_entries;
	struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
	uint32_t                      num_out_map_entries;
	uint32_t                      num_in_acked;
	atomic_t                      num_in_acked;
	uint32_t                      num_out_acked;
	int                           flushed;
	struct cam_context           *ctx;
+66 −18
Original line number Diff line number Diff line
@@ -143,6 +143,7 @@ static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
			ctx->dev_name, ctx->ctx_id, req->request_id);

	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req->hw_update_entries;
	cfg.num_hw_update_entries = req->num_hw_update_entries;
	cfg.out_map_entries = req->out_map_entries;
@@ -188,8 +189,7 @@ static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
		return;
	}

	req->num_in_acked++;
	if (req->num_in_acked == req->num_in_map_entries) {
	if (atomic_inc_return(&req->num_in_acked) == req->num_in_map_entries) {
		apply.request_id = req->request_id;
		/*
		 * take mutex to ensure that another thread does
@@ -341,6 +341,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
	req->num_hw_update_entries = cfg.num_hw_update_entries;
	req->num_out_map_entries = cfg.num_out_map_entries;
	req->num_in_map_entries = cfg.num_in_map_entries;
	atomic_set(&req->num_in_acked, 0);
	req->request_id = packet->header.request_id;
	req->status = 1;
	req->req_priv = cfg.priv;
@@ -491,6 +492,7 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
	struct cam_ctx_request *req;
	uint32_t i;
	int rc = 0;
	bool free_req;

	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush ctx", ctx->dev_name);

@@ -527,6 +529,21 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)

		flush_args.flush_req_pending[flush_args.num_req_pending++] =
			req->req_priv;

		free_req = false;
		for (i = 0; i < req->num_in_map_entries; i++) {
			rc = cam_sync_deregister_callback(
				cam_context_sync_callback,
				(void *)req,
				req->in_map_entries[i].sync_id);
			if (!rc) {
				cam_context_putref(ctx);
				if (atomic_inc_return(&req->num_in_acked) ==
					req->num_in_map_entries)
					free_req = true;
			}
		}

		for (i = 0; i < req->num_out_map_entries; i++) {
			if (req->out_map_entries[i].sync_id != -1) {
				rc = cam_sync_signal(
@@ -536,13 +553,23 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
					CAM_ERR(CAM_CTXT,
					"Req: %llu already signalled, sync_id:%d",
					req->request_id,
						req->out_map_entries[i].
						sync_id);
					req->out_map_entries[i].sync_id);
					break;
				}
			}
		}

		/*
		 * If we have deregistered the last sync callback, req will
		 * not be put on the free list. So put it on the free list here
		 */
		if (free_req) {
			req->ctx = NULL;
			spin_lock(&ctx->lock);
			list_add_tail(&req->list, &ctx->free_req_list);
			spin_unlock(&ctx->lock);
		}

		if (cam_debug_ctx_req_list & ctx->dev_id)
			CAM_INFO(CAM_CTXT,
				"[%s][%d] : Deleting req[%llu] from temp_list",
@@ -628,7 +655,9 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
	struct cam_ctx_request *req = NULL;
	struct cam_hw_flush_args flush_args;
	uint32_t i;
	int32_t sync_id = 0;
	int rc = 0;
	bool free_req = false;

	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush req", ctx->dev_name);

@@ -681,32 +710,51 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
	}

	if (req) {
		if (flush_args.num_req_pending) {
			for (i = 0; i < req->num_in_map_entries; i++) {
				rc = cam_sync_deregister_callback(
					cam_context_sync_callback,
					(void *)req,
					req->in_map_entries[i].sync_id);
				if (rc)
					continue;

				cam_context_putref(ctx);
				if (atomic_inc_return(&req->num_in_acked) ==
					req->num_in_map_entries)
					free_req = true;
			}
		}

		if (flush_args.num_req_pending || flush_args.num_req_active) {
			for (i = 0; i < req->num_out_map_entries; i++)
				if (req->out_map_entries[i].sync_id != -1) {
					rc = cam_sync_signal(
						req->out_map_entries[i].sync_id,
			for (i = 0; i < req->num_out_map_entries; i++) {
				sync_id =
					req->out_map_entries[i].sync_id;
				if (sync_id != -1) {
					rc = cam_sync_signal(sync_id,
						CAM_SYNC_STATE_SIGNALED_ERROR);
					if (rc == -EALREADY) {
						CAM_ERR(CAM_CTXT,
						"Req: %llu already signalled, sync_id:%d",
							req->request_id,
							req->out_map_entries[i].
							sync_id);
						req->request_id, sync_id);
						break;
					}
				}
			if (flush_args.num_req_active) {
			}
			if (flush_args.num_req_active || free_req) {
				req->ctx = NULL;
				spin_lock(&ctx->lock);
				list_add_tail(&req->list, &ctx->free_req_list);
				spin_unlock(&ctx->lock);
				req->ctx = NULL;

				if (cam_debug_ctx_req_list & ctx->dev_id)
					CAM_INFO(CAM_CTXT,
						"[%s][%d] : Moving req[%llu] from active_list to free_list",
						"[%s][%d] : Moving req[%llu] from %s to free_list",
						ctx->dev_name, ctx->ctx_id,
						req->request_id);
						req->request_id,
						flush_args.num_req_active ?
							"active_list" :
							"pending_list");
			}
		}
	}
Loading