
Commit 258fb16f authored by Karthik Anantha Ram

msm: camera: reqmgr: Modify frame sync logic



This change performs request ID matching between any two
given sync links. It also adds new functionality to support
sync links with different pipeline delays.

Change-Id: Ia0ffdd23018f118c5e4aa7eab1c85592f1f9080d
Signed-off-by: Karthik Anantha Ram <kartanan@codeaurora.org>
Signed-off-by: Depeng Shao <depengs@codeaurora.org>
parent e76e56c7
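
A minimal standalone sketch of the two ideas described in the commit message, using hypothetical simplified types (demo_link, demo_set_master, demo_find_slot_for_req are illustrative names, not the driver's structs or functions): the sync link with the larger max pipeline delay (pd) is treated as master, and cross-link readiness is validated by matching request IDs rather than SOF counters.

/* Standalone illustration only; compile with any C99 compiler. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NUM_SLOTS 8

struct demo_link {
	uint32_t link_hdl;
	int32_t  max_delay;               /* highest pd among devices on the link */
	bool     is_master;
	int64_t  slot_req_id[NUM_SLOTS];  /* -1 means the slot holds no request */
};

/* Pick the master: the link with the larger pipeline delay leads. */
static void demo_set_master(struct demo_link *l1, struct demo_link *l2)
{
	if (l1->max_delay > l2->max_delay)
		l1->is_master = true;
	else if (l2->max_delay > l1->max_delay)
		l2->is_master = true;
	/* Equal delays: neither is master; plain request-id matching suffices. */
}

/* Request ID matching: locate the slot on the other link holding req_id. */
static int demo_find_slot_for_req(const struct demo_link *link, int64_t req_id)
{
	for (int i = 0; i < NUM_SLOTS; i++)
		if (link->slot_req_id[i] == req_id)
			return i;
	return -1; /* not found: the other link cannot apply this frame yet */
}

int main(void)
{
	struct demo_link cam0 = { 0x10, 2, false, { 4, 5, 6, -1, -1, -1, -1, -1 } };
	struct demo_link cam1 = { 0x20, 1, false, { 5, 6, -1, -1, -1, -1, -1, -1 } };

	demo_set_master(&cam0, &cam1);
	printf("master is link 0x%x\n", cam0.is_master ? cam0.link_hdl : cam1.link_hdl);
	printf("req 5 sits in slot %d on link 0x%x\n",
	       demo_find_slot_for_req(&cam1, 5), cam1.link_hdl);
	return 0;
}

This loosely mirrors __cam_req_mgr_set_master_link() and __cam_req_mgr_find_slot_for_req() in the diff below, which additionally offset slot indices by the pd difference between the two links when cross-checking readiness.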
+304 −209
@@ -43,9 +43,6 @@ void cam_req_mgr_core_link_reset(struct cam_req_mgr_core_link *link)
	link->subscribe_event = 0;
	link->trigger_mask = 0;
	link->sync_link = 0;
	link->sof_counter = 0;
	link->sync_self_ref = 0;
	link->frame_skip_flag = false;
	link->sync_link_sof_skip = false;
	link->open_req_cnt = 0;
	link->last_flush_id = 0;
@@ -143,31 +140,35 @@ static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
}

/**
 * __cam_req_mgr_validate_inject_delay()
 * __cam_req_mgr_inject_delay()
 *
 * @brief    : Check if any pd device is introducing inject delay
 * @brief    : Check if any pd device is injecting delay
 * @tbl      : cam_req_mgr_req_tbl
 * @curr_idx : slot idx
 *
 * @return   : 0 for success, negative for failure
 */
static int __cam_req_mgr_validate_inject_delay(
static int __cam_req_mgr_inject_delay(
	struct cam_req_mgr_req_tbl  *tbl,
	int32_t curr_idx)
{
	struct cam_req_mgr_tbl_slot *slot = NULL;
	int rc = 0;

	while (tbl) {
		slot = &tbl->slot[curr_idx];
		if (slot->inject_delay > 0) {
			slot->inject_delay--;
			return -EAGAIN;
			CAM_DBG(CAM_CRM,
				"Delay injected by pd %d device",
				tbl->pd);
			rc = -EAGAIN;
		}
		__cam_req_mgr_dec_idx(&curr_idx, tbl->pd_delta,
			tbl->num_slots);
		tbl = tbl->next;
	}
	return 0;
	return rc;
}

/**
@@ -209,19 +210,6 @@ static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status,
		traverse_data->in_q->slot[curr_idx].skip_idx);

	if ((traverse_data->self_link == true) &&
		(!traverse_data->inject_delay_chk)) {
		rc = __cam_req_mgr_validate_inject_delay(tbl, curr_idx);
		if (rc) {
			CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
			apply_data[tbl->pd].req_id = -1;
			/* This pd tbl not ready to proceed with asked idx */
			SET_FAILURE_BIT(traverse_data->result, tbl->pd);
			return -EAGAIN;
		}
		traverse_data->inject_delay_chk = true;
	}

	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
@@ -552,14 +540,12 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 *             traversed through
 * @idx      : index within input request queue
 * @validate_only : Whether to validate only and/or update settings
 * @self_link : To indicate whether the validation is for the given link or
 *              other sync link
 *
 * @return   : 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
	int32_t idx, bool validate_only, bool self_link)
	int32_t idx, bool validate_only)
{
	int                            rc;
	struct cam_req_mgr_traverse    traverse_data;
@@ -581,19 +567,16 @@ static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
	traverse_data.in_q = in_q;
	traverse_data.result = 0;
	traverse_data.validate_only = validate_only;
	traverse_data.self_link = self_link;
	traverse_data.inject_delay_chk = false;
	traverse_data.open_req_cnt = link->open_req_cnt;

	/*
	 *  Traverse through all pd tables, if result is success,
	 *  apply the settings
	 */

	rc = __cam_req_mgr_traverse(&traverse_data);
	CAM_DBG(CAM_CRM,
		"SOF: idx %d self_link %d validate %d result %x pd_mask %x rc %d",
		idx, traverse_data.self_link, traverse_data.validate_only,
		traverse_data.result, link->pd_mask, rc);
		"SOF: idx %d result %x pd_mask %x rc %d",
		idx, traverse_data.result, link->pd_mask, rc);

	if (!rc && traverse_data.result == link->pd_mask) {
		CAM_DBG(CAM_CRM,
@@ -642,113 +625,198 @@ static int32_t __cam_req_mgr_find_slot_for_req(
}

/**
 * __cam_req_mgr_reset_sof_cnt()
 * __cam_req_mgr_check_sync_for_mslave()
 *
 * @brief    : the sof_count for both the links are reset
 * @link     : pointer to link whose input queue and req tbl are
 * @brief    : Processes requests during sync mode [master-slave]
 *             Here master corresponds to the link having a higher
 *             max_delay (pd) compared to the slave link.
 * @link     : Pointer to link whose input queue and req tbl are
 *             traversed through
 * @slot     : Pointer to the current slot being processed
 * @return   : 0 for success, negative for failure
 *
 */
static void __cam_req_mgr_reset_sof_cnt(
	struct cam_req_mgr_core_link *link)
static int __cam_req_mgr_check_sync_for_mslave(
	struct cam_req_mgr_core_link *link,
	struct cam_req_mgr_slot *slot)
{
	link->sof_counter = -1;
	link->sync_link->sof_counter = -1;
	link->frame_skip_flag = false;
	struct cam_req_mgr_core_link *sync_link = NULL;
	int sync_slot_idx = 0, prev_idx, next_idx, rd_idx, rc = 0;
	int64_t req_id = 0, sync_req_id = 0;

	if (!link->sync_link) {
		CAM_ERR(CAM_CRM, "Sync link null");
		return -EINVAL;
	}

	sync_link = link->sync_link;
	req_id = slot->req_id;

	CAM_DBG(CAM_CRM,
		"link_hdl %x self_counter %lld other_counter %lld frame_skip_lag %d",
		link->link_hdl, link->sof_counter,
		link->sync_link->sof_counter, link->frame_skip_flag);
		"link_hdl %x req %lld frame_skip_flag %d ",
		link->link_hdl, req_id, link->sync_link_sof_skip);

	if (sync_link->sync_link_sof_skip) {
		CAM_DBG(CAM_CRM,
			"No req applied on corresponding SOF on sync link: %x",
				sync_link->link_hdl);
		sync_link->sync_link_sof_skip = false;
		__cam_req_mgr_inject_delay(link->req.l_tbl, slot->idx);
		return -EAGAIN;
	}

/**
 * __cam_req_mgr_sof_cnt_initialize()
 *
 * @brief    : when the sof count is intially -1 it increments count
 *             and computes the sync_self_ref for this link
 *             the count needs to be wrapped back starting from 0
 * @link     : pointer to link whose input queue and req tbl are
 *             traversed through
 *
 */
static void __cam_req_mgr_sof_cnt_initialize(
	struct cam_req_mgr_core_link *link)
{
	link->sof_counter++;
	link->sync_self_ref = link->sof_counter -
		link->sync_link->sof_counter;
	if (link->is_master) {
		rc = __cam_req_mgr_inject_delay(link->req.l_tbl, slot->idx);
		if (rc) {
			CAM_DBG(CAM_CRM,
				"Skip Process Req: %lld on link: %x",
				req_id, link->link_hdl);
			link->sync_link_sof_skip = true;
			return rc;
		}

		if (sync_link->initial_skip) {
			CAM_DBG(CAM_CRM,  "Link 0x%x [slave] not streamed on",
				sync_link->link_hdl);
			return -EAGAIN;
		}

		rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true);
		if (rc) {
			CAM_DBG(CAM_CRM,
		"link_hdl %x self_counter %lld other_counter %lld",
		link->link_hdl, link->sof_counter,
		link->sync_link->sof_counter);
				"Req: %lld [master] not ready on link: %x, rc=%d",
				req_id, link->link_hdl, rc);
			return rc;
		}

/**
 * __cam_req_mgr_wrap_sof_cnt()
 *
 * @brief    : once the sof count reaches a predefined maximum
 *             the count needs to be wrapped back starting from 0
 * @link     : pointer to link whose input queue and req tbl are
 *             traversed through
 *
 */
static void __cam_req_mgr_wrap_sof_cnt(
	struct cam_req_mgr_core_link *link)
{
	link->sof_counter = (MAX_SYNC_COUNT -
		(link->sync_link->sof_counter));
	link->sync_link->sof_counter = 0;
		prev_idx = slot->idx;
		__cam_req_mgr_dec_idx(&prev_idx,
			(link->max_delay - sync_link->max_delay),
			link->req.in_q->num_slots);

		rd_idx = sync_link->req.in_q->rd_idx;
		sync_req_id = link->req.in_q->slot[prev_idx].req_id;
		if (sync_req_id != -1) {
			sync_slot_idx = __cam_req_mgr_find_slot_for_req(
				sync_link->req.in_q, sync_req_id);

			if (sync_slot_idx == -1) {
				CAM_DBG(CAM_CRM,
		"link_hdl %x self_counter %lld sync_link_hdl %x other_counter %lld",
		link->link_hdl, link->sof_counter,
		link->sync_link->link_hdl, link->sync_link->sof_counter);
					"Prev Req: %lld [master] not found on link: %x [slave]",
					sync_req_id, sync_link->link_hdl);
				return -EINVAL;
			}

/**
 * __cam_req_mgr_validate_sof_cnt()
 *
 * @brief     : validates sof count difference for a given link
 * @link      : pointer to link whose input queue and req tbl are
 *              traversed through
 * @sync_link : pointer to the sync link
 * @return   : 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_validate_sof_cnt(
	struct cam_req_mgr_core_link *link,
	struct cam_req_mgr_core_link *sync_link)
{
	int64_t sync_diff = 0;
	int rc = 0;
			if ((sync_link->req.in_q->slot[sync_slot_idx].status !=
				CRM_SLOT_STATUS_REQ_APPLIED) &&
				((sync_slot_idx - rd_idx) >= 1) &&
				(sync_link->req.in_q->slot[rd_idx].status !=
				CRM_SLOT_STATUS_REQ_APPLIED)) {
				CAM_DBG(CAM_CRM,
					"Prev Req: %lld [master] not next on link: %x [slave]",
					sync_req_id,
					sync_link->link_hdl);
				return -EINVAL;
			}

	if (link->sof_counter == MAX_SYNC_COUNT)
		__cam_req_mgr_wrap_sof_cnt(link);
			rc = __cam_req_mgr_check_link_is_ready(sync_link,
				sync_slot_idx, true);
			if (rc &&
				(sync_link->req.in_q->slot[sync_slot_idx].status
				!= CRM_SLOT_STATUS_REQ_APPLIED)) {
				CAM_DBG(CAM_CRM,
					"Req: %lld not ready on [slave] link: %x, rc=%d",
					sync_req_id, sync_link->link_hdl, rc);
				return rc;
			}
		}
	} else {
		if (link->initial_skip)
			link->initial_skip = false;

	sync_diff = link->sof_counter - sync_link->sof_counter;
		sync_slot_idx = __cam_req_mgr_find_slot_for_req(
			sync_link->req.in_q, req_id);
		if (sync_slot_idx == -1) {
			CAM_DBG(CAM_CRM,
				"Req: %lld not found on link: %x [master]",
				req_id, sync_link->link_hdl);
			return -EINVAL;
		}

		if (sync_link->req.in_q->slot[sync_slot_idx].status !=
			CRM_SLOT_STATUS_REQ_APPLIED) {
			CAM_DBG(CAM_CRM,
		"link[%x] self_counter=%lld other_counter=%lld diff=%lld sync_self_ref=%lld",
		link->link_hdl, link->sof_counter,
		sync_link->sof_counter, sync_diff, link->sync_self_ref);
				"Req: %lld [master] not applied yet: %x",
				req_id, sync_link->link_hdl);
			return -EAGAIN;
		}

	if (sync_diff > SYNC_LINK_SOF_CNT_MAX_LMT) {
		link->sync_link->frame_skip_flag = true;
		CAM_WARN(CAM_CRM,
			"Detected anomaly, skip link_hdl %x self_counter=%lld other_counter=%lld sync_self_ref=%lld",
			link->link_hdl, link->sof_counter,
			sync_link->sof_counter, link->sync_self_ref);
		rc = -EPERM;
		rc = __cam_req_mgr_inject_delay(link->req.l_tbl, slot->idx);
		if (rc) {
			CAM_DBG(CAM_CRM,
				"Skip Process Req: %lld on link: %x",
				req_id, link->link_hdl);
			link->sync_link_sof_skip = true;
			return rc;
		}

		next_idx = link->req.in_q->rd_idx;
		rd_idx = sync_link->req.in_q->rd_idx;
		__cam_req_mgr_inc_idx(&next_idx,
			(sync_link->max_delay - link->max_delay),
			link->req.in_q->num_slots);

		sync_req_id = link->req.in_q->slot[next_idx].req_id;
		sync_slot_idx = __cam_req_mgr_find_slot_for_req(
					sync_link->req.in_q, sync_req_id);
		if (sync_slot_idx == -1) {
			CAM_DBG(CAM_CRM,
				"Next Req: %lld [slave] not found on link: %x [master]",
				sync_req_id, sync_link->link_hdl);
			return -EINVAL;
		}

		if ((sync_link->req.in_q->slot[sync_slot_idx].status !=
			CRM_SLOT_STATUS_REQ_APPLIED) &&
			((sync_slot_idx - rd_idx) >= 1) &&
			(sync_link->req.in_q->slot[rd_idx].status !=
			CRM_SLOT_STATUS_REQ_APPLIED)) {
			CAM_DBG(CAM_CRM,
				"Next Req: %lld [slave] not next on link: %x [master]",
				sync_req_id, sync_link->link_hdl);
			return -EINVAL;
		}

		rc = __cam_req_mgr_check_link_is_ready(sync_link,
			sync_slot_idx, true);
		if (rc && (sync_link->req.in_q->slot[sync_slot_idx].status !=
			CRM_SLOT_STATUS_REQ_APPLIED)) {
			CAM_DBG(CAM_CRM,
				"Next Req: %lld [slave] not ready on [master] link: %x, rc=%d",
				sync_req_id, sync_link->link_hdl, rc);
			return rc;
		}
	}

	CAM_DBG(CAM_REQ,
		"Req: %lld ready to apply on link: %x [validation successful]",
		req_id, link->link_hdl);

	/*
	 *  At this point all validation is successfully done
	 *  and we can proceed to apply the given request.
	 *  Ideally the next call should return success.
	 */
	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, false);
	if (rc)
		CAM_WARN(CAM_CRM, "Unexpected return value rc: %d", rc);

	return 0;
}


/**
 * __cam_req_mgr_process_sync_req()
 * __cam_req_mgr_check_sync_request_is_ready()
 *
 * @brief    : processes requests during sync mode
 * @link     : pointer to link whose input queue and req tbl are
@@ -757,13 +825,13 @@ static int __cam_req_mgr_validate_sof_cnt(
 * @return   : 0 for success, negative for failure
 *
 */
static int __cam_req_mgr_process_sync_req(
static int __cam_req_mgr_check_sync_req_is_ready(
	struct cam_req_mgr_core_link *link,
	struct cam_req_mgr_slot *slot)
{
	struct cam_req_mgr_core_link *sync_link = NULL;
	int64_t req_id = 0;
	int sync_slot_idx = 0, rc = 0;
	int sync_slot_idx = 0, sync_rd_idx = 0, rc = 0;

	if (!link->sync_link) {
		CAM_ERR(CAM_CRM, "Sync link null");
@@ -774,94 +842,82 @@ static int __cam_req_mgr_process_sync_req(
	req_id = slot->req_id;

	CAM_DBG(CAM_REQ,
		"link_hdl %x req %lld sync_self_ref %lld sof_counter %lld frame_skip_flag %d sync_link_self_ref %lld",
		link->link_hdl, req_id, link->sync_self_ref, link->sof_counter,
		link->frame_skip_flag, link->sync_link->sync_self_ref);
		"link_hdl %x req %lld frame_skip_flag %d ",
		link->link_hdl, req_id, link->sync_link_sof_skip);

	if (sync_link->sync_link_sof_skip) {
		CAM_DBG(CAM_REQ,
			"No req applied on corresponding SOF on sync link: %x",
			sync_link->link_hdl);
		sync_link->sync_link_sof_skip = false;
		/*It is to manage compensate inject delay for each pd*/
		__cam_req_mgr_check_link_is_ready(link, slot->idx, true, true);
		return -EINVAL;
		__cam_req_mgr_inject_delay(link->req.l_tbl, slot->idx);
		return -EAGAIN;
	}

	if (link->sof_counter == -1) {
		__cam_req_mgr_sof_cnt_initialize(link);
	} else if ((link->frame_skip_flag) &&
		(sync_link->sync_self_ref != -1)) {
		CAM_DBG(CAM_REQ, "Link[%x] Req[%lld] Resetting values ",
			link->link_hdl, req_id);
		__cam_req_mgr_reset_sof_cnt(link);
		__cam_req_mgr_sof_cnt_initialize(link);
	} else {
		link->sof_counter++;
	rc = __cam_req_mgr_inject_delay(link->req.l_tbl, slot->idx);
	if (rc) {
		CAM_DBG(CAM_CRM,
			"Skip Process Req: %lld on link: %x",
			req_id, link->link_hdl);
		link->sync_link_sof_skip = true;
		return rc;
	}

	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true, true);
	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true);
	if (rc) {
		CAM_DBG(CAM_REQ,
		CAM_DBG(CAM_CRM,
			"Req: %lld [My link] not ready on link: %x, rc=%d",
			req_id, link->link_hdl, rc);
		link->sync_link_sof_skip = true;
		goto failure;
		return rc;
	}

	sync_slot_idx = __cam_req_mgr_find_slot_for_req(
		sync_link->req.in_q, req_id);

	if (sync_slot_idx != -1) {
		rc = __cam_req_mgr_check_link_is_ready(
			sync_link, sync_slot_idx, true, false);
		CAM_DBG(CAM_CRM, "sync_slot_idx=%d, status=%d, rc=%d",
			sync_slot_idx,
			sync_link->req.in_q->slot[sync_slot_idx].status,
			rc);
	} else {
		CAM_DBG(CAM_CRM, "sync_slot_idx=%d, rc=%d",
			sync_slot_idx, rc);
	if (sync_slot_idx == -1) {
		CAM_DBG(CAM_CRM, "Req: %lld not found on link: %x [other link]",
			req_id, sync_link->link_hdl);
		link->sync_link_sof_skip = true;
		return -EINVAL;
	}

	if ((sync_slot_idx != -1) &&
		((sync_link->req.in_q->slot[sync_slot_idx].status ==
		CRM_SLOT_STATUS_REQ_APPLIED) || (rc == 0))) {
		rc = __cam_req_mgr_validate_sof_cnt(link, sync_link);
		if (rc) {
	sync_rd_idx = sync_link->req.in_q->rd_idx;
	if ((sync_link->req.in_q->slot[sync_slot_idx].status !=
		CRM_SLOT_STATUS_REQ_APPLIED) &&
		((sync_slot_idx - sync_rd_idx) >= 1) &&
		(sync_link->req.in_q->slot[sync_rd_idx].status !=
		CRM_SLOT_STATUS_REQ_APPLIED)) {
		CAM_DBG(CAM_CRM,
				"Req: %lld validate failed: %x",
			"Req: %lld [other link] not next req to be applied on link: %x",
			req_id, sync_link->link_hdl);
			goto failure;
		link->sync_link_sof_skip = true;
		return -EAGAIN;
	}

	rc = __cam_req_mgr_check_link_is_ready(sync_link, sync_slot_idx, true);
	if (rc && (sync_link->req.in_q->slot[sync_slot_idx].status !=
		CRM_SLOT_STATUS_REQ_APPLIED)) {
		CAM_DBG(CAM_CRM,
			"Req: %lld not ready on [other link] link: %x, rc=%d",
			req_id, sync_link->link_hdl, rc);
		link->sync_link_sof_skip = true;
		return rc;
	}

	CAM_DBG(CAM_REQ,
		"Req: %lld ready to apply on link: %x [validation successful]",
		req_id, link->link_hdl);

	/*
	 *  At this point all validation is successfully done
	 *  and we can proceed to apply the given request.
	 *  Ideally the next call should return success.
	 */
		rc = __cam_req_mgr_check_link_is_ready(link,
			slot->idx, false, true);

	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, false);
	if (rc)
		CAM_WARN(CAM_CRM, "Unexpected return value rc: %d", rc);
	} else {
		CAM_DBG(CAM_REQ,
			"Req: %lld [Other link] not ready to apply on link: %x",
			req_id, sync_link->link_hdl);
		rc = -EPERM;
		link->sync_link_sof_skip = true;
		goto failure;
	}

	return rc;

failure:
	link->sof_counter--;
	return rc;
	return 0;
}

/**
@@ -922,11 +978,21 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
			goto error;
		}

		if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC)
			rc = __cam_req_mgr_process_sync_req(link, slot);
		if ((slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) &&
			(link->sync_link)) {
			if (link->is_master || link->sync_link->is_master)
				rc =  __cam_req_mgr_check_sync_for_mslave(
					link, slot);
			else
				rc = __cam_req_mgr_check_sync_req_is_ready(
					link, slot);
		} else {
			rc = __cam_req_mgr_inject_delay(link->req.l_tbl,
				slot->idx);
			if (!rc)
				rc = __cam_req_mgr_check_link_is_ready(link,
				slot->idx, false, true);
					slot->idx, false);
		}

		if (rc < 0) {
			/*
@@ -970,9 +1036,6 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
		}
		spin_unlock_bh(&link->link_state_spin_lock);

		if (link->sync_link_sof_skip)
			link->sync_link_sof_skip = false;

		if (link->trigger_mask == link->subscribe_event) {
			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
			link->trigger_mask = 0;
@@ -1481,21 +1544,14 @@ static void __cam_req_mgr_unreserve_link(
	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
		if (session->links[i] == link)
			session->links[i] = NULL;
	}

	if ((session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
		(link->sync_link)) {
		/*
		 * make sure to unlink sync setup under the assumption
		 * of only having 2 links in a given session
		 */
		session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
			if (session->links[i])
		if (link->sync_link) {
			if (link->sync_link == session->links[i])
				session->links[i]->sync_link = NULL;
		}
	}

	link->sync_link = NULL;
	session->num_links--;
	CAM_DBG(CAM_CRM, "Active session links (%d)", session->num_links);
	mutex_unlock(&session->lock);
@@ -1769,8 +1825,9 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
	trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);

	if (slot->req_ready_map == tbl->dev_mask) {
		CAM_DBG(CAM_REQ, "idx %d req_id %lld pd %d SLOT READY",
			idx, add_req->req_id, tbl->pd);
		CAM_DBG(CAM_REQ,
			"link 0x%x idx %d req_id %lld pd %d SLOT READY",
			link->link_hdl, idx, add_req->req_id, tbl->pd);
		slot->state = CRM_REQ_STATE_READY;
	}
	mutex_unlock(&link->req.lock);
@@ -2673,6 +2730,33 @@ int cam_req_mgr_schedule_request(
	return rc;
}

/**
 * __cam_req_mgr_set_master_link()
 *
 * @brief    : Each link sets its max pd delay based on the devices on the
 *             link. The link with higher pd is assigned master.
 * @link1    : One of the sync links
 * @link2    : The other sync link
 */
static void __cam_req_mgr_set_master_link(
	struct cam_req_mgr_core_link *link1,
	struct cam_req_mgr_core_link *link2)
{

	if (link1->max_delay > link2->max_delay) {
		link1->is_master = true;
		link2->initial_skip = true;
	} else if (link2->max_delay > link1->max_delay) {
		link2->is_master = true;
		link1->initial_skip = true;
	}

	CAM_DBG(CAM_CRM,
		"link_hdl1[0x%x] is_master [%u] link_hdl2[0x%x] is_master[%u]",
		link1->link_hdl, link1->is_master,
		link2->link_hdl, link2->is_master);
}

int cam_req_mgr_sync_config(
	struct cam_req_mgr_sync_mode *sync_info)
{
@@ -2693,6 +2777,12 @@ int cam_req_mgr_sync_config(
		return -EINVAL;
	}

	if ((sync_info->sync_mode != CAM_REQ_MGR_SYNC_MODE_SYNC) &&
		(sync_info->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC)) {
		CAM_ERR(CAM_CRM, "Invalid sync mode %d", sync_info->sync_mode);
		return -EINVAL;
	}

	if ((!sync_info->link_hdls[0]) || (!sync_info->link_hdls[1])) {
		CAM_WARN(CAM_CRM, "Invalid link handles 0x%x 0x%x",
			sync_info->link_hdls[0], sync_info->link_hdls[1]);
@@ -2729,17 +2819,22 @@ int cam_req_mgr_sync_config(
		goto done;
	}

	link1->sof_counter = -1;
	link1->sync_self_ref = -1;
	link1->frame_skip_flag = false;
	link1->sync_link_sof_skip = false;
	link1->sync_link = link2;
	link1->sync_link = NULL;

	link2->sof_counter = -1;
	link2->sync_self_ref = -1;
	link2->frame_skip_flag = false;
	link2->sync_link_sof_skip = false;
	link2->sync_link = NULL;

	link1->is_master = false;
	link2->is_master = false;
	link1->initial_skip = false;
	link2->initial_skip = false;

	if (sync_info->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC) {
		link1->sync_link = link2;
		link2->sync_link = link1;
		__cam_req_mgr_set_master_link(link1, link2);
	}

	cam_session->sync_mode = sync_info->sync_mode;
	CAM_DBG(CAM_REQ,
+6 −12
@@ -135,9 +135,6 @@ enum cam_req_mgr_link_state {
 * @apply_data       : pointer which various tables will update during traverse
 * @in_q             : input request queue pointer
 * @validate_only    : Whether to validate only and/or update settings
 * @self_link        : To indicate whether the check is for the given link or
 *                     the other sync link
 * @inject_delay_chk : if inject delay has been validated for all pd devices
 * @open_req_cnt     : Count of open requests yet to be serviced in the kernel.
 */
struct cam_req_mgr_traverse {
@@ -147,8 +144,6 @@ struct cam_req_mgr_traverse {
	struct cam_req_mgr_apply     *apply_data;
	struct cam_req_mgr_req_queue *in_q;
	bool                          validate_only;
	bool                          self_link;
	bool                          inject_delay_chk;
	int32_t                       open_req_cnt;
};

@@ -308,10 +303,6 @@ struct cam_req_mgr_connected_device {
 *                         notification to CRM at those hw events.
 * @trigger_mask         : mask on which irq the req is already applied
 * @sync_link            : pointer to the sync link for synchronization
 * @sof_counter          : sof counter during sync_mode
 * @sync_self_ref        : reference sync count against which the difference
 *                         between sync_counts for a given link is checked
 * @frame_skip_flag      : flag that determines if a frame needs to be skipped
 * @sync_link_sof_skip   : flag determines if a pkt is not available for a given
 *                         frame in a particular link skip corresponding
 *                         frame in sync link as well.
@@ -319,6 +310,10 @@ struct cam_req_mgr_connected_device {
 *                         to be serviced in the kernel.
 * @last_flush_id        : Last request to flush
 * @is_used              : 1 if link is in use else 0
 * @is_master            : Based on pd among links, the link with the highest pd
 *                         is assigned as master
 * @initial_skip         : Flag to determine if slave has started streaming in
 *                         master-slave sync
 *
 */
struct cam_req_mgr_core_link {
@@ -338,13 +333,12 @@ struct cam_req_mgr_core_link {
	uint32_t                             subscribe_event;
	uint32_t                             trigger_mask;
	struct cam_req_mgr_core_link        *sync_link;
	int64_t                              sof_counter;
	int64_t                              sync_self_ref;
	bool                                 frame_skip_flag;
	bool                                 sync_link_sof_skip;
	int32_t                              open_req_cnt;
	uint32_t                             last_flush_id;
	atomic_t                             is_used;
	bool                                 is_master;
	bool                                 initial_skip;
};

/**