Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6c2e1c1a authored by Ravikishore Pampana
Browse files

msm: camera: crm: protect unlink function with lock



The camera request manager needs to handle the unlink IOCTL and
cam_req_mgr_cb_add_req running simultaneously. Added link mutex
protection in the unlink and cam_req_mgr_cb_add_req functions, and
added a link-state check in the camera request manager
cam_req_mgr_ops functions. Without the state check and mutex
protection, cam_req_mgr_cb_add_req may hit a use-after-free on the
link's work queue.

Change-Id: I062c0f7d6db842e4785aaac461a6132bcdabbe86
Signed-off-by: Ravikishore Pampana <rpampana@codeaurora.org>
parent 25fb250a
Loading
Loading
Loading
Loading
+50 −13
Original line number Diff line number Diff line
@@ -564,6 +564,7 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
			  * hence try again in next sof
			  */
			slot->status = CRM_SLOT_STATUS_REQ_PENDING;
			spin_lock_bh(&link->link_state_spin_lock);
			if (link->state == CAM_CRM_LINK_STATE_ERR) {
				/*
				 * During error recovery all tables should be
@@ -576,6 +577,7 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
					in_q->slot[in_q->rd_idx].status);
				rc = -EPERM;
			}
			spin_unlock_bh(&link->link_state_spin_lock);
			return rc;
		}
	}
@@ -592,13 +594,14 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
	} else {
		link->trigger_mask |= trigger;

		spin_lock_bh(&link->link_state_spin_lock);
		if (link->state == CAM_CRM_LINK_STATE_ERR) {
			CAM_WARN(CAM_CRM, "Err recovery done idx %d",
				in_q->rd_idx);
			mutex_lock(&link->lock);
			link->state = CAM_CRM_LINK_STATE_READY;
			mutex_unlock(&link->lock);
		}
		spin_unlock_bh(&link->link_state_spin_lock);

		if (link->trigger_mask == link->subscribe_event) {
			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
			link->trigger_mask = 0;
@@ -867,8 +870,6 @@ static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
	struct cam_req_mgr_connected_device    *dev;
	struct cam_req_mgr_core_dev_link_setup  link_data;

	mutex_lock(&link->lock);

	link_data.link_enable = 0;
	link_data.link_hdl = link->link_hdl;
	link_data.crm_cb = NULL;
@@ -895,7 +896,6 @@ static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
	link->num_devs = 0;
	link->max_delay = 0;

	mutex_unlock(&link->lock);
}

/**
@@ -938,6 +938,7 @@ static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
		return NULL;
	}
	mutex_init(&link->lock);
	spin_lock_init(&link->link_state_spin_lock);

	mutex_lock(&link->lock);
	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
@@ -1348,9 +1349,9 @@ int cam_req_mgr_process_error(void *priv, void *data)
			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
			in_q->rd_idx = idx;
			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
			mutex_lock(&link->lock);
			spin_lock_bh(&link->link_state_spin_lock);
			link->state = CAM_CRM_LINK_STATE_ERR;
			mutex_unlock(&link->lock);
			spin_unlock_bh(&link->link_state_spin_lock);
		}
	}
	mutex_unlock(&link->req.lock);
@@ -1401,11 +1402,14 @@ static int cam_req_mgr_process_trigger(void *priv, void *data)
	CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d",
		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);

	spin_lock_bh(&link->link_state_spin_lock);
	if (link->state == CAM_CRM_LINK_STATE_ERR)
		CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
			in_q->rd_idx,
			in_q->slot[in_q->rd_idx].status);

	spin_unlock_bh(&link->link_state_spin_lock);

	if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
		/*
		 * Do NOT reset req q slot data here, it can not be done
@@ -1461,6 +1465,16 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
		goto end;
	}

	mutex_lock(&link->lock);
	spin_lock_bh(&link->link_state_spin_lock);
	if (link->state != CAM_CRM_LINK_STATE_READY) {
		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
		rc = -EPERM;
		spin_unlock_bh(&link->link_state_spin_lock);
		goto end;
	}
	spin_unlock_bh(&link->link_state_spin_lock);

	/* Validate if req id is present in input queue */
	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
	if (idx < 0) {
@@ -1490,6 +1504,7 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
		add_req->dev_hdl, add_req->req_id);

end:
	mutex_unlock(&link->lock);
	return rc;
}

@@ -1525,6 +1540,15 @@ static int cam_req_mgr_cb_notify_err(
		goto end;
	}

	spin_lock_bh(&link->link_state_spin_lock);
	if (link->state != CAM_CRM_LINK_STATE_READY) {
		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
		spin_unlock_bh(&link->link_state_spin_lock);
		rc = -EPERM;
		goto end;
	}
	spin_unlock_bh(&link->link_state_spin_lock);

	crm_timer_reset(link->watchdog);
	task = cam_req_mgr_workq_get_task(link->workq);
	if (!task) {
@@ -1579,6 +1603,15 @@ static int cam_req_mgr_cb_notify_trigger(
		goto end;
	}

	spin_lock_bh(&link->link_state_spin_lock);
	if (link->state != CAM_CRM_LINK_STATE_READY) {
		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
		spin_unlock_bh(&link->link_state_spin_lock);
		rc = -EPERM;
		goto end;
	}
	spin_unlock_bh(&link->link_state_spin_lock);

	crm_timer_reset(link->watchdog);
	task = cam_req_mgr_workq_get_task(link->workq);
	if (!task) {
@@ -1639,7 +1672,6 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
	if (rc < 0)
		return rc;

	mutex_lock(&link->lock);
	max_delay = CAM_PIPELINE_DELAY_0;
	for (i = 0; i < link_info->num_devices; i++) {
		dev = &link->l_dev[i];
@@ -1742,7 +1774,6 @@ static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
	/* At start, expect max pd devices, all are in skip state */
	__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);

	mutex_unlock(&link->lock);
	return 0;

error:
@@ -1882,11 +1913,9 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
	if (link->link_hdl < 0) {
		CAM_ERR(CAM_CRM,
			"Insufficient memory to create new device handle");
		mutex_unlock(&link->lock);
		rc = link->link_hdl;
		goto link_hdl_fail;
	}
	mutex_unlock(&link->lock);
	link_info->link_hdl = link->link_hdl;

	/* Allocate memory to hold data of all linked devs */
@@ -1903,9 +1932,9 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
	if (rc < 0)
		goto setup_failed;

	mutex_lock(&link->lock);
	spin_lock_bh(&link->link_state_spin_lock);
	link->state = CAM_CRM_LINK_STATE_READY;
	mutex_unlock(&link->lock);
	spin_unlock_bh(&link->link_state_spin_lock);

	/* Create worker for current link */
	snprintf(buf, sizeof(buf), "%x-%x",
@@ -1936,6 +1965,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
		goto setup_failed;
	}

	mutex_unlock(&link->lock);
	mutex_unlock(&g_crm_core_dev->crm_lock);
	return rc;
setup_failed:
@@ -1944,6 +1974,7 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
	cam_destroy_device_hdl(link->link_hdl);
	link_info->link_hdl = 0;
link_hdl_fail:
	mutex_unlock(&link->lock);
	__cam_req_mgr_unreserve_link(cam_session, &link);
	mutex_unlock(&g_crm_core_dev->crm_lock);
	return rc;
@@ -1979,6 +2010,11 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
		mutex_unlock(&g_crm_core_dev->crm_lock);
		return -EINVAL;
	}

	mutex_lock(&link->lock);
	spin_lock_bh(&link->link_state_spin_lock);
	link->state = CAM_CRM_LINK_STATE_IDLE;
	spin_unlock_bh(&link->link_state_spin_lock);
	__cam_req_mgr_print_req_tbl(&link->req);

	/* Destroy workq payload data */
@@ -2004,6 +2040,7 @@ int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
	}

	/* Free curent link and put back into session's free pool of links */
	mutex_unlock(&link->lock);
	__cam_req_mgr_unreserve_link(cam_session, &link);
	mutex_unlock(&g_crm_core_dev->crm_lock);

+20 −17
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@
#ifndef _CAM_REQ_MGR_CORE_H_
#define _CAM_REQ_MGR_CORE_H_

#include <linux/spinlock.h>
#include "cam_req_mgr_interface.h"
#include "cam_req_mgr_core_defs.h"
#include "cam_req_mgr_timer.h"
@@ -263,8 +264,8 @@ struct cam_req_mgr_connected_device {
 * @num_devs             : num of connected devices to this link
 * @max_delay            : Max of pipeline delay of all connected devs
 * @workq                : Pointer to handle workq related jobs
 * @pd_mask        : each set bit indicates the device with pd equal to bit
 *                   position is available.
 * @pd_mask              : each set bit indicates the device with pd equal to
 *                          bit position is available.
 * - List of connected devices
 * @l_dev                : List of connected devices to this link
 * - Request handling data struct
@@ -272,13 +273,14 @@ struct cam_req_mgr_connected_device {
 * - Timer
 * @watchdog             : watchdog timer to recover from sof freeze
 * - Link private data
 * @workq_comp     : conditional variable to block user thread for workq to
 *                   finish schedule request processing
 * @workq_comp           : conditional variable to block user thread for workq
 *                          to finish schedule request processing
 * @state                : link state machine
 * @parent               : pvt data - link's parent is session
 * @lock                 : mutex lock to guard link data operations
 * @subscribe_event: irqs that link subscribes, IFE should send notification
 * to CRM at those hw events.
 * @link_state_spin_lock : spin lock to protect link state variable
 * @subscribe_event      : irqs that link subscribes, IFE should send
 *                         notification to CRM at those hw events.
 * @trigger_mask         : mask on which irq the req is already applied
 */
struct cam_req_mgr_core_link {
@@ -294,6 +296,7 @@ struct cam_req_mgr_core_link {
	enum cam_req_mgr_link_state          state;
	void                                *parent;
	struct mutex                         lock;
	spinlock_t                           link_state_spin_lock;
	uint32_t                             subscribe_event;
	uint32_t                             trigger_mask;
};