Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 16c5612c authored by Bhalchandra Gajare
Browse files

ASoC: wcd_cpe: Fix message queue list corruption



When clients try to send a message while the worker thread is
processing existing messages, there is a possibility that the message
queue can get corrupted. This can result in a kernel crash. Fix to make
sure the message queue does not get corrupted.

CRs-fixed: 951194
Change-Id: I1eb5232d9d079ecc9d28b95737333c1198a8b20c
Signed-off-by: Bhalchandra Gajare <gajare@codeaurora.org>
parent 2f49086b
Loading
Loading
Loading
Loading
+27 −10
Original line number Diff line number Diff line
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -823,6 +823,15 @@ static int wcd_cpe_enable(struct wcd_cpe_core *core,
	int ret = 0;

	if (enable) {
		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		if (IS_ERR_VALUE(ret)) {
			dev_err(core->dev,
				"%s: CPE Reset failed, error = %d\n",
				__func__, ret);
			goto done;
		}

		ret = wcd_cpe_setup_irqs(core);
		if (ret) {
			dev_err(core->dev,
@@ -892,11 +901,10 @@ static int wcd_cpe_enable(struct wcd_cpe_core *core,
			goto done;
		}

		/* Reset CPE first */
		ret = cpe_svc_reset(core->cpe_handle);
		ret = cpe_svc_shutdown(core->cpe_handle);
		if (IS_ERR_VALUE(ret)) {
			dev_err(core->dev,
				"%s: Failed to reset CPE with error %d\n",
				"%s: CPE shutdown failed, error %d\n",
				__func__, ret);
			goto done;
		}
@@ -1378,6 +1386,15 @@ static void wcd_cpe_svc_event_cb(const struct cpe_svc_notification *param)
		complete(&core->online_compl);
		break;
	case CPE_SVC_OFFLINE:
		/*
		 * offline can happen during normal shutdown,
		 * but we are interested in offline only during
		 * SSR.
		 */
		if (core->ssr_type != WCD_CPE_SSR_EVENT &&
		    core->ssr_type != WCD_CPE_BUS_DOWN_EVENT)
			break;

		active_sessions = wcd_cpe_lsm_session_active();
		wcd_cpe_change_online_state(core, 0);
		complete(&core->offline_compl);
@@ -3677,18 +3694,18 @@ static int wcd_cpe_dealloc_lsm_session(void *core_handle,
	lsm_sessions[session->id] = NULL;
	kfree(session);

	ret = wcd_cpe_vote(core, false);
	if (ret)
		dev_dbg(core->dev,
			"%s: Failed to un-vote cpe, err = %d\n",
			__func__, ret);

	if (!wcd_cpe_lsm_session_active()) {
		cmi_deregister(core->cmi_afe_handle);
		core->cmi_afe_handle = NULL;
		wcd_cpe_deinitialize_afe_port_data();
	}

	ret = wcd_cpe_vote(core, false);
	if (ret)
		dev_dbg(core->dev,
			"%s: Failed to un-vote cpe, err = %d\n",
			__func__, ret);

	return ret;
}

+75 −26
Original line number Diff line number Diff line
/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -162,6 +162,8 @@ struct cpe_info {
	struct list_head main_queue;
	struct completion cmd_complete;
	void *thread_handler;
	bool stop_thread;
	struct mutex msg_lock;
	enum cpe_state state;
	enum cpe_substate substate;
	struct list_head client_list;
@@ -284,6 +286,8 @@ struct cpe_priv {

static struct cpe_priv cpe_d;

static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle);

static enum cpe_svc_result cpe_is_command_valid(
		const struct cpe_info *t_info,
		enum cpe_command command);
@@ -343,6 +347,8 @@ static bool cpe_register_read_autoinc_supported(void)
	return true;
}


/* Called under msgq locked context */
static void cpe_cmd_received(struct cpe_info *t_info)
{
	struct cpe_command_node *node = NULL;
@@ -388,10 +394,22 @@ static int cpe_worker_thread(void *context)

	while (!kthread_should_stop()) {
		wait_for_completion(&t_info->cmd_complete);

		CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
		cpe_cmd_received(t_info);
		reinit_completion(&t_info->cmd_complete);
		if (t_info->stop_thread)
			goto unlock_and_exit;
		CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");
	};

	pr_debug("%s: thread exited\n", __func__);
	return 0;

unlock_and_exit:
	pr_debug("%s: thread stopped\n", __func__);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");

	return 0;
}

@@ -399,13 +417,29 @@ static void cpe_create_worker_thread(struct cpe_info *t_info)
{
	INIT_LIST_HEAD(&t_info->main_queue);
	init_completion(&t_info->cmd_complete);
	t_info->stop_thread = false;
	t_info->thread_handler = kthread_run(cpe_worker_thread,
		(void *)t_info, "cpe-worker-thread");
	pr_debug("%s: Created new worker thread\n",
		 __func__);
}

static void cpe_cleanup_worker_thread(struct cpe_info *t_info)
{
	if (t_info->thread_handler != NULL)
	if (!t_info->thread_handler) {
		pr_err("%s: thread not created\n", __func__);
		return;
	}

	/*
	 * Wake up the command handler in case
	 * it is waiting for a command to be processed.
	 */
	CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
	t_info->stop_thread = true;
	complete(&t_info->cmd_complete);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");

	kthread_stop(t_info->thread_handler);

	t_info->thread_handler = NULL;
@@ -436,14 +470,17 @@ cpe_send_cmd_to_thread(struct cpe_info *t_info,

	cmd->command = command;
	cmd->data = data;

	CPE_SVC_GRAB_LOCK(&t_info->msg_lock, "msg_lock");
	if (high_prio)
		list_add(&(cmd->list),
			 &(t_info->main_queue));
	else
		list_add_tail(&(cmd->list),
			      &(t_info->main_queue));

	complete(&t_info->cmd_complete);
	CPE_SVC_REL_LOCK(&t_info->msg_lock, "msg_lock");

	return rc;
}

@@ -825,9 +862,7 @@ static void cpe_process_irq_int(u32 irq,
	case CPE_IRQ_WDOG_BITE:
	case CPE_IRQ_RCO_WDOG_INT:
		err_irq = true;
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		cpe_svc_shutdown(t_info);
		CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		__cpe_svc_shutdown(t_info);
		break;

	case CPE_IRQ_FLL_LOCK_LOST:
@@ -1222,17 +1257,6 @@ static enum cpe_process_result cpe_process_kill_thread(
{
	struct cpe_svc_notification payload;

	if (t_info->pending) {
		struct cpe_send_msg *m =
			(struct cpe_send_msg *)t_info->pending;
		cpe_notify_cmi_client(t_info, m->payload,
			CPE_SVC_SHUTTING_DOWN);
		kfree(t_info->pending);
		t_info->pending = NULL;
	}

	cpe_command_cleanup(command_node);
	kfree(command_node);
	cpe_d.cpe_msg_buffer = 0;
	payload.result = CPE_SVC_SHUTTING_DOWN;
	payload.event = CPE_SVC_OFFLINE;
@@ -1245,7 +1269,6 @@ static enum cpe_process_result cpe_process_kill_thread(
	cpe_change_state(t_info, CPE_STATE_OFFLINE,
			 CPE_SS_IDLE);
	cpe_broadcast_notification(t_info, &payload);
	cpe_cleanup_worker_thread(t_info);

	return CPE_PROC_KILLED;
}
@@ -1562,6 +1585,7 @@ void *cpe_svc_initialize(

	memset(t_info->tgt->outbox, 0, cap->outbox_size);
	memset(t_info->tgt->inbox, 0, cap->inbox_size);
	mutex_init(&t_info->msg_lock);
	cpe_d.cpe_irq_control_callback = irq_control_callback;
	t_info->cpe_process_command = cpe_mt_process_cmd;
	t_info->cpe_cmd_validate = cpe_mt_validate_cmd;
@@ -1607,6 +1631,7 @@ enum cpe_svc_result cpe_svc_deinitialize(void *cpe_handle)
	t_info->tgt->tgt_deinit(t_info->tgt);
	cpe_change_state(t_info, CPE_STATE_UNINITIALIZED,
			 CPE_SS_IDLE);
	mutex_destroy(&t_info->msg_lock);
	kfree(t_info->tgt);
	kfree(t_info);
	mutex_destroy(&cpe_d.cpe_api_mutex);
@@ -1755,13 +1780,13 @@ enum cpe_svc_result cpe_svc_route_notification(void *cpe_handle,
	return rc;
}

enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
static enum cpe_svc_result __cpe_svc_shutdown(void *cpe_handle)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;
	struct cpe_info *t_info = (struct cpe_info *)cpe_handle;
	struct cpe_command_node *n = NULL;
	struct cpe_command_node kill_cmd;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	if (!t_info)
		t_info = cpe_d.cpe_default_handle;

@@ -1770,7 +1795,6 @@ enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
	if (rc != CPE_SVC_SUCCESS) {
		pr_err("%s: cmd validation fail, cmd = %d\n",
			__func__, CPE_CMD_SHUTDOWN);
		CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
		return rc;
	}

@@ -1782,7 +1806,11 @@ enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
			cpe_notify_cmi_client(t_info, (u8 *)n->data,
				CPE_SVC_SHUTTING_DOWN);
		}

		/*
		 * Since command cannot be processed,
		 * delete it from the list and perform cleanup
		 */
		list_del(&n->list);
		cpe_command_cleanup(n);
		kfree(n);
	}
@@ -1792,10 +1820,31 @@ enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
	t_info->state = CPE_STATE_OFFLINE;
	t_info->substate = CPE_SS_IDLE;

	rc = cpe_send_cmd_to_thread(t_info, CPE_CMD_KILL_THREAD,
				    NULL, true);
	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	memset(&kill_cmd, 0, sizeof(kill_cmd));
	kill_cmd.command = CPE_CMD_KILL_THREAD;

	if (t_info->pending) {
		struct cpe_send_msg *m =
			(struct cpe_send_msg *)t_info->pending;
		cpe_notify_cmi_client(t_info, m->payload,
			CPE_SVC_SHUTTING_DOWN);
		kfree(t_info->pending);
		t_info->pending = NULL;
	}

	cpe_cleanup_worker_thread(t_info);
	t_info->cpe_process_command(&kill_cmd);

	return rc;
}

enum cpe_svc_result cpe_svc_shutdown(void *cpe_handle)
{
	enum cpe_svc_result rc = CPE_SVC_SUCCESS;

	CPE_SVC_GRAB_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	rc = __cpe_svc_shutdown(cpe_handle);
	CPE_SVC_REL_LOCK(&cpe_d.cpe_api_mutex, "cpe_api");
	return rc;
}