Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4389ad42 authored by Zhen Kong's avatar Zhen Kong
Browse files

qseecom: update cumulative_mode and timer properly



qseecom.cumulative_mode is used to track the cumulative bus request
mode from all concurrent running clients. It is registered before
send_cmd operation, but is not un-registered after it is complete.
This may cause the bus bandwidth to be restored to a wrong mode after
resuming from sleep. Thus, we make a change to update it accordingly.
qseecom.current_mode is also updated in the suspend/resume functions to
ensure the timer can scale down bus bandwidth after resume when
cumulative_mode is non-zero. In addition, the timer operation is
optimized in case there are multiple unserialized concurrent users of
the same timer.

Change-Id: I06043d2b88a47e24da03460fe1ae26a94cb6a608
Signed-off-by: default avatarZhen Kong <zkong@codeaurora.org>
parent 37239795
Loading
Loading
Loading
Loading
+51 −10
Original line number Diff line number Diff line
@@ -628,7 +628,8 @@ static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
	mutex_lock(&qsee_bw_mutex);
	qseecom.bw_scale_down_timer.expires = jiffies +
		msecs_to_jiffies(duration);
	add_timer(&(qseecom.bw_scale_down_timer));
	mod_timer(&(qseecom.bw_scale_down_timer),
		qseecom.bw_scale_down_timer.expires);
	qseecom.timer_running = true;
	mutex_unlock(&qsee_bw_mutex);
}
@@ -2313,6 +2314,10 @@ int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(handle->dev);
			mutex_unlock(&qsee_bw_mutex);
		}
	}
	return ret;
@@ -3607,6 +3612,13 @@ static long qseecom_ioctl(struct file *file, unsigned cmd,
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			/* register bus bw in case the client doesn't do it */
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
@@ -3638,6 +3650,12 @@ static long qseecom_ioctl(struct file *file, unsigned cmd,
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
@@ -3809,6 +3827,10 @@ static long qseecom_ioctl(struct file *file, unsigned cmd,
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(data);
			mutex_unlock(&qsee_bw_mutex);
		}
		atomic_dec(&data->ioctl_count);
		break;
@@ -4609,13 +4631,19 @@ static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
	struct qseecom_clk *qclk;
	qclk = &qseecom.qsee;

	if (qseecom.cumulative_mode != INACTIVE) {
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	if (qseecom.cumulative_mode != INACTIVE &&
		qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
@@ -4623,12 +4651,14 @@ static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
		if (qseecom.timer_running) {
	}

	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;
		}
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);

	return 0;
}

@@ -4639,6 +4669,8 @@ static int qseecom_resume(struct platform_device *pdev)
	struct qseecom_clk *qclk;
	qclk = &qseecom.qsee;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
@@ -4649,9 +4681,10 @@ static int qseecom_resume(struct platform_device *pdev)
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	mutex_lock(&clk_access_lock);
	if (qclk->clk_access_cnt) {

		ret = clk_prepare_enable(qclk->ce_core_clk);
@@ -4674,13 +4707,20 @@ static int qseecom_resume(struct platform_device *pdev)
			qclk->clk_access_cnt = 0;
			goto ce_bus_clk_err;
		}
	}

	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		add_timer(&(qseecom.bw_scale_down_timer));
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;

	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);


	return 0;

ce_bus_clk_err:
@@ -4689,6 +4729,7 @@ ce_clk_err:
	clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	return -EIO;
}
static struct of_device_id qseecom_match[] = {