Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ce2b2d67 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "firmware: qcom_scm: Fix compilation with QCOM_SCM config"

parents b8cb5d94 bd3584a6
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -267,6 +267,16 @@ config QCOM_SCM
	  Qualcomm Technologies, Inc. "Secure Channel Manager"
	  interface.

config QCOM_SCM_QCPE
	bool "Para-Virtualized Secure Channel Manager (SCM) support over QCPE"
	depends on QCOM_SCM
	default n
	help
	  To enable Secure Channel Manager(SCM) support for SoC
	  in virtualized Linux, where SCM backend is QCPE (QCOM Protected
	  environment). The SCM channel will use QCOM HAB interface for
	  front-end to back-end communication.

config QTEE_SHM_BRIDGE
	bool "QTI TEE shared memory bridge"
	depends on QCOM_SCM
+256 −6
Original line number Diff line number Diff line
@@ -22,6 +22,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scm.h>

#include <linux/habmm.h>

#define MAX_QCOM_SCM_ARGS 10
#define MAX_QCOM_SCM_RETS 3

@@ -168,19 +170,251 @@ static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};


#if IS_ENABLED(CONFIG_QCOM_SCM_QCPE)

#ifdef CONFIG_GHS_VMM
/*
 * Overlay for the extra-argument buffer used when an SCM call carries
 * more arguments than fit in registers; 32- vs 64-bit layout is chosen
 * by the SMC calling convention in use.
 */
struct scm_extra_arg {
	union {
		u32 args32[N_EXT_SCM_ARGS];
		u64 args64[N_EXT_SCM_ARGS];
	};
};
#endif

/*
 * Wire format of one SMC request/response exchanged with the QCPE
 * back-end over the HAB socket.  The same buffer is reused for the
 * reply, so the layout must stay packed and stable on both ends.
 */
struct smc_params_s {
	uint64_t fn_id;		/* SMC function identifier */
	uint64_t arginfo;	/* argument count/type descriptor */
	uint64_t args[MAX_SCM_ARGS];
} __packed;

/* HAB socket to the QCPE VM and whether it is currently open. */
static u32 handle;
static bool opened;

/*
 * Open the HAB socket to the QCPE VM, if it is not open already.
 * Returns 0 on success (or when already open), otherwise the
 * habmm_socket_open() error code.
 */
static int scm_qcpe_hab_open(void)
{
	int rc;

	if (opened)
		return 0;

	rc = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0);
	if (rc) {
		pr_err("habmm_socket_open failed with ret = %d\n", rc);
		return rc;
	}

	opened = true;
	return 0;
}

/* Tear down the HAB socket to the QCPE VM, if it is currently open. */
static void scm_qcpe_hab_close(void)
{
	if (!opened)
		return;

	habmm_socket_close(handle);
	handle = 0;
	opened = false;
}

/* Send SMC over HAB, receive the response. Both operations are blocking. */
/* This is meant to be called from non-atomic context. */
/*
 * Blocking send of one SMC request over HAB, followed by a blocking
 * receive of the response into the same buffer.  Interrupted receives
 * are retried.  Must only be called from non-atomic context.
 * Returns 0 on success, otherwise the failing habmm_* error code.
 */
static int scm_qcpe_hab_send_receive(struct smc_params_s *smc_params,
	u32 *size_bytes)
{
	int rc;

	rc = habmm_socket_send(handle, smc_params, sizeof(*smc_params), 0);
	if (rc) {
		pr_err("habmm_socket_send failed, ret= 0x%x\n", rc);
		return rc;
	}

	/* The request buffer is reused to hold the reply. */
	memset(smc_params, 0x0, sizeof(*smc_params));

	do {
		*size_bytes = sizeof(*smc_params);
		rc = habmm_socket_recv(handle, smc_params, size_bytes, 0,
			HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
	} while (rc == -EINTR);

	if (rc)
		pr_err("habmm_socket_recv failed, ret= 0x%x\n", rc);

	return rc;
}

/* Send SMC over HAB, receive the response, in non-blocking mode. */
/* This is meant to be called from atomic context. */
/*
 * Non-blocking variant of the HAB send/receive exchange, for atomic
 * context.  Both the send and the receive are retried on -EAGAIN for
 * up to one second each instead of sleeping.
 * Returns 0 on success, otherwise the failing habmm_* error code.
 */
static int scm_qcpe_hab_send_receive_atomic(struct smc_params_s *smc_params,
	u32 *size_bytes)
{
	unsigned long timeout;
	int rc;

	timeout = jiffies + (HZ); /* 1 second delay for send */
	do {
		rc = habmm_socket_send(handle,
			smc_params, sizeof(*smc_params),
			HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
	} while ((rc == -EAGAIN) && time_before(jiffies, timeout));

	if (rc) {
		pr_err("HAB send failed, non-blocking, ret= 0x%x\n", rc);
		return rc;
	}

	/* The request buffer is reused to hold the reply. */
	memset(smc_params, 0x0, sizeof(*smc_params));

	timeout = jiffies + (HZ); /* 1 second delay for receive */
	do {
		*size_bytes = sizeof(*smc_params);
		rc = habmm_socket_recv(handle, smc_params, size_bytes, 0,
			HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
	} while ((rc == -EAGAIN) && time_before(jiffies, timeout) &&
		(*size_bytes == 0));

	if (rc) {
		pr_err("HAB recv failed, non-blocking, ret= 0x%x\n", rc);
		return rc;
	}

	return 0;
}


/*
 * Forward one SMC request to the QCPE back-end over HAB and unpack the
 * response registers into @res.
 *
 * @smc:    packed SMC id/arginfo/arguments to forward
 * @res:    output; filled with the four response registers on success
 * @atomic: true when called from atomic context (uses the non-blocking
 *          HAB path; no channel recovery or re-open is attempted)
 *
 * Returns the SCM return code (res->a0) on success, or a negative
 * transport error (-ENODEV, habmm error, QCOM_SCM_ERROR) on failure.
 */
static int scm_call_qcpe(const struct arm_smccc_args *smc,
			 struct arm_smccc_res *res, const bool atomic)
{
	u32 size_bytes;
	struct smc_params_s smc_params = {0,};
	int ret;
#ifdef CONFIG_GHS_VMM
	int i;
	uint64_t arglen = smc->a[1] & 0xf;
	struct ion_handle *ihandle = NULL;
#endif

	/* Fixed: the last argument used to duplicate smc->a[5]. */
	pr_info("SCM IN [QCPE]: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
		smc->a[0], smc->a[1], smc->a[2], smc->a[3], smc->a[4],
		smc->a[5]);

	if (!opened) {
		if (!atomic) {
			if (scm_qcpe_hab_open()) {
				pr_err("HAB channel re-open failed\n");
				return -ENODEV;
			}
		} else {
			/* Cannot sleep to open the channel in atomic context. */
			pr_err("HAB channel is not opened\n");
			return -ENODEV;
		}
	}

	smc_params.fn_id   = smc->a[0];
	smc_params.arginfo = smc->a[1];
	smc_params.args[0] = smc->a[2];
	smc_params.args[1] = smc->a[3];
	smc_params.args[2] = smc->a[4];

#ifdef CONFIG_GHS_VMM
	if (arglen <= N_REGISTER_ARGS) {
		smc_params.args[FIRST_EXT_ARG_IDX] = smc->a[5];
	} else {
		/*
		 * NOTE(review): 'desc' is not declared in this function —
		 * this path looks like it cannot compile under
		 * CONFIG_GHS_VMM; TODO confirm against the full tree.
		 */
		struct scm_extra_arg *argbuf =
				(struct scm_extra_arg *)desc->extra_arg_buf;
		int j = 0;

		if (scm_version == SMC_CONVENTION_ARM_64)
			for (i = FIRST_EXT_ARG_IDX; i < MAX_QCOM_SCM_ARGS; i++)
				smc_params.args[i] = argbuf->args64[j++];
		else
			for (i = FIRST_EXT_ARG_IDX; i < MAX_QCOM_SCM_ARGS; i++)
				smc_params.args[i] = argbuf->args32[j++];
	}

	ret = ionize_buffers(smc->a[0] & (~SMC64_MASK), &smc_params, &ihandle);
	if (ret)
		return ret;
#else
	smc_params.args[3] = smc->a[5];
	smc_params.args[4] = 0;
#endif

	if (!atomic) {
		ret = scm_qcpe_hab_send_receive(&smc_params, &size_bytes);
		if (ret) {
			pr_err("send/receive failed, non-atomic, ret= 0x%x\n",
				ret);
			goto err_ret;
		}
	} else {
		ret = scm_qcpe_hab_send_receive_atomic(&smc_params,
			&size_bytes);
		if (ret) {
			pr_err("send/receive failed, ret= 0x%x\n", ret);
			goto err_ret;
		}
	}

	if (size_bytes != sizeof(smc_params)) {
		pr_err("habmm_socket_recv expected size: %lu, actual=%u\n",
				sizeof(smc_params),
				size_bytes);
		ret = QCOM_SCM_ERROR;
		goto err_ret;
	}

	res->a1 = smc_params.args[1];
	res->a2 = smc_params.args[2];
	res->a3 = smc_params.args[3];
	res->a0 = smc_params.args[0];
	pr_info("SCM OUT [QCPE]: 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
		res->a0, res->a1, res->a2, res->a3);
	goto no_err;

err_ret:
	if (!atomic) {
		/* In case of an error, try to recover the hab connection
		 * for next time. This can only be done if called in
		 * non-atomic context.
		 */
		scm_qcpe_hab_close();
		if (scm_qcpe_hab_open())
			pr_err("scm_qcpe_hab_open failed\n");
	}

no_err:
#ifdef CONFIG_GHS_VMM
	if (ihandle)
		free_ion_buffers(ihandle);
#endif
	/*
	 * On the error path res->a0 was never written, so propagate the
	 * transport error instead of returning uninitialized data
	 * (previously this returned res->a0 unconditionally).
	 */
	return ret ? ret : res->a0;
}

#endif /* CONFIG_QCOM_SCM_QCPE */

static void __qcom_scm_call_do_quirk(const struct arm_smccc_args *smc,
				     struct arm_smccc_res *res)
				     struct arm_smccc_res *res,
				     const bool atomic)
{
	unsigned long a0 = smc->a[0];
	ktime_t time;
	const bool trace = trace_scm_call_enabled();
#if !(IS_ENABLED(CONFIG_QCOM_SCM_QCPE))
	unsigned long a0 = smc->a[0];
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

	quirk.state.a6 = 0;

#endif
	if (trace)
		time = ktime_get();

#if IS_ENABLED(CONFIG_QCOM_SCM_QCPE)
	scm_call_qcpe(smc, res, atomic);
#else
	do {
		arm_smccc_smc_quirk(a0, smc->a[1], smc->a[2], smc->a[3],
				    smc->a[4], smc->a[5], quirk.state.a6,
@@ -190,7 +424,7 @@ static void __qcom_scm_call_do_quirk(const struct arm_smccc_args *smc,
			a0 = res->a0;

	} while (res->a0 == QCOM_SCM_INTERRUPTED);

#endif
	if (trace)
		trace_scm_call(smc->a, res, ktime_us_delta(ktime_get(), time));
}
@@ -266,13 +500,13 @@ static int qcom_scm_call_smccc(struct device *dev,
	}

	if (atomic) {
		__qcom_scm_call_do_quirk(&smc, &res);
		__qcom_scm_call_do_quirk(&smc, &res, true);
	} else {
		int retry_count = 0;

		do {
			mutex_lock(&qcom_scm_lock);
			__qcom_scm_call_do_quirk(&smc, &res);
			__qcom_scm_call_do_quirk(&smc, &res, false);
			mutex_unlock(&qcom_scm_lock);

			if (res.a0 == QCOM_SCM_V2_EBUSY) {
@@ -2169,5 +2403,21 @@ int __init scm_mem_protection_init_do(struct device *dev)

void __qcom_scm_init(void)
{
#if IS_ENABLED(CONFIG_QCOM_SCM_QCPE)
	/*
	 * The HAB connection should be opened before the first SMC
	 * call.  If not, there could be errors that might cause the
	 * system to crash.
	 */
	scm_qcpe_hab_open();
#endif
	__query_convention();
}

#if IS_ENABLED(CONFIG_QCOM_SCM_QCPE)
/* Close the HAB channel to QCPE; counterpart of the open performed in
 * __qcom_scm_init(), called on module teardown.
 */
void __qcom_scm_qcpe_exit(void)
{
	scm_qcpe_hab_close();
}
#endif
+3 −0
Original line number Diff line number Diff line
@@ -1213,6 +1213,9 @@ early_initcall(scm_mem_protection_init);
#if IS_MODULE(CONFIG_QCOM_SCM)
/* Module unload hook: shut down the QCPE HAB channel (when enabled)
 * before unregistering the platform driver and the shm-bridge.
 */
static void __exit qcom_scm_exit(void)
{
#if IS_ENABLED(CONFIG_QCOM_SCM_QCPE)
	__qcom_scm_qcpe_exit();
#endif
	platform_driver_unregister(&qcom_scm_driver);
	qtee_shmbridge_driver_exit();
}