
Commit bbb755a7 authored by Prasad Sodagudi

soc: qcom: Remove legacy scm_call API support



Legacy scm_call support is deprecated, so remove the
scm_call and scm_call_atomic* APIs.

Change-Id: I32b9b7638080911ed167db11ae15bbfe589537e5
Signed-off-by: Prasad Sodagudi <psodagud@codeaurora.org>
parent c65d315d
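For callers still on the legacy interface, the replacement is the scm_desc-based scm_call2() path, which this commit keeps. A minimal migration sketch, assuming hypothetical MY_SVC and MY_CMD identifiers (placeholders for illustration, not real SCM services):

#include <soc/qcom/scm.h>

/*
 * Hypothetical migration from the removed scm_call() to scm_call2().
 * MY_SVC and MY_CMD are placeholder IDs used only for illustration.
 */
static int example_migrated_call(u32 arg)
{
	struct scm_desc desc = {0};
	int ret;

	desc.args[0] = arg;
	desc.arginfo = SCM_ARGS(1);

	/* legacy form: scm_call(MY_SVC, MY_CMD, &arg, sizeof(arg), NULL, 0) */
	ret = scm_call2(SCM_SIP_FNID(MY_SVC, MY_CMD), &desc);
	if (ret)
		return ret;

	return desc.ret[0];
}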
drivers/soc/qcom/Makefile

+1 −1
@@ -8,4 +8,4 @@ obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM)	+= smsm.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
obj-$(CONFIG_QCOM_SCM)  +=      scm.o scm-boot.o
obj-$(CONFIG_QCOM_SCM)  +=      scm.o

drivers/soc/qcom/scm-boot.c

deleted 100644 → 0
+0 −111
/* Copyright (c) 2010, 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/scm-boot.h>

/*
 * Set the cold/warm boot address for one of the CPU cores.
 */
int scm_set_boot_addr(phys_addr_t addr, unsigned int flags)
{
	struct {
		u32 flags;
		u32 addr;
	} cmd;

	cmd.addr = addr;
	cmd.flags = flags;
	return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
			&cmd, sizeof(cmd), NULL, 0);
}
EXPORT_SYMBOL(scm_set_boot_addr);

/**
 *	scm_set_boot_addr_mc - Set entry physical address for cpus
 *	@addr:	32bit physical address
 *	@aff0:	Collective bitmask of the affinity-level-0 of the mpidr
 *		1 << aff0_CPU0 | 1 << aff0_CPU1 | ... | 1 << aff0_CPU31
 *		Supports a maximum of 32 CPUs under any affinity level.
 *	@aff1:	Collective bitmask of the affinity-level-1 of the mpidr
 *	@aff2:	Collective bitmask of the affinity-level-2 of the mpidr
 *	@flags:	Flag to differentiate between coldboot and warmboot
 */
int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
		u32 aff1, u32 aff2, u32 flags)
{
	struct {
		u32 addr;
		u32 aff0;
		u32 aff1;
		u32 aff2;
		u32 reserved;
		u32 flags;
	} cmd;
	struct scm_desc desc = {0};

	if (!is_scm_armv8()) {
		cmd.addr = addr;
		cmd.aff0 = aff0;
		cmd.aff1 = aff1;
		cmd.aff2 = aff2;
		/*
		 * Reserved for future chips with affinity level 3 effectively
		 * 1 << 0
		 */
		cmd.reserved = ~0U;
		cmd.flags = flags | SCM_FLAG_HLOS;
		return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC,
				&cmd, sizeof(cmd), NULL, 0);
	}

	flags = flags | SCM_FLAG_HLOS;
	desc.args[0] = addr;
	desc.args[1] = aff0;
	desc.args[2] = aff1;
	desc.args[3] = aff2;
	desc.args[4] = ~0ULL;
	desc.args[5] = flags;
	desc.arginfo = SCM_ARGS(6);

	return scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC), &desc);
}
EXPORT_SYMBOL(scm_set_boot_addr_mc);

/**
 *	scm_set_warm_boot_addr_mc_for_all -
 *	Set the warm boot entry physical address for __all__ possible cpus.
 *	This API passes an all-set mask to the secure OS and relies on the
 *	secure OS to set the boot address appropriately on the current system.
 *	@addr:	32bit physical address
 */
int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr)
{
	return scm_set_boot_addr_mc(addr, ~0U, ~0U, ~0U,
			SCM_FLAG_WARMBOOT_MC);
}
EXPORT_SYMBOL(scm_set_warm_boot_addr_mc_for_all);

/**
 *	scm_is_mc_boot_available -
 *	Checks whether TZ supports the boot API for multi-cluster configurations.
 *	Returns a positive value when available, 0 when not, and a negative
 *	error code if the query itself fails.
 */
int scm_is_mc_boot_available(void)
{
	return scm_is_call_available(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC);
}
EXPORT_SYMBOL(scm_is_mc_boot_available);
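The ARMv8 branch of the deleted scm_set_boot_addr_mc() above uses the calling convention that survives this commit; as a sketch, assuming the SCM_SVC_BOOT/SCM_BOOT_ADDR_MC identifiers stay as shown, an equivalent standalone helper would be:

static int example_set_boot_addr_mc(phys_addr_t addr, u32 aff0, u32 aff1,
				    u32 aff2, u32 flags)
{
	struct scm_desc desc = {0};

	/* Mirrors the is_scm_armv8() branch of the function removed above. */
	desc.args[0] = addr;
	desc.args[1] = aff0;
	desc.args[2] = aff1;
	desc.args[3] = aff2;
	desc.args[4] = ~0ULL;	/* reserved, kept all-set as in the original */
	desc.args[5] = flags | SCM_FLAG_HLOS;
	desc.arginfo = SCM_ARGS(6);

	return scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC), &desc);
}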
drivers/soc/qcom/scm.c

+20 −626
@@ -56,60 +56,6 @@ DEFINE_MUTEX(scm_lmh_lock);
#define SMC_ATOMIC_MASK 0x80000000
#define IS_CALL_AVAIL_CMD 1

#define SCM_BUF_LEN(__cmd_size, __resp_size) ({ \
	size_t x =  __cmd_size + __resp_size; \
	size_t y = sizeof(struct scm_command) + sizeof(struct scm_response); \
	size_t result; \
	if (x < __cmd_size || (x + y) < x) \
		result = 0; \
	else \
		result = x + y; \
	result; \
	})
/**
 * struct scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *	------------------- <--- struct scm_command
 *	| command header  |
 *	------------------- <--- scm_get_command_buffer()
 *	| command buffer  |
 *	------------------- <--- struct scm_response and
 *	| response header |      scm_command_to_response()
 *	------------------- <--- scm_get_response_buffer()
 *	| response buffer |
 *	-------------------
 *
 * There can be arbitrary padding between the headers and buffers so
 * you should always use the appropriate scm_get_*_buffer() routines
 * to access the buffers in a safe manner.
 */
struct scm_command {
	u32	len;
	u32	buf_offset;
	u32	resp_hdr_offset;
	u32	id;
	u32	buf[0];
};

/**
 * struct scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct scm_response {
	u32	len;
	u32	buf_offset;
	u32	is_complete;
};
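
/*
 * Worked example of the layout above, assuming no implicit padding
 * (both structs are plain arrays of u32): for an 8-byte command
 * payload and a 4-byte response payload,
 *
 *	SCM_BUF_LEN(8, 4) = (8 + 4) + sizeof(struct scm_command)
 *			    + sizeof(struct scm_response)
 *			  = 12 + 16 + 12 = 40 bytes,
 *
 * with buf_offset = offsetof(struct scm_command, buf) = 16 and
 * resp_hdr_offset = buf_offset + cmd_len = 24. SCM_BUF_LEN() returns 0
 * when either addition overflows, which callers treat as -EINVAL.
 */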

#ifdef CONFIG_ARM64

#define R0_STR "x0"
@@ -137,40 +83,6 @@ struct scm_response {

#endif

/**
 * scm_command_to_response() - Get a pointer to a scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct scm_response *scm_command_to_response(
		const struct scm_command *cmd)
{
	return (void *)cmd + cmd->resp_hdr_offset;
}

/**
 * scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
	return (void *)rsp + rsp->buf_offset;
}

static int scm_remap_error(int err)
{
	switch (err) {
@@ -191,193 +103,6 @@ static int scm_remap_error(int err)
	return -EINVAL;
}

static u32 smc(u32 cmd_addr)
{
	int context_id;

	register u32 r0 asm(R0_STR) = 1;
	register u32 r1 asm(R1_STR) = (uintptr_t)&context_id;
	register u32 r2 asm(R2_STR) = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", R0_STR)
			__asmeq("%1", R0_STR)
			__asmeq("%2", R1_STR)
			__asmeq("%3", R2_STR)
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc	#0\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: R3_STR);
	} while (r0 == SCM_INTERRUPTED);

	return r0;
}

static int __scm_call(const struct scm_command *cmd)
{
	int ret;
	u32 cmd_addr = virt_to_phys(cmd);

	/*
	 * Flush the command buffer so that the secure world sees
	 * the correct data.
	 */
	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
	outer_flush_range(cmd_addr, cmd_addr + cmd->len);

	ret = smc(cmd_addr);
	if (ret < 0) {
		if (ret != SCM_EBUSY)
			pr_err("scm_call failed with error code %d\n", ret);
		ret = scm_remap_error(ret);
	}
	return ret;
}

#ifndef CONFIG_ARM64
static void scm_inv_range(unsigned long start, unsigned long end)
{
	u32 cacheline_size, ctr;

	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size = 4 << ((ctr >> 16) & 0xf);

	start = round_down(start, cacheline_size);
	end = round_up(end, cacheline_size);
	outer_inv_range(start, end);
	while (start < end) {
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
		     : "memory");
		start += cacheline_size;
	}
	dsb();
	isb();
}
#else

static void scm_inv_range(unsigned long start, unsigned long end)
{
	dmac_inv_range((void *)start, (void *)end);
}
#endif
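
/*
 * Note on the CTR read in the 32-bit variant above: bits [19:16]
 * (DminLine) encode log2 of the smallest D-cache line in 4-byte words,
 * so 4 << DminLine is the line size in bytes; e.g. DminLine == 4
 * decodes to a 64-byte cache line.
 */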

/**
 * scm_call_common() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 * @scm_buf: internal scm structure used for passing data
 * @scm_buf_len: length of the internal scm structure
 *
 * Core function to scm call. Initializes the given cmd structure with
 * appropriate values and makes the actual scm call. Validation of cmd
 * pointer and length must occur in the calling function.
 *
 * Returns the appropriate error code from the scm call
 */

static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
				size_t cmd_len, void *resp_buf, size_t resp_len,
				struct scm_command *scm_buf,
				size_t scm_buf_length)
{
	int ret;
	struct scm_response *rsp;
	unsigned long start, end;

	scm_buf->len = scm_buf_length;
	scm_buf->buf_offset = offsetof(struct scm_command, buf);
	scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len;
	scm_buf->id = (svc_id << 10) | cmd_id;

	if (cmd_buf)
		memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len);

	mutex_lock(&scm_lock);
	ret = __scm_call(scm_buf);
	mutex_unlock(&scm_lock);
	if (ret)
		return ret;

	rsp = scm_command_to_response(scm_buf);
	start = (unsigned long)rsp;

	do {
		scm_inv_range(start, start + sizeof(*rsp));
	} while (!rsp->is_complete);

	end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
	scm_inv_range(start, end);

	if (resp_buf)
		memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);

	return ret;
}

/*
 * Sometimes the secure world may be busy waiting for a particular resource.
 * In those situations, it is expected that the secure world returns a special
 * error code (SCM_EBUSY). Retry any scm_call that fails with this error code,
 * but with a timeout in place. Also, don't move this into scm_call_common,
 * since we want the first attempt to be the "fastpath".
 */
static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
				size_t cmd_len, void *resp_buf, size_t resp_len,
				struct scm_command *cmd,
				size_t len)
{
	int ret, retry_count = 0;

	do {
		ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
					resp_buf, resp_len, cmd, len);
		if (ret == SCM_EBUSY)
			msleep(SCM_EBUSY_WAIT_MS);
		if (retry_count == 33)
			pr_warn("scm: secure world has been busy for 1 second!\n");
	} while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));

	if (ret == SCM_EBUSY)
		pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");

	return ret;
}
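
/*
 * Timing of the loop above: the warning fires after 33 retries, which
 * with the SCM_EBUSY_WAIT_MS delay (not shown in this hunk, roughly
 * 30ms per retry) corresponds to about one second of back-to-back
 * SCM_EBUSY responses.
 */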

/**
 * scm_call_noalloc - Send an SCM command
 *
 * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for
 * scm internal structures. The buffer should be allocated with
 * DEFINE_SCM_BUFFER to account for the proper alignment and size.
 */
int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
		size_t cmd_len, void *resp_buf, size_t resp_len,
		void *scm_buf, size_t scm_buf_len)
{
	int ret;
	size_t len = SCM_BUF_LEN(cmd_len, resp_len);

	if (len == 0)
		return -EINVAL;

	if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE))
		return -EINVAL;

	memset(scm_buf, 0, scm_buf_len);

	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
				resp_len, scm_buf, len);
	return ret;

}

#ifdef CONFIG_ARM64

static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
@@ -541,7 +266,7 @@ static enum scm_interface_version {
/* This will be set to specify SMC32 or SMC64 */
static u32 scm_version_mask;

bool is_scm_armv8(void)
static bool is_scm_armv8(void)
{
	int ret;
	u64 ret1, x0;
@@ -580,7 +305,6 @@ bool is_scm_armv8(void)
	return (scm_version == SCM_ARMV8_32) ||
			(scm_version == SCM_ARMV8_64);
}
EXPORT_SYMBOL(is_scm_armv8);

/*
 * If there are more than N_REGISTER_ARGS, allocate a buffer and place
@@ -755,292 +479,7 @@ int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
		return scm_remap_error(ret);
	return ret;
}

/**
 * scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking scm_call and invalidated in the cache
 * immediately after scm_call returns. Cache maintenance on the command and
 * response buffers is taken care of by scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
		void *resp_buf, size_t resp_len)
{
	struct scm_command *cmd;
	int ret;
	size_t len = SCM_BUF_LEN(cmd_len, resp_len);

	if (len == 0 || PAGE_ALIGN(len) < len)
		return -EINVAL;

	cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
				resp_len, cmd, len);
	if (unlikely(ret == SCM_EBUSY))
		ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
				      resp_buf, resp_len, cmd, PAGE_ALIGN(len));
	kfree(cmd);
	return ret;
}
EXPORT_SYMBOL(scm_call);
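
/*
 * Illustrative (hypothetical) legacy caller honouring the cache rules
 * documented above scm_call(): a buffer handed to the secure world by
 * physical address is flushed before the call and invalidated after
 * (32-bit path shown). EXAMPLE_SVC and EXAMPLE_CMD are placeholders.
 */
static int example_legacy_shared_buf_call(void *shared, size_t len)
{
	phys_addr_t pa = virt_to_phys(shared);
	u32 cmd = (u32)pa;
	int ret;

	__cpuc_flush_dcache_area(shared, len);	/* flush before the call */
	outer_flush_range(pa, pa + len);

	ret = scm_call(EXAMPLE_SVC, EXAMPLE_CMD, &cmd, sizeof(cmd), NULL, 0);

	scm_inv_range((unsigned long)shared,	/* invalidate afterwards */
		      (unsigned long)shared + len);
	return ret;
}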

#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				(n & 0xf))
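
/*
 * Bit layout produced by SCM_ATOMIC() above: the (svc << 10 | cmd)
 * pair lands in bits [31:12], SCM_CLASS_REGISTER sets bits [9:8] to
 * 0b10, SCM_MASK_IRQS sets bit 5, and the argument count n occupies
 * bits [3:0].
 */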

/**
 * scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;

	register u32 r0 asm(R0_STR) = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm(R1_STR) = (uintptr_t)&context_id;
	register u32 r2 asm(R2_STR) = arg1;

	asm volatile(
		__asmeq("%0", R0_STR)
		__asmeq("%1", R0_STR)
		__asmeq("%2", R1_STR)
		__asmeq("%3", R2_STR)
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
		"smc	#0\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: R3_STR);
	return r0;
}
EXPORT_SYMBOL(scm_call_atomic1);

/**
 * scm_call_atomic1_1() - SCM command with one argument and one return value
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @ret1: first return value
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
{
	int context_id;

	register u32 r0 asm(R0_STR) = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm(R1_STR) = (uintptr_t)&context_id;
	register u32 r2 asm(R2_STR) = arg1;

	asm volatile(
		__asmeq("%0", R0_STR)
		__asmeq("%1", R1_STR)
		__asmeq("%2", R0_STR)
		__asmeq("%3", R1_STR)
		__asmeq("%4", R2_STR)
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
		"smc	#0\n"
		: "=r" (r0), "=r" (r1)
		: "r" (r0), "r" (r1), "r" (r2)
		: R3_STR);
	if (ret1)
		*ret1 = r1;
	return r0;
}
EXPORT_SYMBOL(scm_call_atomic1_1);

/**
 * scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
	int context_id;

	register u32 r0 asm(R0_STR) = SCM_ATOMIC(svc, cmd, 2);
	register u32 r1 asm(R1_STR) = (uintptr_t)&context_id;
	register u32 r2 asm(R2_STR) = arg1;
	register u32 r3 asm(R3_STR) = arg2;

	asm volatile(
		__asmeq("%0", R0_STR)
		__asmeq("%1", R0_STR)
		__asmeq("%2", R1_STR)
		__asmeq("%3", R2_STR)
		__asmeq("%4", R3_STR)
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
		"smc	#0\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
	return r0;
}
EXPORT_SYMBOL(scm_call_atomic2);

/**
 * scm_call_atomic3() - Send an atomic SCM command with three arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 * @arg3: third argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
{
	int context_id;

	register u32 r0 asm(R0_STR) = SCM_ATOMIC(svc, cmd, 3);
	register u32 r1 asm(R1_STR) = (uintptr_t)&context_id;
	register u32 r2 asm(R2_STR) = arg1;
	register u32 r3 asm(R3_STR) = arg2;
	register u32 r4 asm(R4_STR) = arg3;

	asm volatile(
		__asmeq("%0", R0_STR)
		__asmeq("%1", R0_STR)
		__asmeq("%2", R1_STR)
		__asmeq("%3", R2_STR)
		__asmeq("%4", R3_STR)
		__asmeq("%5", R4_STR)
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
		"smc	#0\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4));
	return r0;
}
EXPORT_SYMBOL(scm_call_atomic3);

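/**
 * scm_call_atomic4_3() - SCM command with four arguments and two return values
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 * @arg3: third argument
 * @arg4: fourth argument
 * @ret1: first return value
 * @ret2: second return value
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */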
s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
		u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
{
	int ret;
	int context_id;

	register u32 r0 asm(R0_STR) = SCM_ATOMIC(svc, cmd, 4);
	register u32 r1 asm(R1_STR) = (uintptr_t)&context_id;
	register u32 r2 asm(R2_STR) = arg1;
	register u32 r3 asm(R3_STR) = arg2;
	register u32 r4 asm(R4_STR) = arg3;
	register u32 r5 asm(R5_STR) = arg4;

	asm volatile(
		__asmeq("%0", R0_STR)
		__asmeq("%1", R1_STR)
		__asmeq("%2", R2_STR)
		__asmeq("%3", R0_STR)
		__asmeq("%4", R1_STR)
		__asmeq("%5", R2_STR)
		__asmeq("%6", R3_STR)
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
		"smc	#0\n"
		: "=r" (r0), "=r" (r1), "=r" (r2)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5));
	ret = r0;
	if (ret1)
		*ret1 = r1;
	if (ret2)
		*ret2 = r2;
	return r0;
}
EXPORT_SYMBOL(scm_call_atomic4_3);

/**
 * scm_call_atomic5_3() - SCM command with five arguments and three return values
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 * @arg3: third argument
 * @arg4: fourth argument
 * @arg5: fifth argument
 * @ret1: first return value
 * @ret2: second return value
 * @ret3: third return value
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
	u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
{
	int ret;
	int context_id;

	register u32 r0 asm(R0_STR) = SCM_ATOMIC(svc, cmd, 5);
	register u32 r1 asm(R1_STR) = (uintptr_t)&context_id;
	register u32 r2 asm(R2_STR) = arg1;
	register u32 r3 asm(R3_STR) = arg2;
	register u32 r4 asm(R4_STR) = arg3;
	register u32 r5 asm(R5_STR) = arg4;
	register u32 r6 asm(R6_STR) = arg5;

	asm volatile(
		__asmeq("%0", R0_STR)
		__asmeq("%1", R1_STR)
		__asmeq("%2", R2_STR)
		__asmeq("%3", R3_STR)
		__asmeq("%4", R0_STR)
		__asmeq("%5", R1_STR)
		__asmeq("%6", R2_STR)
		__asmeq("%7", R3_STR)
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
		"smc	#0\n"
		: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5),
		 "r" (r6));
	ret = r0;

	if (ret1)
		*ret1 = r1;
	if (ret2)
		*ret2 = r2;
	if (ret3)
		*ret3 = r3;
	return r0;
}
EXPORT_SYMBOL(scm_call_atomic5_3);
EXPORT_SYMBOL(scm_call2_atomic);

u32 scm_get_version(void)
{
@@ -1084,34 +523,27 @@ EXPORT_SYMBOL(scm_get_version);

u32 scm_io_read(phys_addr_t address)
{
	if (!is_scm_armv8()) {
		return scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, address);
	} else {
	struct scm_desc desc = {
		.args[0] = address,
		.arginfo = SCM_ARGS(1),
	};

	scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc);
	return desc.ret[0];
}
}
EXPORT_SYMBOL(scm_io_read);

int scm_io_write(phys_addr_t address, u32 val)
{
	int ret;

	if (!is_scm_armv8()) {
		ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
	} else {
	struct scm_desc desc = {
		.args[0] = address,
		.args[1] = val,
		.arginfo = SCM_ARGS(2),
	};

	ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
					&desc);
	}
	return ret;
}
EXPORT_SYMBOL(scm_io_write);
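
/*
 * Hypothetical usage of the secure IO helpers above; example_reg_pa is
 * a placeholder physical register address, not a real one.
 */
static void example_secure_reg_set_bit0(phys_addr_t example_reg_pa)
{
	u32 val = scm_io_read(example_reg_pa);

	if (scm_io_write(example_reg_pa, val | 0x1))
		pr_err("secure register write failed\n");
}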
@@ -1121,17 +553,6 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id)
	int ret;
	struct scm_desc desc = {0};

	if (!is_scm_armv8()) {
		u32 ret_val = 0;
		u32 svc_cmd = (svc_id << 10) | cmd_id;

		ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
			sizeof(svc_cmd), &ret_val, sizeof(ret_val));
		if (ret)
			return ret;

		return ret_val;
	}
	desc.arginfo = SCM_ARGS(1);
	desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
@@ -1148,17 +569,6 @@ int scm_get_feat_version(u32 feat)
	struct scm_desc desc = {0};
	int ret;

	if (!is_scm_armv8()) {
		if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
			u32 version;

			if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
				      sizeof(feat), &version, sizeof(version)))
				return version;
		}
		return 0;
	}

	ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
	if (ret <= 0)
		return 0;
@@ -1179,21 +589,10 @@ int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
{
	struct scm_desc desc = {0};
	int ret;
	struct restore_sec_cfg {
		u32 device_id;
		u32 spare;
	} cfg;

	cfg.device_id = device_id;
	cfg.spare = spare;

	if (IS_ERR_OR_NULL(scm_ret))
		return -EINVAL;

	if (!is_scm_armv8())
		return scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &cfg, sizeof(cfg),
				scm_ret, sizeof(*scm_ret));

	desc.args[0] = device_id;
	desc.args[1] = spare;
	desc.arginfo = SCM_ARGS(2);
@@ -1221,15 +620,10 @@ bool scm_is_secure_device(void)

	desc.args[0] = 0;
	desc.arginfo = 0;
	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
			0, &resp, sizeof(resp));
	} else {
	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
			TZ_INFO_GET_SECURE_STATE),
			&desc);
	resp = desc.ret[0];
	}

	if (ret) {
		pr_err("%s: SCM call failed\n", __func__);
include/soc/qcom/scm.h

+2 −74
/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -95,55 +95,23 @@ struct scm_desc {
};

#ifdef CONFIG_QCOM_SCM
extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
		void *resp_buf, size_t resp_len);

#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
extern int scm_call2(u32 cmd_id, struct scm_desc *desc);

extern int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc);

extern int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
		size_t cmd_len, void *resp_buf, size_t resp_len,
		void *scm_buf, size_t scm_buf_size);


extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1);
extern s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1);
extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2);
extern s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3);
extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
		u32 arg4, u32 *ret1, u32 *ret2);
extern s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
		u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3);

#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))

extern u32 scm_get_version(void);
extern int scm_is_call_available(u32 svc_id, u32 cmd_id);
extern int scm_get_feat_version(u32 feat);
extern bool is_scm_armv8(void);
extern int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret);
extern u32 scm_io_read(phys_addr_t address);
extern int scm_io_write(phys_addr_t address, u32 val);
extern bool scm_is_secure_device(void);

#define SCM_HDCP_MAX_REG 5

struct scm_hdcp_req {
	u32 addr;
	u32 val;
};

extern struct mutex scm_lmh_lock;

#else

static inline int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
		size_t cmd_len, void *resp_buf, size_t resp_len)
{
	return 0;
}

static inline int scm_call2(u32 cmd_id, struct scm_desc *desc)
{
	return 0;
@@ -154,46 +122,6 @@ static inline int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc)
	return 0;
}

static inline int scm_call_noalloc(u32 svc_id, u32 cmd_id,
		const void *cmd_buf, size_t cmd_len, void *resp_buf,
		size_t resp_len, void *scm_buf, size_t scm_buf_size)
{
	return 0;
}

static inline s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	return 0;
}

static inline s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
{
	return 0;
}

static inline s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
	return 0;
}

static inline s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
		u32 arg3)
{
	return 0;
}

static inline s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
		u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
{
	return 0;
}

static inline s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
	u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
{
	return 0;
}

static inline u32 scm_get_version(void)
{
	return 0;