
Commit cdd72fdb authored by Yong Ding

soc: qcom: add HAB driver



Add snapshot for HAB from msm-4.14 commit b1ff2848541a (soc: qcom: hab: fix memory leak).

Change-Id: I88cdfd2c3530320b80a64f294b59611d4a80cc08
Signed-off-by: Yong Ding <yongding@codeaurora.org>
parent 5bf38f99
drivers/soc/qcom/Kconfig  +2 −0
@@ -601,6 +601,8 @@ config QCOM_GLINK_PKT
	  This enables the userspace clients to read and write to
	  some glink packet channels.

source "drivers/soc/qcom/hab/Kconfig"

config MSM_PERFORMANCE
        tristate "msm performance driver to support userspace fmin/fmax request"
        help
drivers/soc/qcom/Makefile  +1 −0
@@ -90,6 +90,7 @@ obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm.o crypto-qti-hwkm.o
obj-$(CONFIG_QCOM_WDT_CORE) += qcom_wdt_core.o
obj-$(CONFIG_QCOM_SOC_WATCHDOG) += qcom_soc_wdt.o
obj-$(CONFIG_MSM_HAB) += hab/
ifdef CONFIG_DEBUG_FS
obj-$(CONFIG_MSM_RPM_SMD)   +=  rpm-smd-debug.o
endif
drivers/soc/qcom/hab/Kconfig  +22 −0 (new file)
# SPDX-License-Identifier: GPL-2.0-only
#
# HAB configuration
#
config MSM_HAB
	bool "Enable Hypervisor ABstraction Layer"
	select WANT_DEV_COREDUMP
	help
	  The HAB (Hypervisor ABstraction) driver provides message
	  transmission and memory sharing services among different OSes.
	  Internally, HAB relies on the communication mechanisms provided
	  by the underlying hypervisor.
	  It is required for the virtualization support of some multimedia
	  and platform devices on MSM devices.

config MSM_HAB_DEFAULT_VMID
	int
	default 2
	help
	  The default HAB VMID.
	  It is not used when another configuration source, such as the
	  device tree, provides one.
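
For orientation only (not part of this commit): because MSM_HAB is a bool and MSM_HAB_DEFAULT_VMID has no prompt, enabling the driver in a target defconfig reduces to a single line, with the VMID symbol taking its default in the generated .config, roughly:

CONFIG_MSM_HAB=y
# the generated .config will then also contain CONFIG_MSM_HAB_DEFAULT_VMID=2,
# unless, as the help text says, another source such as the device tree
# overrides the VMID at runtime.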
drivers/soc/qcom/hab/Makefile  +39 −0 (new file)
# SPDX-License-Identifier: GPL-2.0-only
msm_hab-objs = \
	hab.o \
	hab_msg.o \
	hab_vchan.o \
	hab_pchan.o \
	hab_open.o \
	hab_mimex.o \
	hab_pipe.o \
	hab_parser.o \
	hab_stat.o

msm_hab_linux-objs = \
	khab.o \
	hab_linux.o \
	hab_mem_linux.o \
	khab_test.o

ifdef CONFIG_GHS_VMM
msm_hab_hyp-objs = \
	ghs_comm.o \
	ghs_comm_linux.o \
	hab_ghs.o \
	hab_ghs_linux.o
else
ifdef CONFIG_QTI_QUIN_GVM
msm_hab_hyp-objs = \
	qvm_comm.o \
	qvm_comm_linux.o \
	hab_qvm.o \
	hab_qvm_linux.o
else
msm_hab_hyp-objs = \
	hab_comm.o \
	hyp_stub.o
endif
endif

obj-$(CONFIG_MSM_HAB) += msm_hab.o msm_hab_linux.o msm_hab_hyp.o
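
The Kconfig help text above describes HAB as a message-passing and memory-sharing service between guest OSes; khab.o in this Makefile is the kernel-facing wrapper around it. As a rough sketch of how a kernel client might exercise that service through the habmm_socket_* calls: the header path, the MM_AUD_1 channel ID and the exact signatures below are assumptions for illustration and are not part of this diff.

/*
 * Hypothetical HAB kernel client, for illustration only.
 * habmm_socket_* are assumed to be exported by khab.o above; the header
 * location, signatures and MM_AUD_1 ID are assumptions, not part of this
 * commit.
 */
#include <linux/module.h>
#include <linux/habmm.h>	/* assumed header providing the habmm API */

static int32_t hab_handle;

static int __init hab_client_init(void)
{
	char msg[] = "hello from the frontend VM";
	int32_t ret;

	/* open a virtual channel to the backend VM on an assumed MM ID */
	ret = habmm_socket_open(&hab_handle, MM_AUD_1, 0, 0);
	if (ret)
		return ret;

	/* send a small payload; HAB prepends its own hab_header on the wire */
	ret = habmm_socket_send(hab_handle, msg, sizeof(msg), 0);
	if (ret)
		habmm_socket_close(hab_handle);

	return ret;
}

static void __exit hab_client_exit(void)
{
	habmm_socket_close(hab_handle);
}

module_init(hab_client_init);
module_exit(hab_client_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Sketch of a HAB kernel client (illustration only)");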
drivers/soc/qcom/hab/ghs_comm.c  +122 −0 (new file)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */
#include "hab.h"
#include "hab_ghs.h"

int physical_channel_read(struct physical_channel *pchan,
		void *payload,
		size_t read_size)
{
	struct ghs_vdev *dev  = (struct ghs_vdev *)pchan->hyp_data;

	/* size in header is only for payload excluding the header itself */
	if (dev->read_size < read_size + sizeof(struct hab_header)) {
		pr_warn("read %zd is less than requested %zd plus header %zd\n",
			dev->read_size, read_size, sizeof(struct hab_header));
		read_size = dev->read_size;
	}

	/* always skip the header */
	memcpy(payload, (unsigned char *)dev->read_data +
		sizeof(struct hab_header) + dev->read_offset, read_size);
	dev->read_offset += read_size;

	return read_size;
}

int physical_channel_send(struct physical_channel *pchan,
		struct hab_header *header,
		void *payload)
{
	size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
	struct ghs_vdev *dev  = (struct ghs_vdev *)pchan->hyp_data;
	GIPC_Result result;
	uint8_t *msg;
	int irqs_disabled = irqs_disabled();

	hab_spin_lock(&dev->io_lock, irqs_disabled);

	result = hab_gipc_wait_to_send(dev->endpoint);
	if (result != GIPC_Success) {
		hab_spin_unlock(&dev->io_lock, irqs_disabled);
		pr_err("failed to wait to send %d\n", result);
		return -EBUSY;
	}

	result = GIPC_PrepareMessage(dev->endpoint, sizebytes+sizeof(*header),
		(void **)&msg);
	if (result == GIPC_Full) {
		hab_spin_unlock(&dev->io_lock, irqs_disabled);
		/* need to wait for space! */
		pr_err("failed to reserve send msg for %zd bytes\n",
			sizebytes+sizeof(*header));
		return -EBUSY;
	} else if (result != GIPC_Success) {
		hab_spin_unlock(&dev->io_lock, irqs_disabled);
		pr_err("failed to send due to error %d\n", result);
		return -ENOMEM;
	}

	if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
		struct timespec64 ts = {0};
		struct habmm_xing_vm_stat *pstat =
					(struct habmm_xing_vm_stat *)payload;

		ktime_get_ts64(&ts);
		pstat->tx_sec = ts.tv_sec;
		pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
	}

	memcpy(msg, header, sizeof(*header));

	if (sizebytes)
		memcpy(msg+sizeof(*header), payload, sizebytes);

	result = GIPC_IssueMessage(dev->endpoint, sizebytes+sizeof(*header),
		header->id_type_size);
	hab_spin_unlock(&dev->io_lock, irqs_disabled);
	if (result != GIPC_Success) {
		pr_err("send error %d, sz %zd, prot %x\n",
			result, sizebytes+sizeof(*header),
			   header->id_type_size);
		return -EAGAIN;
	}

	return 0;
}

void physical_channel_rx_dispatch_common(unsigned long physical_channel)
{
	struct hab_header header;
	struct physical_channel *pchan =
		(struct physical_channel *)physical_channel;
	struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
	GIPC_Result result;
	int irqs_disabled = irqs_disabled();

	hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
	while (1) {
		dev->read_size = 0;
		dev->read_offset = 0;
		result = GIPC_ReceiveMessage(dev->endpoint,
				dev->read_data,
				GIPC_RECV_BUFF_SIZE_BYTES,
				&dev->read_size,
				&header.id_type_size);

		if (result == GIPC_Success || dev->read_size > 0) {
			 /* handle corrupted msg? */
			hab_msg_recv(pchan, dev->read_data);
			continue;
		} else if (result == GIPC_Empty) {
			/* no more pending msg */
			break;
		}
		pr_err("recv unhandled result %d, size %zd\n",
			result, dev->read_size);
		break;
	}
	hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
}