Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 31ef58ab authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "haven: add stubs to haven client exposed APIs"

parents 885277a7 32637a59
Loading
Loading
Loading
Loading
+58 −14
Original line number Diff line number Diff line
@@ -20,12 +20,25 @@

#include "hvc_console.h"

/*
 * Note: hvc_alloc follows first-come, first-served for assigning
 * numbers to registered hvc instances. Thus, the following assignments occur
 * when both DCC and HAVEN consoles are compiled:
 *            | DCC connected | DCC not connected
 *      (dcc) |      hvc0     | (not present)
 *       SELF |      hvc1     | hvc0
 * PRIMARY_VM |      hvc2     | hvc1
 * TRUSTED_VM |      hvc3     | hvc2
 * "DCC connected" means a DCC terminal is open with device
 */

#define HVC_HH_VTERM_COOKIE	0x474E5948
/* # of payload bytes that can fit in a 1-fragment CONSOLE_WRITE message */
#define HH_HVC_WRITE_MSG_SIZE	((1 * (HH_MSGQ_MAX_MSG_SIZE_BYTES - 8)) - 4)

struct hh_hvc_prv {
	struct hvc_struct *hvc;
	enum hh_vm_names vm_name;
	DECLARE_KFIFO(get_fifo, char, 1024);
	DECLARE_KFIFO(put_fifo, char, 1024);
	struct work_struct put_work;
@@ -82,12 +95,11 @@ static void hh_hvc_put_work_fn(struct work_struct *ws)
	char buf[HH_HVC_WRITE_MSG_SIZE];
	int count, ret;
	struct hh_hvc_prv *prv = container_of(ws, struct hh_hvc_prv, put_work);
	enum hh_vm_names vm_name = vtermno_to_hh_vm_name(prv->hvc->vtermno);

	ret = hh_rm_get_vmid(vm_name, &vmid);
	ret = hh_rm_get_vmid(prv->vm_name, &vmid);
	if (ret) {
		pr_warn_once("hh_rm_get_vmid failed for %d: %d\n",
			     vm_name, ret);
		pr_warn_once("%s: hh_rm_get_vmid failed for %d: %d\n",
			     __func__, prv->vm_name, ret);
		return;
	}

@@ -99,8 +111,8 @@ static void hh_hvc_put_work_fn(struct work_struct *ws)

		ret = hh_rm_console_write(vmid, buf, count);
		if (ret) {
			pr_warn_once("hh_rm_console_write failed for %d: %d\n",
				vm_name, ret);
			pr_warn_once("%s hh_rm_console_write failed for %d: %d\n",
				__func__, prv->vm_name, ret);
			break;
		}
	}
@@ -136,6 +148,11 @@ static int hh_hvc_flush(uint32_t vtermno, bool wait)
	int ret, vm_name = vtermno_to_hh_vm_name(vtermno);
	hh_vmid_t vmid;

	/* RM calls will all sleep. A flush without waiting isn't possible */
	if (!wait)
		return 0;
	might_sleep();

	if (vm_name < 0 || vm_name >= HH_VM_MAX)
		return -EINVAL;

@@ -157,8 +174,11 @@ static int hh_hvc_notify_add(struct hvc_struct *hp, int vm_name)
	hh_vmid_t vmid;

	ret = hh_rm_get_vmid(vm_name, &vmid);
	if (ret)
	if (ret) {
		pr_err("%s: hh_rm_get_vmid failed for %d: %d\n", __func__,
			vm_name, ret);
		return ret;
	}

	return hh_rm_console_open(vmid);
}
@@ -183,7 +203,8 @@ static void hh_hvc_notify_del(struct hvc_struct *hp, int vm_name)
	ret = hh_rm_console_close(vmid);

	if (ret)
		pr_err("Failed close VM%d console - %d\n", vm_name, ret);
		pr_err("%s: failed close VM%d console - %d\n", __func__,
			vm_name, ret);

	kfifo_reset(&hh_hvc_data[vm_name].get_fifo);
}
@@ -205,12 +226,24 @@ static int __init hvc_hh_console_init(void)
{
	int ret;

	ret = hvc_instantiate(hh_vm_name_to_vtermno(HH_PRIMARY_VM), 0,
	ret = hvc_instantiate(hh_vm_name_to_vtermno(HH_SELF_VM), 0,
			      &hh_hv_ops);

	return ret < 0 ? -ENODEV : 0;
}
console_initcall(hvc_hh_console_init);

static void __init hh_hvc_console_post_init(void)
{
	/* Need to call RM CONSOLE_OPEN before console can be used */
	hh_hvc_notify_add(hh_hvc_data[HH_SELF_VM].hvc, HH_SELF_VM);
}
#else
static int __init hvc_hh_console_init(void)
{
	return 0;
}

static void __init hh_hvc_console_post_init(void) { }
#endif /* CONFIG_HVC_HAVEN_CONSOLE */

static int __init hvc_hh_init(void)
@@ -218,11 +251,20 @@ static int __init hvc_hh_init(void)
	int i, ret = 0;
	struct hh_hvc_prv *prv;

	/* Must initialize fifos and work before calling hvc_hh_console_init */
	for (i = 0; i < HH_VM_MAX; i++) {
		prv = &hh_hvc_data[i];
		prv->vm_name = i;
		INIT_KFIFO(prv->get_fifo);
		INIT_KFIFO(prv->put_fifo);
		INIT_WORK(&prv->put_work, hh_hvc_put_work_fn);
	}

	/* Must instantiate console before calling hvc_alloc */
	hvc_hh_console_init();

	for (i = 0; i < HH_VM_MAX; i++) {
		prv = &hh_hvc_data[i];
		prv->hvc = hvc_alloc(hh_vm_name_to_vtermno(i), i, &hh_hv_ops,
				     256);
		ret = PTR_ERR_OR_ZERO(prv->hvc);
@@ -234,27 +276,29 @@ static int __init hvc_hh_init(void)
	if (ret)
		goto bail;

	hh_hvc_console_post_init();

	return 0;
bail:
	for (; i >= 0; i--) {
	for (--i; i >= 0; i--) {
		hvc_remove(hh_hvc_data[i].hvc);
		hh_hvc_data[i].hvc = NULL;
	}
	return ret;
}
device_initcall(hvc_hh_init);
late_initcall(hvc_hh_init);

static __exit void hvc_hh_exit(void)
{
	int i;

	hh_rm_unregister_notifier(&hh_hvc_nb);

	for (i = 0; i < HH_VM_MAX; i++)
		if (hh_hvc_data[i].hvc) {
			hvc_remove(hh_hvc_data[i].hvc);
			hh_hvc_data[i].hvc = NULL;
		}

	hh_rm_unregister_notifier(&hh_hvc_nb);
}
module_exit(hvc_hh_exit);

+9 −0
Original line number Diff line number Diff line
@@ -47,4 +47,13 @@ config HH_DBL
	  these doorbells by calling send and/or a receive primitives exposed by
	  driver and trigger an interrupt to each other and exchange the data.

config HH_IRQ_LEND
	tristate "Haven IRQ Lending Framework"
	depends on HH_RM_DRV
	help
	  Haven Resource Manager permits interrupts to be shared between
	  virtual machines. This config enables a framework which
	  supports sharing these interrupts. It follows the
	  RM-recommended protocol.

endif
+1 −0
Original line number Diff line number Diff line
@@ -3,3 +3,4 @@ obj-$(CONFIG_HH_MSGQ) += hh_msgq.o
obj-$(CONFIG_HH_RM_DRV)		+= hh_rm_drv.o
hh_rm_drv-y 			:= hh_rm_core.o hh_rm_iface.o
obj-$(CONFIG_HH_DBL)		+= hh_dbl.o
obj-$(CONFIG_HH_IRQ_LEND)	+= hh_irq_lend.o
+304 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/haven/hh_irq_lend.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/spinlock.h>

/*
 * Per-label IRQ lending state. One entry per high-level vIRQ label;
 * all state transitions are serialized by hh_irq_lend_lock.
 */
struct hh_irq_entry {
	hh_vmid_t vmid;			/* peer VM id; HH_VM_MAX acts as "any lender" wildcard */
	hh_irq_handle_fn handle;	/* client callback, invoked from notifier context */
	void *data;			/* opaque cookie passed back to handle */

	enum {
		/* start state */
		HH_IRQ_STATE_NONE,
		/* NONE -> WAIT_RELEASE by hh_irq_lend */
		HH_IRQ_STATE_WAIT_RELEASE,
		/* NONE -> WAIT_LEND by hh_irq_wait_for_lend */
		HH_IRQ_STATE_WAIT_LEND,
		/* WAIT_RELEASE -> RELEASED by notifier */
		/* RELEASED -> NONE by hh_irq_reclaim */
		HH_IRQ_STATE_RELEASED,
		/* WAIT_LEND -> LENT by notifier */
		/* LENT -> NONE by hh_irq_release */
		HH_IRQ_STATE_LENT,
	} state;
	hh_virq_handle_t virq_handle;	/* RM handle identifying the lent vIRQ */
};

/* Lending state table, indexed by enum hh_irq_label. */
static struct hh_irq_entry hh_irq_entries[HH_IRQ_LABEL_MAX];
/* Guards state/vmid/virq_handle transitions in hh_irq_entries. */
static DEFINE_SPINLOCK(hh_irq_lend_lock);

/*
 * RM notifier: the borrowing VM released a lent IRQ. Find the matching
 * WAIT_RELEASE entry by virq_handle, mark it RELEASED and invoke the
 * client's on_release callback (with the lock dropped).
 */
static int hh_irq_released_nb_handler(struct notifier_block *this,
				      unsigned long cmd, void *data)
{
	struct hh_rm_notif_vm_irq_released_payload *payload = data;
	struct hh_irq_entry *entry;
	enum hh_irq_label label;
	unsigned long flags;

	if (cmd != HH_RM_NOTIF_VM_IRQ_RELEASED)
		return NOTIFY_DONE;

	spin_lock_irqsave(&hh_irq_lend_lock, flags);
	for (label = 0; label < HH_IRQ_LABEL_MAX; label++) {
		entry = &hh_irq_entries[label];
		if (entry->state != HH_IRQ_STATE_WAIT_RELEASE ||
		    entry->virq_handle != payload->virq_handle)
			continue;

		entry->state = HH_IRQ_STATE_RELEASED;
		/* Drop the lock before calling out to the client. */
		spin_unlock_irqrestore(&hh_irq_lend_lock, flags);

		entry->handle(entry->data, label);

		return NOTIFY_OK;
	}
	spin_unlock_irqrestore(&hh_irq_lend_lock, flags);

	return NOTIFY_DONE;
}

static struct notifier_block hh_irq_released_nb = {
	.notifier_call = hh_irq_released_nb_handler,
};

static int hh_irq_lent_nb_handler(struct notifier_block *this,
				  unsigned long cmd, void *data)
{
	unsigned long flags;
	enum hh_irq_label label;
	struct hh_irq_entry *entry;
	struct hh_rm_notif_vm_irq_lent_payload *lent = data;

	if (cmd != HH_RM_NOTIF_VM_IRQ_LENT)
		return NOTIFY_DONE;

	spin_lock_irqsave(&hh_irq_lend_lock, flags);
	for (label = 0; label < HH_IRQ_LABEL_MAX; label++) {
		entry = &hh_irq_entries[label];
		if (entry->state != HH_IRQ_STATE_WAIT_LEND)
			continue;

		if (label == lent->virq_label &&
		    (entry->vmid == HH_VM_MAX ||
		     entry->vmid == lent->owner_vmid)) {
			entry->vmid = lent->owner_vmid;
			entry->virq_handle = lent->virq_handle;

			entry->state = HH_IRQ_STATE_LENT;
			spin_unlock_irqrestore(&hh_irq_lend_lock,
					       flags);

			entry->handle(entry->data, label);

			return NOTIFY_OK;
		}
	}
	spin_unlock_irqrestore(&hh_irq_lend_lock, flags);

	return NOTIFY_DONE;
}

static struct notifier_block hh_irq_lent_nb = {
	.notifier_call = hh_irq_lent_nb_handler,
};

/**
 * hh_irq_lend: Lend a hardware interrupt to another VM
 * @label: vIRQ high-level label
 * @name: VM name to send interrupt to
 * @hw_irq: Hardware IRQ number to lend
 * @on_release: callback to invoke when other VM returns the
 *              interrupt
 * @data: Argument to pass to on_release
 *
 * Returns 0 on success, <0 on error. On any failure the label's state
 * is left/restored at NONE so the call may be retried.
 */
int hh_irq_lend(enum hh_irq_label label, enum hh_vm_names name,
		int hw_irq, hh_irq_handle_fn on_release, void *data)
{
	int ret;
	unsigned long flags;
	struct hh_irq_entry *entry;

	if (label >= HH_IRQ_LABEL_MAX || !on_release)
		return -EINVAL;

	entry = &hh_irq_entries[label];

	spin_lock_irqsave(&hh_irq_lend_lock, flags);
	if (entry->state != HH_IRQ_STATE_NONE) {
		/* Label already lent or awaiting a lend */
		spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
		return -EINVAL;
	}

	/*
	 * NOTE(review): hh_rm_get_vmid is an RM call; the hvc driver's
	 * flush path treats RM calls as sleeping. Confirm it is safe to
	 * invoke under a spinlock with interrupts disabled.
	 */
	ret = hh_rm_get_vmid(name, &entry->vmid);
	if (ret) {
		/* state is still NONE here; nothing to roll back */
		spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
		return ret;
	}

	entry->handle = on_release;
	entry->data = data;
	entry->state = HH_IRQ_STATE_WAIT_RELEASE;
	spin_unlock_irqrestore(&hh_irq_lend_lock, flags);

	ret = hh_rm_vm_irq_lend_notify(entry->vmid, hw_irq, label,
				       &entry->virq_handle);
	if (ret) {
		/*
		 * Fix: previously a lend_notify failure left the entry
		 * stuck in WAIT_RELEASE, permanently leaking the label.
		 * Roll back to NONE so the caller can retry.
		 */
		spin_lock_irqsave(&hh_irq_lend_lock, flags);
		entry->state = HH_IRQ_STATE_NONE;
		spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(hh_irq_lend);

/**
 * hh_irq_reclaim: Reclaim a hardware interrupt after the other VM
 * has released it.
 * @label: vIRQ high-level label
 *
 * Call from within, or after, the on_release() callback registered via
 * hh_irq_lend. Not thread-safe: do not race with another hh_irq_reclaim
 * on the same label.
 */
int hh_irq_reclaim(enum hh_irq_label label)
{
	struct hh_irq_entry *entry;
	int ret;

	if (label >= HH_IRQ_LABEL_MAX)
		return -EINVAL;

	entry = &hh_irq_entries[label];
	if (entry->state != HH_IRQ_STATE_RELEASED)
		return -EINVAL;

	ret = hh_rm_vm_irq_reclaim(entry->virq_handle);
	if (!ret)
		entry->state = HH_IRQ_STATE_NONE;

	return ret;
}
EXPORT_SYMBOL(hh_irq_reclaim);

/**
 * hh_irq_wait_for_lend: Register to claim a lent interrupt from another
 * VM
 * @label: vIRQ high-level label
 * @name: Lender's VM name. If don't care, then use HH_VM_MAX
 * @on_lend: callback to invoke when other VM lends the interrupt
 * @data: Argument to pass to on_lend
 *
 * Returns 0 on success, <0 on error.
 */
int hh_irq_wait_for_lend(enum hh_irq_label label, enum hh_vm_names name,
			 hh_irq_handle_fn on_lend, void *data)
{
	int ret;
	unsigned long flags;
	struct hh_irq_entry *entry;

	if (label >= HH_IRQ_LABEL_MAX || !on_lend)
		return -EINVAL;

	entry = &hh_irq_entries[label];

	spin_lock_irqsave(&hh_irq_lend_lock, flags);
	if (entry->state != HH_IRQ_STATE_NONE) {
		/* Label already in use */
		spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
		return -EINVAL;
	}

	if (name == HH_VM_MAX) {
		/*
		 * Fix: HH_VM_MAX is the documented "any lender" wildcard
		 * (the lent notifier matches entry->vmid == HH_VM_MAX),
		 * but it is not a valid argument to hh_rm_get_vmid, so
		 * the lookup must be skipped for it.
		 */
		entry->vmid = HH_VM_MAX;
	} else {
		ret = hh_rm_get_vmid(name, &entry->vmid);
		if (ret) {
			/* state is still NONE here; nothing to roll back */
			spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
			return ret;
		}
	}

	entry->handle = on_lend;
	entry->data = data;
	entry->state = HH_IRQ_STATE_WAIT_LEND;
	spin_unlock_irqrestore(&hh_irq_lend_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hh_irq_wait_for_lend);

/**
 * hh_irq_accept: Register to receive interrupts with a lent vIRQ
 * @label: vIRQ high-level label
 * @hw_irq: HWIRQ# to associate vIRQ with. If don't care, use -1
 *
 * If hw_irq is not -1, then returns 0 on success, <0 otherwise.
 * If hw_irq is -1, then returns the HWIRQ# that the vIRQ was registered
 * to, or <0 for error.
 * Not thread-safe w.r.t. IRQ lend state: do not race with
 * hh_irq_release or another hh_irq_accept on the same label.
 */
int hh_irq_accept(enum hh_irq_label label, int hw_irq)
{
	struct hh_irq_entry *entry;

	if (label >= HH_IRQ_LABEL_MAX)
		return -EINVAL;

	entry = &hh_irq_entries[label];
	if (entry->state != HH_IRQ_STATE_LENT)
		return -EINVAL;

	return hh_rm_vm_irq_accept(entry->virq_handle, hw_irq);
}
EXPORT_SYMBOL(hh_irq_accept);

/**
 * hh_irq_release: Release a lent interrupt
 * @label: vIRQ high-level label
 *
 * Not thread-safe w.r.t. IRQ lend state: do not race with
 * hh_irq_accept or another hh_irq_release on the same label.
 */
int hh_irq_release(enum hh_irq_label label)
{
	struct hh_irq_entry *entry;
	int ret;

	if (label >= HH_IRQ_LABEL_MAX)
		return -EINVAL;

	entry = &hh_irq_entries[label];
	if (entry->state != HH_IRQ_STATE_LENT)
		return -EINVAL;

	ret = hh_rm_vm_irq_release_notify(entry->vmid,
					  entry->virq_handle);
	if (!ret)
		entry->state = HH_IRQ_STATE_NONE;

	return ret;
}
EXPORT_SYMBOL(hh_irq_release);

/*
 * Register both RM notifiers (lent and released). Fix: if the second
 * registration fails, unregister the first instead of leaving a stale
 * notifier on the chain after a failed init.
 */
static int __init hh_irq_lend_init(void)
{
	int ret;

	ret = hh_rm_register_notifier(&hh_irq_lent_nb);
	if (ret)
		return ret;

	ret = hh_rm_register_notifier(&hh_irq_released_nb);
	if (ret)
		hh_rm_unregister_notifier(&hh_irq_lent_nb);

	return ret;
}
module_init(hh_irq_lend_init);

static void hh_irq_lend_exit(void)
{
	hh_rm_unregister_notifier(&hh_irq_lent_nb);
	hh_rm_unregister_notifier(&hh_irq_released_nb);
}
module_exit(hh_irq_lend_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven IRQ Lending Library");
+14 −2
Original line number Diff line number Diff line
@@ -89,6 +89,7 @@ static int __hh_msgq_recv(struct hh_msgq_cap_table *cap_table_entry,
	switch (hh_ret) {
	case HH_ERROR_OK:
		*recv_size = resp.recv_size;
		cap_table_entry->rx_empty = !resp.not_empty;
		ret = 0;
		break;
	case HH_ERROR_MSGQUEUE_EMPTY:
@@ -102,7 +103,7 @@ static int __hh_msgq_recv(struct hh_msgq_cap_table *cap_table_entry,
	spin_unlock_irqrestore(&cap_table_entry->rx_lock, flags);

	if (ret != 0 && ret != -EAGAIN)
		pr_err("%s: Failed to recv the message. Error: %d\n",
		pr_err("%s: Failed to recv from msgq. Hypercall error: %d\n",
			__func__, hh_ret);

	return ret;
@@ -189,6 +190,10 @@ int hh_msgq_recv(void *msgq_client_desc,
					recv_size, flags);
	} while (ret == -EAGAIN);

	if (!ret)
		print_hex_dump_debug("hh_msgq_recv: ", DUMP_PREFIX_OFFSET,
				     4, 1, buff, *recv_size, false);

	return ret;

err:
@@ -208,11 +213,18 @@ static int __hh_msgq_send(struct hh_msgq_cap_table *cap_table_entry,
	/* Discard the driver specific flags, and keep only HVC specifics */
	tx_flags &= HH_MSGQ_HVC_FLAGS_MASK;

	print_hex_dump_debug("hh_msgq_send: ", DUMP_PREFIX_OFFSET,
			     4, 1, buff, size, false);

	spin_lock_irqsave(&cap_table_entry->tx_lock, flags);
	hh_ret = hh_hcall_msgq_send(cap_table_entry->tx_cap_id,
					size, buff, tx_flags, &resp);

	switch (hh_ret) {
	case HH_ERROR_OK:
		cap_table_entry->tx_full = !resp.not_full;
		ret = 0;
		break;
	case HH_ERROR_MSGQUEUE_FULL:
		cap_table_entry->tx_full = true;
		ret = -EAGAIN;
@@ -224,7 +236,7 @@ static int __hh_msgq_send(struct hh_msgq_cap_table *cap_table_entry,
	spin_unlock_irqrestore(&cap_table_entry->tx_lock, flags);

	if (ret != 0 && ret != -EAGAIN)
		pr_err("%s: Failed to send the message. Error: %d\n",
		pr_err("%s: Failed to send on msgq. Hypercall error: %d\n",
			__func__, hh_ret);

	return ret;
Loading