Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 51661a72 authored by Manoj Prabhu B's avatar Manoj Prabhu B
Browse files

memshare: Modify the driver to be dynamically loadable



Align the memshare driver with the Generic Kernel Image (GKI) requirements by
modifying the driver to be dynamically loadable.

Change-Id: I92bab11f8ff4279b94bd4bce6d17640a3715c26c
Signed-off-by: Manoj Prabhu B <bmanoj@codeaurora.org>
parent b7a4d9d4
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -3,8 +3,8 @@
# Shared Heap for external processors
#
config MEM_SHARE_QMI_SERVICE
       depends on QCOM_QMI_HELPERS
       bool "Shared Heap for external processors"
	tristate "Shared Heap for external processors"
	select QCOM_QMI_HELPERS
	help
		Memory Share Kernel QTI Messaging Interface Service
		receives requests from Modem Processor Sub System
+10 −0
Original line number Diff line number Diff line
@@ -2,6 +2,7 @@
/* Copyright (c) 2013-2015, 2017-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/soc/qcom/qmi.h>
#include "heap_mem_ext_v01.h"

@@ -32,6 +33,7 @@ struct qmi_elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = {
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
EXPORT_SYMBOL(dhms_mem_alloc_addr_info_type_v01_ei);

struct qmi_elem_info mem_alloc_generic_req_msg_data_v01_ei[] = {
	{
@@ -112,6 +114,7 @@ struct qmi_elem_info mem_alloc_generic_req_msg_data_v01_ei[] = {
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
EXPORT_SYMBOL(mem_alloc_generic_req_msg_data_v01_ei);

struct qmi_elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = {
	{
@@ -183,6 +186,7 @@ struct qmi_elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = {
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
EXPORT_SYMBOL(mem_alloc_generic_resp_msg_data_v01_ei);

struct qmi_elem_info mem_free_generic_req_msg_data_v01_ei[] = {
	{
@@ -247,6 +251,7 @@ struct qmi_elem_info mem_free_generic_req_msg_data_v01_ei[] = {
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
EXPORT_SYMBOL(mem_free_generic_req_msg_data_v01_ei);

struct qmi_elem_info mem_free_generic_resp_msg_data_v01_ei[] = {
	{
@@ -266,6 +271,7 @@ struct qmi_elem_info mem_free_generic_resp_msg_data_v01_ei[] = {
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
EXPORT_SYMBOL(mem_free_generic_resp_msg_data_v01_ei);

struct qmi_elem_info mem_query_size_req_msg_data_v01_ei[] = {
	{
@@ -301,6 +307,7 @@ struct qmi_elem_info mem_query_size_req_msg_data_v01_ei[] = {
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
EXPORT_SYMBOL(mem_query_size_req_msg_data_v01_ei);

struct qmi_elem_info mem_query_size_resp_msg_data_v01_ei[] = {
	{
@@ -338,3 +345,6 @@ struct qmi_elem_info mem_query_size_resp_msg_data_v01_ei[] = {
		.tlv_type       = QMI_COMMON_TLV_TYPE,
	},
};
EXPORT_SYMBOL(mem_query_size_resp_msg_data_v01_ei);

MODULE_LICENSE("GPL v2");
+58 −78
Original line number Diff line number Diff line
@@ -21,7 +21,6 @@

/* Macros */
#define MEMSHARE_DEV_NAME "memshare"
#define MEMSHARE_CHILD_DEV_NAME "memshare_child"
static unsigned long(attrs);

static struct qmi_handle *mem_share_svc_handle;
@@ -72,7 +71,7 @@ static int mem_share_configure_ramdump(int client)
		clnt = "DIAG";
		break;
	default:
		dev_err(memsh_child->dev, "memshare: no memshare clients registered\n");
		dev_err(memsh_drv->dev, "memshare: no memshare clients registered\n");
		return -EINVAL;
	}

@@ -83,12 +82,12 @@ static int mem_share_configure_ramdump(int client)
			create_ramdump_device(client_name,
				memshare_dev[client]);
	} else {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare: invalid memshare device for creating ramdump device\n");
		return -ENODEV;
	}
	if (IS_ERR_OR_NULL(memshare_ramdump_dev[client])) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare: unable to create memshare ramdump device\n");
		memshare_ramdump_dev[client] = NULL;
		return -ENOMEM;
@@ -110,7 +109,7 @@ static int check_client(int client_id, int proc, int request)
		}
	}
	if ((found == DHMS_MEM_CLIENT_INVALID) && !request) {
		dev_dbg(memsh_child->dev,
		dev_dbg(memsh_drv->dev,
			"memshare: No registered client for the client_id: %d, adding a new client\n",
			client_id);
		/* Add a new client */
@@ -125,7 +124,7 @@ static int check_client(int client_id, int proc, int request)
				if (!memblock[i].file_created) {
					rc = mem_share_configure_ramdump(i);
					if (rc)
						dev_err(memsh_child->dev,
						dev_err(memsh_drv->dev,
							"memshare_check_client: cannot create ramdump for client with id: %d\n",
							client_id);
					else
@@ -222,14 +221,14 @@ static int mem_share_do_ramdump(void)
			client_name = "DIAG";
			break;
		default:
			dev_err(memsh_child->dev,
			dev_err(memsh_drv->dev,
				"memshare: no memshare clients registered for client_id: %d\n",
				i);
			return -EINVAL;
		}

		if (!memblock[i].allotted) {
			dev_err(memsh_child->dev, "memshare: %s: memblock is not allotted\n",
			dev_err(memsh_drv->dev, "memshare: %s: memblock is not allotted\n",
			client_name);
			continue;
		}
@@ -237,7 +236,7 @@ static int mem_share_do_ramdump(void)
		if (memblock[i].hyp_mapping &&
			memblock[i].peripheral ==
			DHMS_MEM_PROC_MPSS_V01) {
			dev_dbg(memsh_child->dev,
			dev_dbg(memsh_drv->dev,
				"memshare: %s: hypervisor unmapping for client before elf dump\n",
				client_name);
			if (memblock[i].alloc_request)
@@ -255,7 +254,7 @@ static int mem_share_do_ramdump(void)
				 * earlier but during unmap
				 * it lead to failure.
				 */
				dev_err(memsh_child->dev,
				dev_err(memsh_drv->dev,
					"memshare: %s: failed to map the memory region to APPS\n",
					client_name);
				continue;
@@ -275,14 +274,14 @@ static int mem_share_do_ramdump(void)
			ramdump_segments_tmp[0].v_address =
				memblock[i].virtual_addr;

			dev_dbg(memsh_child->dev, "memshare: %s: Begin elf dump for size = %d\n",
			dev_dbg(memsh_drv->dev, "memshare: %s: Begin elf dump for size = %d\n",
				client_name, memblock[i].size);

			ret = do_elf_ramdump(memshare_ramdump_dev[i],
						ramdump_segments_tmp, 1);
			kfree(ramdump_segments_tmp);
			if (ret < 0) {
				dev_err(memsh_child->dev,
				dev_err(memsh_drv->dev,
					"memshare: %s: Unable to elf dump with failure: %d\n",
					client_name, ret);
				return ret;
@@ -324,22 +323,22 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
		}

		if (notifdata->enable_ramdump && ramdump_event) {
			dev_info(memsh_child->dev, "memshare: Ramdump collection is enabled\n");
			dev_info(memsh_drv->dev, "memshare: Ramdump collection is enabled\n");
			ret = mem_share_do_ramdump();
			if (ret)
				dev_err(memsh_child->dev, "memshare: Ramdump collection failed\n");
				dev_err(memsh_drv->dev, "memshare: Ramdump collection failed\n");
			ramdump_event = false;
		}
		break;

	case SUBSYS_AFTER_POWERUP:
		dev_dbg(memsh_child->dev, "memshare: Modem has booted up\n");
		dev_dbg(memsh_drv->dev, "memshare: Modem has booted up\n");
		for (i = 0; i < MAX_CLIENTS; i++) {
			size = memblock[i].size;
			if (memblock[i].free_memory > 0 &&
					bootup_request >= 2) {
				memblock[i].free_memory -= 1;
				dev_dbg(memsh_child->dev, "memshare: free_memory count: %d for client id: %d\n",
				dev_dbg(memsh_drv->dev, "memshare: free_memory count: %d for client id: %d\n",
					memblock[i].free_memory,
					memblock[i].client_id);
			}
@@ -351,7 +350,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
				!memblock[i].client_request &&
				memblock[i].allotted &&
				!memblock[i].alloc_request) {
				dev_info(memsh_child->dev,
				dev_info(memsh_drv->dev,
					"memshare: hypervisor unmapping for allocated memory with client id: %d\n",
					memblock[i].client_id);
				if (memblock[i].hyp_mapping) {
@@ -369,7 +368,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
						 * earlier but during unmap
						 * it lead to failure.
						 */
						dev_err(memsh_child->dev,
						dev_err(memsh_drv->dev,
							"memshare: failed to hypervisor unmap the memory region for client id: %d\n",
							memblock[i].client_id);
					} else {
@@ -414,7 +413,7 @@ static void shared_hyp_mapping(int client_id)
	int dest_perms[1] = {PERM_READ|PERM_WRITE};

	if (client_id == DHMS_MEM_CLIENT_INVALID) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare: hypervisor mapping failure for invalid client\n");
		return;
	}
@@ -425,7 +424,7 @@ static void shared_hyp_mapping(int client_id)
			dest_perms, 1);

	if (ret != 0) {
		dev_err(memsh_child->dev, "memshare: hyp_assign_phys failed size=%u err=%d\n",
		dev_err(memsh_drv->dev, "memshare: hyp_assign_phys failed size=%u err=%d\n",
				memblock[client_id].size, ret);
		return;
	}
@@ -443,7 +442,7 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,

	mutex_lock(&memsh_drv->mem_share);
	alloc_req = (struct mem_alloc_generic_req_msg_v01 *)decoded_msg;
	dev_info(memsh_child->dev,
	dev_info(memsh_drv->dev,
		"memshare_alloc: memory alloc request received for client id: %d, proc_id: %d, request size: %d\n",
		alloc_req->client_id, alloc_req->proc_id, alloc_req->num_bytes);
	alloc_resp = kzalloc(sizeof(*alloc_resp),
@@ -458,7 +457,7 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
								CHECK);

	if (client_id >= MAX_CLIENTS) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare_alloc: client not found, requested client: %d, proc_id: %d\n",
			alloc_req->client_id, alloc_req->proc_id);
		kfree(alloc_resp);
@@ -475,7 +474,7 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
		rc = memshare_alloc(memsh_drv->dev, size,
					&memblock[client_id]);
		if (rc) {
			dev_err(memsh_child->dev,
			dev_err(memsh_drv->dev,
				"memshare_alloc: unable to allocate memory of size: %d for requested client\n",
				size);
			resp = 1;
@@ -487,7 +486,7 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
			memblock[client_id].peripheral = alloc_req->proc_id;
		}
	}
	dev_dbg(memsh_child->dev,
	dev_dbg(memsh_drv->dev,
		"memshare_alloc: free memory count for client id: %d = %d\n",
		memblock[client_id].client_id, memblock[client_id].free_memory);

@@ -503,7 +502,7 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
		memblock[client_id].allotted)
		shared_hyp_mapping(client_id);
	mutex_unlock(&memsh_drv->mem_share);
	dev_info(memsh_child->dev,
	dev_info(memsh_drv->dev,
		"memshare_alloc: client_id: %d, alloc_resp.num_bytes: %d, alloc_resp.resp.result: %lx\n",
		alloc_req->client_id,
		alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
@@ -514,7 +513,7 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
			  sizeof(struct mem_alloc_generic_resp_msg_v01),
			  mem_alloc_generic_resp_msg_data_v01_ei, alloc_resp);
	if (rc < 0)
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
		"memshare_alloc: Error sending the alloc response: %d\n",
		rc);

@@ -538,17 +537,17 @@ static void handle_free_generic_req(struct qmi_handle *handle,
	memset(&free_resp, 0, sizeof(free_resp));
	free_resp.resp.error = QMI_ERR_INTERNAL_V01;
	free_resp.resp.result = QMI_RESULT_FAILURE_V01;
	dev_info(memsh_child->dev,
	dev_info(memsh_drv->dev,
		"memshare_free: handling memory free request with client id: %d, proc_id: %d\n",
		free_req->client_id, free_req->proc_id);
	client_id = check_client(free_req->client_id, free_req->proc_id, FREE);
	if (client_id == DHMS_MEM_CLIENT_INVALID) {
		dev_err(memsh_child->dev, "memshare_free: invalid client request to free memory\n");
		dev_err(memsh_drv->dev, "memshare_free: invalid client request to free memory\n");
		flag = 1;
	} else if (!memblock[client_id].guarantee &&
				!memblock[client_id].client_request &&
				memblock[client_id].allotted) {
		dev_dbg(memsh_child->dev,
		dev_dbg(memsh_drv->dev,
			"memshare_free: hypervisor unmapping for client_id:%d - size: %d\n",
			client_id, memblock[client_id].size);
		ret = hyp_assign_phys(memblock[client_id].phy_addr,
@@ -559,7 +558,7 @@ static void handle_free_generic_req(struct qmi_handle *handle,
		 * This is an error case as hyp mapping was successful
		 * earlier but during unmap it lead to failure.
		 */
			dev_err(memsh_child->dev,
			dev_err(memsh_drv->dev,
				"memshare_free: failed to unmap the region for client id:%d\n",
				client_id);
		}
@@ -578,7 +577,7 @@ static void handle_free_generic_req(struct qmi_handle *handle,
			attrs);
		free_client(client_id);
	} else {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare_free: cannot free the memory for a guaranteed client (client_id: %d)\n",
			client_id);
	}
@@ -597,7 +596,7 @@ static void handle_free_generic_req(struct qmi_handle *handle,
			  MEM_FREE_REQ_MAX_MSG_LEN_V01,
			  mem_free_generic_resp_msg_data_v01_ei, &free_resp);
	if (rc < 0)
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
		"memshare_free: error sending the free response: %d\n", rc);

}
@@ -617,14 +616,14 @@ static void handle_query_size_req(struct qmi_handle *handle,
		mutex_unlock(&memsh_drv->mem_share);
		return;
	}
	dev_dbg(memsh_child->dev,
	dev_dbg(memsh_drv->dev,
		"memshare_query: query on availalbe memory size for client id: %d, proc_id: %d\n",
		query_req->client_id, query_req->proc_id);
	client_id = check_client(query_req->client_id, query_req->proc_id,
								CHECK);

	if (client_id >= MAX_CLIENTS) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare_query: client not found, requested client: %d, proc_id: %d\n",
			query_req->client_id, query_req->proc_id);
		kfree(query_resp);
@@ -644,7 +643,7 @@ static void handle_query_size_req(struct qmi_handle *handle,
	query_resp->resp.error = QMI_ERR_NONE_V01;
	mutex_unlock(&memsh_drv->mem_share);

	dev_info(memsh_child->dev,
	dev_info(memsh_drv->dev,
		"memshare_query: client_id : %d, query_resp.size :%d, query_resp.resp.result :%lx\n",
		query_req->client_id, query_resp->size,
		(unsigned long)query_resp->resp.result);
@@ -653,7 +652,7 @@ static void handle_query_size_req(struct qmi_handle *handle,
			  MEM_QUERY_MAX_MSG_LEN_V01,
			  mem_query_size_resp_msg_data_v01_ei, query_resp);
	if (rc < 0)
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
		"memshare_query: Error sending the query response: %d\n", rc);

	kfree(query_resp);
@@ -697,11 +696,11 @@ int memshare_alloc(struct device *dev,
					unsigned int block_size,
					struct mem_blocks *pblk)
{
	dev_dbg(memsh_child->dev,
		"memshare: allocation request for size: %d\n", block_size);
	dev_dbg(memsh_drv->dev,
		"memshare: allocation request for size: %d", block_size);

	if (!pblk) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare: Failed memory block allocation\n");
		return -ENOMEM;
	}
@@ -735,7 +734,7 @@ static void memshare_init_worker(struct work_struct *work)
		sizeof(struct qmi_elem_info),
		&server_ops, qmi_memshare_handlers);
	if (rc < 0) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare: Creating mem_share_svc qmi handle failed\n");
		kfree(mem_share_svc_handle);
		destroy_workqueue(mem_share_svc_workqueue);
@@ -744,14 +743,14 @@ static void memshare_init_worker(struct work_struct *work)
	rc = qmi_add_server(mem_share_svc_handle, MEM_SHARE_SERVICE_SVC_ID,
		MEM_SHARE_SERVICE_VERS, MEM_SHARE_SERVICE_INS_ID);
	if (rc < 0) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare: Registering mem share svc failed %d\n", rc);
		qmi_handle_release(mem_share_svc_handle);
		kfree(mem_share_svc_handle);
		destroy_workqueue(mem_share_svc_workqueue);
		return;
	}
	dev_dbg(memsh_child->dev, "memshare: memshare_init successful\n");
	dev_dbg(memsh_drv->dev, "memshare: memshare_init successful\n");
}

static int memshare_child_probe(struct platform_device *pdev)
@@ -782,7 +781,7 @@ static int memshare_child_probe(struct platform_device *pdev)
	rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id",
						&client_id);
	if (rc) {
		dev_err(memsh_child->dev, "memshare: Error reading client id, rc: %d\n",
		dev_err(memsh_drv->dev, "memshare: Error reading client id, rc: %d\n",
				rc);
		return rc;
	}
@@ -802,7 +801,7 @@ static int memshare_child_probe(struct platform_device *pdev)
	rc = of_property_read_string(pdev->dev.of_node, "label",
						&name);
	if (rc) {
		dev_err(memsh_child->dev, "memshare: Error reading peripheral info for client, rc: %d\n",
		dev_err(memsh_drv->dev, "memshare: Error reading peripheral info for client, rc: %d\n",
					rc);
		return rc;
	}
@@ -827,7 +826,7 @@ static int memshare_child_probe(struct platform_device *pdev)
				size,
				&memblock[num_clients]);
		if (rc) {
			dev_err(memsh_child->dev,
			dev_err(memsh_drv->dev,
				"memshare_child: Unable to allocate memory for guaranteed clients, rc: %d\n",
				rc);
			return rc;
@@ -847,7 +846,7 @@ static int memshare_child_probe(struct platform_device *pdev)
	if (!memblock[num_clients].file_created) {
		rc = mem_share_configure_ramdump(num_clients);
		if (rc)
			dev_err(memsh_child->dev,
			dev_err(memsh_drv->dev,
			"memshare_child: cannot create ramdump for client with id: %d\n",
			memblock[num_clients].client_id);
		else
@@ -863,13 +862,21 @@ static int memshare_probe(struct platform_device *pdev)
{
	int rc;
	struct memshare_driver *drv;
	struct device *dev = &pdev->dev;

	if (of_device_is_compatible(dev->of_node,
					"qcom,memshare-peripheral"))
		return memshare_child_probe(pdev);

	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver),
							GFP_KERNEL);

	if (!drv)
		return -ENOMEM;

	drv->dev = &pdev->dev;
	memsh_drv = drv;
	platform_set_drvdata(pdev, memsh_drv);

	/* Memory allocation has been done successfully */
	mutex_init(&drv->mem_free);
	mutex_init(&drv->mem_share);
@@ -877,9 +884,6 @@ static int memshare_probe(struct platform_device *pdev)
	INIT_WORK(&drv->memshare_init_work, memshare_init_worker);
	schedule_work(&drv->memshare_init_work);

	drv->dev = &pdev->dev;
	memsh_drv = drv;
	platform_set_drvdata(pdev, memsh_drv);
	initialize_client();
	num_clients = 0;

@@ -887,13 +891,13 @@ static int memshare_probe(struct platform_device *pdev)
				&pdev->dev);

	if (rc) {
		dev_err(memsh_child->dev,
		dev_err(memsh_drv->dev,
			"memshare: error populating the devices\n");
		return rc;
	}

	subsys_notif_register_notifier("modem", &nb);
	dev_dbg(memsh_child->dev, "memshare: Memshare inited\n");
	dev_dbg(memsh_drv->dev, "memshare: Memshare inited\n");

	return 0;
}
@@ -910,26 +914,12 @@ static int memshare_remove(struct platform_device *pdev)
	return 0;
}

static int memshare_child_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id memshare_match_table[] = {
	{
		.compatible = "qcom,memshare",
	},
	{}
};

static const struct of_device_id memshare_match_table1[] = {
	{
		.compatible = "qcom,memshare-peripheral",
	},
	{ .compatible = "qcom,memshare", },
	{ .compatible = "qcom,memshare-peripheral", },
	{}
};


static struct platform_driver memshare_pdriver = {
	.probe          = memshare_probe,
	.remove         = memshare_remove,
@@ -939,17 +929,7 @@ static struct platform_driver memshare_pdriver = {
	},
};

static struct platform_driver memshare_pchild = {
	.probe          = memshare_child_probe,
	.remove         = memshare_child_remove,
	.driver = {
		.name   = MEMSHARE_CHILD_DEV_NAME,
		.of_match_table = memshare_match_table1,
	},
};

module_platform_driver(memshare_pdriver);
module_platform_driver(memshare_pchild);

MODULE_DESCRIPTION("Mem Share QMI Service Driver");
MODULE_LICENSE("GPL v2");