Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 92422e1d authored by Seemanta Dutta's avatar Seemanta Dutta Committed by Gerrit - the friendly Code Review server
Browse files

cam: smmu: Schedule work queue for triggering page fault handler



If page fault handler is triggered from within the same context
as ARM IOMMU driver, it can lead to deadlock due to improper
grabbing of mutexes.

This can be avoided if the page fault handler context returns
immediately to the ARM IOMMU driver after scheduling a work queue
item that triggers the client-specific fault handlers registered
with the CAM SMMU driver.

So, schedule a work queue item for triggering the page fault
handlers of all registered clients.

Change-Id: I7fbe520f7e27c9a4f4917cd259f138c64ee76e78
Signed-off-by: default avatarSeemanta Dutta <seemanta@codeaurora.org>
parent 566d2506
Loading
Loading
Loading
Loading
+71 −11
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#include <linux/qcom_iommu.h>
#include <linux/dma-mapping.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/workqueue.h>
#include "cam_smmu_api.h"

#define SCRATCH_ALLOC_START SZ_128K
@@ -44,6 +45,16 @@
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
#endif

/*
 * Per-fault payload: captured by the IOMMU fault handler in atomic
 * context, queued on iommu_cb_set.payload_list, and later consumed by
 * the page fault work queue (cam_smmu_page_fault_work) in process
 * context, where client fault handlers can safely be invoked.
 */
struct cam_smmu_work_payload {
	int idx;			/* index into iommu_cb_set.cb_info for the faulting context bank */
	struct iommu_domain *domain;	/* IOMMU domain in which the fault occurred */
	struct device *dev;		/* device that triggered the fault */
	unsigned long iova;		/* faulting I/O virtual address */
	int flags;			/* fault flags as reported by the IOMMU driver */
	void *token;			/* token registered with the ARM IOMMU fault handler */
	struct list_head list;		/* link in iommu_cb_set.payload_list */
};

enum cam_protection_type {
	CAM_PROT_INVALID,
	CAM_NON_SECURE,
@@ -99,6 +110,9 @@ struct cam_iommu_cb_set {
	struct cam_context_bank_info *cb_info;
	u32 cb_num;
	u32 cb_init_count;
	struct work_struct smmu_work;
	struct mutex payload_list_lock;
	struct list_head payload_list;
};

static struct of_device_id msm_cam_smmu_dt_match[] = {
@@ -175,6 +189,39 @@ static void cam_smmu_print_table(void);

static int cam_smmu_probe(struct platform_device *pdev);

static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr);

/*
 * Deferred page fault handler, run in process context via the
 * iommu_cb_set.smmu_work work item scheduled by
 * cam_smmu_iommu_fault_handler().
 *
 * Drains every pending payload from iommu_cb_set.payload_list and, for
 * each one, invokes all client fault handlers registered on the faulting
 * context bank.  Running here instead of in the IOMMU fault context
 * avoids deadlocks on the context-bank mutexes.
 *
 * NOTE: schedule_work() is a no-op while the work item is already
 * pending, so one execution of this function may find several queued
 * payloads — or, in a race, none at all.  The original code took exactly
 * one entry with list_first_entry() and no list_empty() check, which
 * (a) leaked every additional queued payload and left its fault
 * undispatched, and (b) dereferenced a bogus pointer on an empty list.
 * Looping with an empty-list guard fixes both.
 */
static void cam_smmu_page_fault_work(struct work_struct *work)
{
	int j;
	int idx;
	struct cam_smmu_work_payload *payload;

	for (;;) {
		mutex_lock(&iommu_cb_set.payload_list_lock);
		if (list_empty(&iommu_cb_set.payload_list)) {
			/* All queued faults handled (or spurious run). */
			mutex_unlock(&iommu_cb_set.payload_list_lock);
			return;
		}
		payload = list_first_entry(&iommu_cb_set.payload_list,
				struct cam_smmu_work_payload,
				list);
		list_del(&payload->list);
		mutex_unlock(&iommu_cb_set.payload_list_lock);

		/* Dispatch this fault to every registered client handler */
		idx = payload->idx;
		mutex_lock(&iommu_cb_set.cb_info[idx].lock);
		cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova);
		for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
			if ((iommu_cb_set.cb_info[idx].handler[j])) {
				iommu_cb_set.cb_info[idx].handler[j](
					payload->domain,
					payload->dev,
					payload->iova,
					payload->flags,
					iommu_cb_set.cb_info[idx].token[j]);
			}
		}
		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
		kfree(payload);
	}
}

static void cam_smmu_print_list(int idx)
{
	struct cam_dma_buff_info *mapping;
@@ -341,7 +388,8 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
		int flags, void *token)
{
	char *cb_name;
	int idx, j = 0;
	int idx;
	struct cam_smmu_work_payload *payload;

	if (!token) {
		pr_err("Error: token is NULL\n");
@@ -363,16 +411,23 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
		return 0;
	}

	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
	cam_smmu_check_vaddr_in_range(idx, (void *)iova);
	for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
		if ((iommu_cb_set.cb_info[idx].handler[j])) {
			iommu_cb_set.cb_info[idx].handler[j](
				domain, dev, iova, flags,
				iommu_cb_set.cb_info[idx].token[j]);
		}
	}
	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
	payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
	if (!payload)
		return 0;

	payload->domain = domain;
	payload->dev = dev;
	payload->iova = iova;
	payload->flags = flags;
	payload->token = token;
	payload->idx = idx;

	mutex_lock(&iommu_cb_set.payload_list_lock);
	list_add_tail(&payload->list, &iommu_cb_set.payload_list);
	mutex_unlock(&iommu_cb_set.payload_list_lock);

	schedule_work(&iommu_cb_set.smmu_work);

	return 0;
}

@@ -1582,6 +1637,11 @@ static int cam_smmu_probe(struct platform_device *pdev)
				NULL, &pdev->dev);
	if (rc < 0)
		pr_err("Error: populating devices\n");

	INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
	mutex_init(&iommu_cb_set.payload_list_lock);
	INIT_LIST_HEAD(&iommu_cb_set.payload_list);

	return rc;
}