Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d84f3fa5 authored by Veera Sundaram Sankaran
Browse files

drm/msm: remove smmu device mapping/attach from SDE driver probe



In 4.19, the SMMU driver handles mapping/attach of SMMU
context-banks for all the clients during bootup.
Remove the mapping/attach code from SDE driver probe and
add the corresponding device-tree properties. Additional
hints required for MDP non-secure/secure context-banks,
like early-map for cont-splash and secure-vmid, are also
passed through device-tree properties. The dynamic
attach/detach of context bank is still in place to support
a few secure use-cases.

Change-Id: I5a076be8fdfdc82a19bdb7a435e5d98cbafd9802
Signed-off-by: Veera Sundaram Sankaran <veeras@codeaurora.org>
parent cb9778b2
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -235,13 +235,15 @@
		smmu_sde_unsec: qcom,smmu_sde_unsec_cb {
			compatible = "qcom,smmu_sde_unsec";
			iommus = <&apps_smmu 0x820 0x402>;
			qcom,iommu-dma = "disabled";
			qcom,iommu-dma-addr-pool = <0x00020000 0xfffe0000>;
			qcom,iommu-earlymap; /* for cont-splash */
		};

		smmu_sde_sec: qcom,smmu_sde_sec_cb {
			compatible = "qcom,smmu_sde_sec";
			iommus = <&apps_smmu 0x821 0x400>;
			qcom,iommu-dma = "disabled";
			qcom,iommu-dma-addr-pool = <0x00020000 0xfffe0000>;
			qcom,iommu-vmid = <0xa>;
		};

		/* data and reg bus scale settings */
+32 −109
Original line number Diff line number Diff line
@@ -29,17 +29,9 @@
#include "msm_mmu.h"
#include "sde_dbg.h"

#ifndef SZ_4G
#define SZ_4G	(((size_t) SZ_1G) * 4)
#endif

#ifndef SZ_2G
#define SZ_2G	(((size_t) SZ_1G) * 2)
#endif

struct msm_smmu_client {
	struct device *dev;
	struct dma_iommu_mapping *mmu_mapping;
	struct iommu_domain *domain;
	bool domain_attached;
	bool secure;
};
@@ -52,17 +44,12 @@ struct msm_smmu {

struct msm_smmu_domain {
	const char *label;
	size_t va_start;
	size_t va_size;
	bool secure;
};

#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
#define msm_smmu_to_client(smmu) (smmu->client)

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
	const struct msm_smmu_domain *domain);

static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
@@ -79,11 +66,9 @@ static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
	if (client->domain_attached)
		return 0;

	rc = __depr_arm_iommu_attach_device(client->dev,
			client->mmu_mapping);
	rc = iommu_attach_device(client->domain, client->dev);
	if (rc) {
		dev_err(client->dev, "iommu attach dev failed (%d)\n",
				rc);
		dev_err(client->dev, "iommu attach dev failed (%d)\n", rc);
		return rc;
	}

@@ -109,7 +94,7 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
		return;

	pm_runtime_get_sync(mmu->dev);
	__depr_arm_iommu_detach_device(client->dev);
	iommu_detach_device(client->domain, client->dev);
	pm_runtime_put_sync(mmu->dev);

	client->domain_attached = false;
@@ -123,10 +108,10 @@ static int msm_smmu_set_attribute(struct msm_mmu *mmu,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret = 0;

	if (!client || !client->mmu_mapping)
	if (!client || !client->domain)
		return -ENODEV;

	ret = iommu_domain_set_attr(client->mmu_mapping->domain, attr, data);
	ret = iommu_domain_set_attr(client->domain, attr, data);
	if (ret)
		DRM_ERROR("set domain attribute failed:%d\n", ret);

@@ -140,10 +125,10 @@ static int msm_smmu_one_to_one_unmap(struct msm_mmu *mmu,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret = 0;

	if (!client || !client->mmu_mapping)
	if (!client || !client->domain)
		return -ENODEV;

	ret = iommu_unmap(client->mmu_mapping->domain, dest_address, size);
	ret = iommu_unmap(client->domain, dest_address, size);
	if (ret != size)
		pr_err("smmu unmap failed\n");

@@ -157,10 +142,10 @@ static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret = 0;

	if (!client || !client->mmu_mapping)
	if (!client || !client->domain)
		return -ENODEV;

	ret = iommu_map(client->mmu_mapping->domain, dest_address, dest_address,
	ret = iommu_map(client->domain, dest_address, dest_address,
			size, prot);
	if (ret)
		pr_err("smmu map failed\n");
@@ -176,13 +161,12 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
	size_t ret = 0;

	if (sgt && sgt->sgl) {
		ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl,
		ret = iommu_map_sg(client->domain, iova, sgt->sgl,
				sgt->nents, prot);
		WARN_ON((int)ret < 0);
		DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address,
				sgt->sgl->dma_length, prot);
		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
				prot);
		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, prot);
	}
	return (ret == len) ? 0 : -EINVAL;
}
@@ -194,7 +178,7 @@ static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	pm_runtime_get_sync(mmu->dev);
	iommu_unmap(client->mmu_mapping->domain, iova, len);
	iommu_unmap(client->domain, iova, len);
	pm_runtime_put_sync(mmu->dev);

	return 0;
@@ -304,26 +288,18 @@ static const struct msm_mmu_funcs funcs = {
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
	[MSM_SMMU_DOMAIN_UNSECURE] = {
		.label = "mdp_ns",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_SECURE] = {
		.label = "mdp_s",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = true,
	},
	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
		.label = "rot_ns",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
		.label = "rot_s",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = true,
	},
};
@@ -386,19 +362,11 @@ struct msm_mmu *msm_smmu_new(struct device *dev,
{
	struct msm_smmu *smmu;
	struct device *client_dev;
	bool smmu_full_map;

	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	smmu_full_map = of_property_read_bool(dev->of_node,
					"qcom,fullsize-va-map");
	if (smmu_full_map) {
		msm_smmu_domains[domain].va_start = SZ_128K;
		msm_smmu_domains[domain].va_size = SZ_4G - SZ_128K;
	}

	client_dev = msm_smmu_device_create(dev, domain, smmu);
	if (IS_ERR(client_dev)) {
		kfree(smmu);
@@ -440,62 +408,6 @@ static int msm_smmu_fault_handler(struct iommu_domain *domain,
	return rc;
}

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
	const struct msm_smmu_domain *domain)
{
	int rc;
	int mdphtw_llc_enable = 1;

	client->mmu_mapping = __depr_arm_iommu_create_mapping(
			&platform_bus_type, domain->va_start, domain->va_size);
	if (IS_ERR(client->mmu_mapping)) {
		dev_err(client->dev,
			"iommu create mapping failed for domain=%s\n",
			domain->label);
		return PTR_ERR(client->mmu_mapping);
	}

	rc = iommu_domain_set_attr(client->mmu_mapping->domain,
			DOMAIN_ATTR_USE_UPSTREAM_HINT, &mdphtw_llc_enable);
	if (rc) {
		dev_err(client->dev, "couldn't enable mdp pagetable walks: %d\n",
			rc);
		goto error;
	}

	if (domain->secure) {
		int secure_vmid = VMID_CP_PIXEL;

		client->secure = true;
		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
		if (rc) {
			dev_err(client->dev, "couldn't set secure pix vmid\n");
			goto error;
		}
	}

	if (!client->dev->dma_parms)
		client->dev->dma_parms = devm_kzalloc(client->dev,
				sizeof(*client->dev->dma_parms), GFP_KERNEL);

	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));

	iommu_set_fault_handler(client->mmu_mapping->domain,
			msm_smmu_fault_handler, (void *)client);

	DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
			domain->label, domain->va_start, domain->va_size,
			domain->secure);

	return 0;

error:
	__depr_arm_iommu_release_mapping(client->mmu_mapping);
	return rc;
}

/**
 * msm_smmu_probe()
 * @pdev: platform device
@@ -510,7 +422,6 @@ static int msm_smmu_probe(struct platform_device *pdev)
	const struct of_device_id *match;
	struct msm_smmu_client *client;
	const struct msm_smmu_domain *domain;
	int rc;

	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
@@ -531,11 +442,27 @@ static int msm_smmu_probe(struct platform_device *pdev)
		return -ENOMEM;

	client->dev = &pdev->dev;
	client->domain = iommu_get_domain_for_dev(client->dev);
	if (!client->domain) {
		dev_err(&pdev->dev, "iommu get domain for dev failed\n");
		return -EINVAL;
	}

	if (!client->dev->dma_parms)
		client->dev->dma_parms = devm_kzalloc(client->dev,
				sizeof(*client->dev->dma_parms), GFP_KERNEL);
	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));

	iommu_set_fault_handler(client->domain,
			msm_smmu_fault_handler, (void *)client);

	DRM_INFO("Created domain %s, secure=%d\n",
			domain->label, domain->secure);

	rc = _msm_smmu_create_mapping(client, domain);
	platform_set_drvdata(pdev, client);

	return rc;
	return 0;
}

static int msm_smmu_remove(struct platform_device *pdev)
@@ -543,11 +470,7 @@ static int msm_smmu_remove(struct platform_device *pdev)
	struct msm_smmu_client *client;

	client = platform_get_drvdata(pdev);
	if (client->domain_attached) {
		__depr_arm_iommu_detach_device(client->dev);
	client->domain_attached = false;
	}
	__depr_arm_iommu_release_mapping(client->mmu_mapping);

	return 0;
}
+7 −37
Original line number Diff line number Diff line
@@ -2844,19 +2844,13 @@ static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)

static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i;

	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		mmu->funcs->detach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
		msm_gem_address_space_put(sde_kms->aspace[i]);

		sde_kms->aspace[i] = NULL;
	}

@@ -2867,7 +2861,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i, ret;
	int early_map = 1;
	int early_map = 0;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;
@@ -2880,41 +2874,15 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
			continue;
		}

		/*
		 * Before attaching SMMU, we need to honor continuous splash
		 * use case where hardware tries to fetch buffer from physical
		 * address. To facilitate this requirement we need to have a
		 * one to one mapping on SMMU until we have our first frame.
		 */
		if (i == MSM_SMMU_DOMAIN_UNSECURE) {
			ret = mmu->funcs->set_attribute(mmu,
				DOMAIN_ATTR_EARLY_MAP,
				&early_map);
			if (ret) {
				SDE_ERROR("failed to set map att: %d\n", ret);
				goto fail;
			}
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
			mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;

		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
			msm_gem_address_space_put(aspace);
			goto fail;
		}
		aspace->domain_attached = true;
		early_map = 0;

		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
@@ -2927,18 +2895,20 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
		}

		/*
		 * Turning off early map after generating one to one
		 * mapping for splash address space.
		 * disable early-map which would have been enabled during
		 * bootup by smmu through the device-tree hint for cont-splash
		 */
		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
				 &early_map);
		if (ret) {
			SDE_ERROR("failed to set map att ret:%d\n", ret);
			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
					ret, early_map);
			goto early_map_fail;
		}
	}

	return 0;

early_map_fail:
	_sde_kms_unmap_all_splash_regions(sde_kms);
fail: