Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2b740cf2 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "drm/msm: remove smmu device mapping/attach from SDE driver probe"

parents 8f7798f5 d84f3fa5
Loading
Loading
Loading
Loading
+9 −3
Original line number Diff line number Diff line
@@ -37,8 +37,6 @@
		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-controller;
		#interrupt-cells = <1>;
		iommus = <&apps_smmu 0x820 0x402>;
		qcom,iommu-dma = "disabled";

		#power-domain-cells = <0>;

@@ -234,10 +232,18 @@
			};
		};

		smmu_sde_unsec: qcom,smmu_sde_unsec_cb {
			compatible = "qcom,smmu_sde_unsec";
			iommus = <&apps_smmu 0x820 0x402>;
			qcom,iommu-dma-addr-pool = <0x00020000 0xfffe0000>;
			qcom,iommu-earlymap; /* for cont-splash */
		};

		smmu_sde_sec: qcom,smmu_sde_sec_cb {
			compatible = "qcom,smmu_sde_sec";
			iommus = <&apps_smmu 0x821 0x400>;
			qcom,iommu-dma = "disabled";
			qcom,iommu-dma-addr-pool = <0x00020000 0xfffe0000>;
			qcom,iommu-vmid = <0xa>;
		};

		/* data and reg bus scale settings */
+16 −42
Original line number Diff line number Diff line
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -79,6 +79,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct device *aspace_dev;

	if (obj->import_attach)
		return msm_obj->pages;
@@ -114,10 +115,12 @@ static struct page **get_pages(struct drm_gem_object *obj)
		 * Make sure to flush the CPU cache for newly allocated memory
		 * so we don't get ourselves into trouble with a dirty cache
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
			dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
	}

	return msm_obj->pages;
}
@@ -136,6 +139,7 @@ static void put_pages_vram(struct drm_gem_object *obj)

static void put_pages(struct drm_gem_object *obj)
{
	struct device *aspace_dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
@@ -144,10 +148,13 @@ static void put_pages(struct drm_gem_object *obj)
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
				aspace_dev =
				    msm_gem_get_aspace_device(msm_obj->aspace);
				dma_unmap_sg(aspace_dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);
			}

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
@@ -187,6 +194,7 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
void msm_gem_sync(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj;
	struct device *aspace_dev;

	if (!obj)
		return;
@@ -197,7 +205,8 @@ void msm_gem_sync(struct drm_gem_object *obj)
	 * dma_sync_sg_for_device synchronises a single contiguous or
	 * scatter/gather mapping for the CPU and device.
	 */
	dma_sync_sg_for_device(obj->dev->dev, msm_obj->sgt->sgl,
	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
	dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
		       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
}

@@ -427,44 +436,9 @@ int msm_gem_get_iova(struct drm_gem_object *obj,

	if (!vma) {
		struct page **pages;
		struct device *dev;
		struct dma_buf *dmabuf;
		bool reattach = false;

		/*
		 * both secure/non-secure domains are attached with the default
		 * devive (non-sec) with dma_buf_attach during
		 * msm_gem_prime_import. detach and attach the correct device
		 * to the dma_buf based on the aspace domain.
		 */
		dev = msm_gem_get_aspace_device(aspace);
		if (dev && obj->import_attach &&
				(dev != obj->import_attach->dev)) {
			dmabuf = obj->import_attach->dmabuf;

			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
					 obj->import_attach->dev, dev);
			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt);


			if (msm_obj->sgt)
				dma_buf_unmap_attachment(obj->import_attach,
							msm_obj->sgt,
							DMA_BIDIRECTIONAL);
			dma_buf_detach(dmabuf, obj->import_attach);

			obj->import_attach = dma_buf_attach(dmabuf, dev);
			if (IS_ERR(obj->import_attach)) {
				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
						PTR_ERR(obj->import_attach));
				goto unlock;
			}
			reattach = true;
		}

		/* perform delayed import for buffers without existing sgt */
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
				|| reattach) {
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))) {
			ret = msm_gem_delayed_import(obj);
			if (ret) {
				DRM_ERROR("delayed dma-buf import failed %d\n",
+32 −11
Original line number Diff line number Diff line
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -18,9 +18,12 @@

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_kms.h"

#include <linux/dma-buf.h>
#include <linux/ion.h>
#include <linux/msm_ion.h>

struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -87,13 +90,19 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
	struct dma_buf_attachment *attach;
	struct sg_table *sgt = NULL;
	struct drm_gem_object *obj;
	struct device *attach_dev;
	struct device *attach_dev = NULL;
	unsigned long flags = 0;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;
	u32 domain;

	if (!dma_buf)
	if (!dma_buf || !dev->dev_private)
		return ERR_PTR(-EINVAL);

	priv = dev->dev_private;
	kms = priv->kms;

	if (dma_buf->priv && !dma_buf->ops->begin_cpu_access) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
@@ -111,25 +120,37 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
		return ERR_PTR(-EINVAL);
	}

	attach_dev = dev->dev;
	get_dma_buf(dma_buf);

	ret = dma_buf_get_flags(dma_buf, &flags);
	if (ret) {
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
		goto fail_put;
	}

	domain = (flags & ION_FLAG_SECURE) ? MSM_SMMU_DOMAIN_SECURE :
						MSM_SMMU_DOMAIN_UNSECURE;
	if (kms && kms->funcs->get_address_space_device)
		attach_dev = kms->funcs->get_address_space_device(
							kms, domain);
	if (!attach_dev) {
		DRM_ERROR("aspace device not found for domain:%d\n", domain);
		ret = -EINVAL;
		goto fail_put;
	}

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach)) {
		DRM_ERROR("dma_buf_attach failure, err=%ld\n", PTR_ERR(attach));
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	/*
	 * For cached buffers where CPU access is required, dma_map_attachment
	 * must be called now to allow user-space to perform cpu sync begin/end
	 * otherwise do delayed mapping during the commit.
	 */
	ret = dma_buf_get_flags(dma_buf, &flags);
	if (ret) {
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
		goto fail_put;
	} else if (flags & ION_FLAG_CACHED) {
	if (flags & ION_FLAG_CACHED) {
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
		sgt = dma_buf_map_attachment(
				attach, DMA_BIDIRECTIONAL);
+4 −1
Original line number Diff line number Diff line
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -107,6 +107,9 @@ struct msm_kms_funcs {
	struct msm_gem_address_space *(*get_address_space)(
			struct msm_kms *kms,
			unsigned int domain);
	struct device *(*get_address_space_device)(
			struct msm_kms *kms,
			unsigned int domain);
#ifdef CONFIG_DEBUG_FS
	/* debugfs: */
	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
+32 −130
Original line number Diff line number Diff line
@@ -29,17 +29,9 @@
#include "msm_mmu.h"
#include "sde_dbg.h"

#ifndef SZ_4G
#define SZ_4G	(((size_t) SZ_1G) * 4)
#endif

#ifndef SZ_2G
#define SZ_2G	(((size_t) SZ_1G) * 2)
#endif

struct msm_smmu_client {
	struct device *dev;
	struct dma_iommu_mapping *mmu_mapping;
	struct iommu_domain *domain;
	bool domain_attached;
	bool secure;
};
@@ -52,17 +44,12 @@ struct msm_smmu {

struct msm_smmu_domain {
	const char *label;
	size_t va_start;
	size_t va_size;
	bool secure;
};

#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
#define msm_smmu_to_client(smmu) (smmu->client)

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
	const struct msm_smmu_domain *domain);

static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
		int cnt)
{
@@ -79,11 +66,9 @@ static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
	if (client->domain_attached)
		return 0;

	rc = __depr_arm_iommu_attach_device(client->dev,
			client->mmu_mapping);
	rc = iommu_attach_device(client->domain, client->dev);
	if (rc) {
		dev_err(client->dev, "iommu attach dev failed (%d)\n",
				rc);
		dev_err(client->dev, "iommu attach dev failed (%d)\n", rc);
		return rc;
	}

@@ -109,7 +94,7 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
		return;

	pm_runtime_get_sync(mmu->dev);
	__depr_arm_iommu_detach_device(client->dev);
	iommu_detach_device(client->domain, client->dev);
	pm_runtime_put_sync(mmu->dev);

	client->domain_attached = false;
@@ -123,10 +108,10 @@ static int msm_smmu_set_attribute(struct msm_mmu *mmu,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret = 0;

	if (!client || !client->mmu_mapping)
	if (!client || !client->domain)
		return -ENODEV;

	ret = iommu_domain_set_attr(client->mmu_mapping->domain, attr, data);
	ret = iommu_domain_set_attr(client->domain, attr, data);
	if (ret)
		DRM_ERROR("set domain attribute failed:%d\n", ret);

@@ -140,10 +125,10 @@ static int msm_smmu_one_to_one_unmap(struct msm_mmu *mmu,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret = 0;

	if (!client || !client->mmu_mapping)
	if (!client || !client->domain)
		return -ENODEV;

	ret = iommu_unmap(client->mmu_mapping->domain, dest_address, size);
	ret = iommu_unmap(client->domain, dest_address, size);
	if (ret != size)
		pr_err("smmu unmap failed\n");

@@ -157,10 +142,10 @@ static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	int ret = 0;

	if (!client || !client->mmu_mapping)
	if (!client || !client->domain)
		return -ENODEV;

	ret = iommu_map(client->mmu_mapping->domain, dest_address, dest_address,
	ret = iommu_map(client->domain, dest_address, dest_address,
			size, prot);
	if (ret)
		pr_err("smmu map failed\n");
@@ -176,13 +161,12 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
	size_t ret = 0;

	if (sgt && sgt->sgl) {
		ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl,
		ret = iommu_map_sg(client->domain, iova, sgt->sgl,
				sgt->nents, prot);
		WARN_ON((int)ret < 0);
		DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address,
				sgt->sgl->dma_length, prot);
		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
				prot);
		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, prot);
	}
	return (ret == len) ? 0 : -EINVAL;
}
@@ -194,7 +178,7 @@ static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);

	pm_runtime_get_sync(mmu->dev);
	iommu_unmap(client->mmu_mapping->domain, iova, len);
	iommu_unmap(client->domain, iova, len);
	pm_runtime_put_sync(mmu->dev);

	return 0;
@@ -304,26 +288,18 @@ static const struct msm_mmu_funcs funcs = {
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
	[MSM_SMMU_DOMAIN_UNSECURE] = {
		.label = "mdp_ns",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_SECURE] = {
		.label = "mdp_s",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = true,
	},
	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
		.label = "rot_ns",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = false,
	},
	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
		.label = "rot_s",
		.va_start = SZ_2G,
		.va_size = SZ_4G - SZ_2G,
		.secure = true,
	},
};
@@ -363,27 +339,6 @@ static struct device *msm_smmu_device_create(struct device *dev,
	}
	DRM_DEBUG("found domain %d compat: %s\n", domain, compat);

	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
		int rc;

		smmu->client = devm_kzalloc(dev,
				sizeof(struct msm_smmu_client), GFP_KERNEL);
		if (!smmu->client)
			return ERR_PTR(-ENOMEM);

		smmu->client->dev = dev;

		rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
			msm_smmu_dt_match[i].data);
		if (rc) {
			devm_kfree(dev, smmu->client);
			smmu->client = NULL;
			return ERR_PTR(rc);
		}

		return NULL;
	}

	child = of_find_compatible_node(dev->of_node, NULL, compat);
	if (!child) {
		DRM_DEBUG("unable to find compatible node for %s\n", compat);
@@ -407,19 +362,11 @@ struct msm_mmu *msm_smmu_new(struct device *dev,
{
	struct msm_smmu *smmu;
	struct device *client_dev;
	bool smmu_full_map;

	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	smmu_full_map = of_property_read_bool(dev->of_node,
					"qcom,fullsize-va-map");
	if (smmu_full_map) {
		msm_smmu_domains[domain].va_start = SZ_128K;
		msm_smmu_domains[domain].va_size = SZ_4G - SZ_128K;
	}

	client_dev = msm_smmu_device_create(dev, domain, smmu);
	if (IS_ERR(client_dev)) {
		kfree(smmu);
@@ -461,62 +408,6 @@ static int msm_smmu_fault_handler(struct iommu_domain *domain,
	return rc;
}

static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
	const struct msm_smmu_domain *domain)
{
	int rc;
	int mdphtw_llc_enable = 1;

	client->mmu_mapping = __depr_arm_iommu_create_mapping(
			&platform_bus_type, domain->va_start, domain->va_size);
	if (IS_ERR(client->mmu_mapping)) {
		dev_err(client->dev,
			"iommu create mapping failed for domain=%s\n",
			domain->label);
		return PTR_ERR(client->mmu_mapping);
	}

	rc = iommu_domain_set_attr(client->mmu_mapping->domain,
			DOMAIN_ATTR_USE_UPSTREAM_HINT, &mdphtw_llc_enable);
	if (rc) {
		dev_err(client->dev, "couldn't enable mdp pagetable walks: %d\n",
			rc);
		goto error;
	}

	if (domain->secure) {
		int secure_vmid = VMID_CP_PIXEL;

		client->secure = true;
		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
		if (rc) {
			dev_err(client->dev, "couldn't set secure pix vmid\n");
			goto error;
		}
	}

	if (!client->dev->dma_parms)
		client->dev->dma_parms = devm_kzalloc(client->dev,
				sizeof(*client->dev->dma_parms), GFP_KERNEL);

	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));

	iommu_set_fault_handler(client->mmu_mapping->domain,
			msm_smmu_fault_handler, (void *)client);

	DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
			domain->label, domain->va_start, domain->va_size,
			domain->secure);

	return 0;

error:
	__depr_arm_iommu_release_mapping(client->mmu_mapping);
	return rc;
}

/**
 * msm_smmu_probe()
 * @pdev: platform device
@@ -531,7 +422,6 @@ static int msm_smmu_probe(struct platform_device *pdev)
	const struct of_device_id *match;
	struct msm_smmu_client *client;
	const struct msm_smmu_domain *domain;
	int rc;

	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
@@ -552,11 +442,27 @@ static int msm_smmu_probe(struct platform_device *pdev)
		return -ENOMEM;

	client->dev = &pdev->dev;
	client->domain = iommu_get_domain_for_dev(client->dev);
	if (!client->domain) {
		dev_err(&pdev->dev, "iommu get domain for dev failed\n");
		return -EINVAL;
	}

	if (!client->dev->dma_parms)
		client->dev->dma_parms = devm_kzalloc(client->dev,
				sizeof(*client->dev->dma_parms), GFP_KERNEL);
	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));

	iommu_set_fault_handler(client->domain,
			msm_smmu_fault_handler, (void *)client);

	DRM_INFO("Created domain %s, secure=%d\n",
			domain->label, domain->secure);

	rc = _msm_smmu_create_mapping(client, domain);
	platform_set_drvdata(pdev, client);

	return rc;
	return 0;
}

static int msm_smmu_remove(struct platform_device *pdev)
@@ -564,11 +470,7 @@ static int msm_smmu_remove(struct platform_device *pdev)
	struct msm_smmu_client *client;

	client = platform_get_drvdata(pdev);
	if (client->domain_attached) {
		__depr_arm_iommu_detach_device(client->dev);
	client->domain_attached = false;
	}
	__depr_arm_iommu_release_mapping(client->mmu_mapping);

	return 0;
}
Loading