Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cb9778b2 authored by Veera Sundaram Sankaran
Browse files

drm/msm/sde: move MDP non-secure CB to a separate device



Currently, the MDP non-secure context-bank is part of the
MDP device. Move it to a separate device, so that it would
have its own probe and make the implementation similar
to other display context-banks. This change also fixes
the way secure-buffers are attached/mapped during the
prime_fd_to_handle time.

Change-Id: I2aadfb573c10f904e4e1a79b09cac889da5e8a21
Signed-off-by: Veera Sundaram Sankaran <veeras@codeaurora.org>
parent 52a95c85
Loading
Loading
Loading
Loading
+6 −2
Original line number Diff line number Diff line
@@ -37,8 +37,6 @@
		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-controller;
		#interrupt-cells = <1>;
		iommus = <&apps_smmu 0x820 0x402>;
		qcom,iommu-dma = "disabled";

		#power-domain-cells = <0>;

@@ -234,6 +232,12 @@
			};
		};

		/*
		 * MDP non-secure SMMU context bank, split out of the MDP
		 * device node so it probes as its own platform device,
		 * matching the other display context banks (see
		 * smmu_sde_sec below).
		 */
		smmu_sde_unsec: qcom,smmu_sde_unsec_cb {
			compatible = "qcom,smmu_sde_unsec";
			/* same SID/mask pair previously listed on the MDP node */
			iommus = <&apps_smmu 0x820 0x402>;
			qcom,iommu-dma = "disabled";
		};

		smmu_sde_sec: qcom,smmu_sde_sec_cb {
			compatible = "qcom,smmu_sde_sec";
			iommus = <&apps_smmu 0x821 0x400>;
+16 −42
Original line number Diff line number Diff line
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -79,6 +79,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct device *aspace_dev;

	if (obj->import_attach)
		return msm_obj->pages;
@@ -114,10 +115,12 @@ static struct page **get_pages(struct drm_gem_object *obj)
		 * Make sure to flush the CPU cache for newly allocated memory
		 * so we don't get ourselves into trouble with a dirty cache
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
			dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
	}

	return msm_obj->pages;
}
@@ -136,6 +139,7 @@ static void put_pages_vram(struct drm_gem_object *obj)

static void put_pages(struct drm_gem_object *obj)
{
	struct device *aspace_dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
@@ -144,10 +148,13 @@ static void put_pages(struct drm_gem_object *obj)
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
				aspace_dev =
				    msm_gem_get_aspace_device(msm_obj->aspace);
				dma_unmap_sg(aspace_dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);
			}

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
@@ -187,6 +194,7 @@ void msm_gem_put_pages(struct drm_gem_object *obj)
void msm_gem_sync(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj;
	struct device *aspace_dev;

	if (!obj)
		return;
@@ -197,7 +205,8 @@ void msm_gem_sync(struct drm_gem_object *obj)
	 * dma_sync_sg_for_device synchronises a single contiguous or
	 * scatter/gather mapping for the CPU and device.
	 */
	dma_sync_sg_for_device(obj->dev->dev, msm_obj->sgt->sgl,
	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
	dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
		       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
}

@@ -427,44 +436,9 @@ int msm_gem_get_iova(struct drm_gem_object *obj,

	if (!vma) {
		struct page **pages;
		struct device *dev;
		struct dma_buf *dmabuf;
		bool reattach = false;

		/*
		 * both secure/non-secure domains are attached with the default
		 * devive (non-sec) with dma_buf_attach during
		 * msm_gem_prime_import. detach and attach the correct device
		 * to the dma_buf based on the aspace domain.
		 */
		dev = msm_gem_get_aspace_device(aspace);
		if (dev && obj->import_attach &&
				(dev != obj->import_attach->dev)) {
			dmabuf = obj->import_attach->dmabuf;

			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
					 obj->import_attach->dev, dev);
			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt);


			if (msm_obj->sgt)
				dma_buf_unmap_attachment(obj->import_attach,
							msm_obj->sgt,
							DMA_BIDIRECTIONAL);
			dma_buf_detach(dmabuf, obj->import_attach);

			obj->import_attach = dma_buf_attach(dmabuf, dev);
			if (IS_ERR(obj->import_attach)) {
				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
						PTR_ERR(obj->import_attach));
				goto unlock;
			}
			reattach = true;
		}

		/* perform delayed import for buffers without existing sgt */
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
				|| reattach) {
		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))) {
			ret = msm_gem_delayed_import(obj);
			if (ret) {
				DRM_ERROR("delayed dma-buf import failed %d\n",
+32 −11
Original line number Diff line number Diff line
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -18,9 +18,12 @@

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_kms.h"

#include <linux/dma-buf.h>
#include <linux/ion.h>
#include <linux/msm_ion.h>

struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -87,13 +90,19 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
	struct dma_buf_attachment *attach;
	struct sg_table *sgt = NULL;
	struct drm_gem_object *obj;
	struct device *attach_dev;
	struct device *attach_dev = NULL;
	unsigned long flags = 0;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;
	u32 domain;

	if (!dma_buf)
	if (!dma_buf || !dev->dev_private)
		return ERR_PTR(-EINVAL);

	priv = dev->dev_private;
	kms = priv->kms;

	if (dma_buf->priv && !dma_buf->ops->begin_cpu_access) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
@@ -111,25 +120,37 @@ struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
		return ERR_PTR(-EINVAL);
	}

	attach_dev = dev->dev;
	get_dma_buf(dma_buf);

	ret = dma_buf_get_flags(dma_buf, &flags);
	if (ret) {
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
		goto fail_put;
	}

	domain = (flags & ION_FLAG_SECURE) ? MSM_SMMU_DOMAIN_SECURE :
						MSM_SMMU_DOMAIN_UNSECURE;
	if (kms && kms->funcs->get_address_space_device)
		attach_dev = kms->funcs->get_address_space_device(
							kms, domain);
	if (!attach_dev) {
		DRM_ERROR("aspace device not found for domain:%d\n", domain);
		ret = -EINVAL;
		goto fail_put;
	}

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach)) {
		DRM_ERROR("dma_buf_attach failure, err=%ld\n", PTR_ERR(attach));
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	/*
	 * For cached buffers where CPU access is required, dma_map_attachment
	 * must be called now to allow user-space to perform cpu sync begin/end
	 * otherwise do delayed mapping during the commit.
	 */
	ret = dma_buf_get_flags(dma_buf, &flags);
	if (ret) {
		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
		goto fail_put;
	} else if (flags & ION_FLAG_CACHED) {
	if (flags & ION_FLAG_CACHED) {
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
		sgt = dma_buf_map_attachment(
				attach, DMA_BIDIRECTIONAL);
+4 −1
Original line number Diff line number Diff line
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -107,6 +107,9 @@ struct msm_kms_funcs {
	struct msm_gem_address_space *(*get_address_space)(
			struct msm_kms *kms,
			unsigned int domain);
	struct device *(*get_address_space_device)(
			struct msm_kms *kms,
			unsigned int domain);
#ifdef CONFIG_DEBUG_FS
	/* debugfs: */
	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
+0 −21
Original line number Diff line number Diff line
@@ -363,27 +363,6 @@ static struct device *msm_smmu_device_create(struct device *dev,
	}
	DRM_DEBUG("found domain %d compat: %s\n", domain, compat);

	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
		int rc;

		smmu->client = devm_kzalloc(dev,
				sizeof(struct msm_smmu_client), GFP_KERNEL);
		if (!smmu->client)
			return ERR_PTR(-ENOMEM);

		smmu->client->dev = dev;

		rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
			msm_smmu_dt_match[i].data);
		if (rc) {
			devm_kfree(dev, smmu->client);
			smmu->client = NULL;
			return ERR_PTR(rc);
		}

		return NULL;
	}

	child = of_find_compatible_node(dev->of_node, NULL, compat);
	if (!child) {
		DRM_DEBUG("unable to find compatible node for %s\n", compat);
Loading