Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0519f9a1 authored by Inki Dae's avatar Inki Dae Committed by Inki Dae
Browse files

drm/exynos: add iommu support for exynos drm framework



Changelog v4:
- fix condition to drm_iommu_detach_device function.

Changelog v3:
- add dma_parms->max_segment_size setting of drm_device->dev.
- use devm_kzalloc instead of kzalloc.

Changelog v2:
- fix iommu attach condition.
  . check archdata.dma_ops of drm device instead of
    subdrv device's one.
- code clean to exynos_drm_iommu.c file.
  . remove '#ifdef CONFIG_ARM_DMA_USE_IOMMU' from exynos_drm_iommu.c
    and add it to driver/gpu/drm/exynos/Kconfig.

Changelog v1:
This patch adds iommu support for exynos drm framework with dma mapping
api. In this patch, we used the dma mapping api to allocate physical memory
and map it with the iommu table, removed some existing code, and added
some new code for iommu support.

GEM allocation requires one device object to use dma mapping api so
this patch uses one iommu mapping for all sub drivers. In other words,
all sub drivers share the same iommu mapping.

Signed-off-by: default avatarInki Dae <inki.dae@samsung.com>
Signed-off-by: default avatarKyungmin Park <kyungmin.park@samsung.com>
parent 549a17e4
Loading
Loading
Loading
Loading
+6 −0
Original line number Original line Diff line number Diff line
@@ -10,6 +10,12 @@ config DRM_EXYNOS
	  Choose this option if you have a Samsung SoC EXYNOS chipset.
	  Choose this option if you have a Samsung SoC EXYNOS chipset.
	  If M is selected the module will be called exynosdrm.
	  If M is selected the module will be called exynosdrm.


config DRM_EXYNOS_IOMMU
	bool "EXYNOS DRM IOMMU Support"
	depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
	help
	  Choose this option if you want to use IOMMU feature for DRM.

config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_DMABUF
	bool "EXYNOS DRM DMABUF"
	bool "EXYNOS DRM DMABUF"
	depends on DRM_EXYNOS
	depends on DRM_EXYNOS
+1 −0
Original line number Original line Diff line number Diff line
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
		exynos_drm_plane.o
		exynos_drm_plane.o


exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
+32 −56
Original line number Original line Diff line number Diff line
@@ -33,71 +33,58 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
{
	dma_addr_t start_addr;
	int ret = 0;
	unsigned int npages, i = 0;
	unsigned int npages, i = 0;
	struct scatterlist *sgl;
	struct scatterlist *sgl;
	int ret = 0;
	enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;


	DRM_DEBUG_KMS("%s\n", __FILE__);
	DRM_DEBUG_KMS("%s\n", __FILE__);


	if (IS_NONCONTIG_BUFFER(flags)) {
		DRM_DEBUG_KMS("not support allocation type.\n");
		return -EINVAL;
	}

	if (buf->dma_addr) {
	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
		return 0;
	}
	}


	if (buf->size >= SZ_1M) {
	init_dma_attrs(&buf->dma_attrs);
		npages = buf->size >> SECTION_SHIFT;

		buf->page_size = SECTION_SIZE;
	if (flags & EXYNOS_BO_NONCONTIG)
	} else if (buf->size >= SZ_64K) {
		attr = DMA_ATTR_WRITE_COMBINE;
		npages = buf->size >> 16;

		buf->page_size = SZ_64K;
	dma_set_attr(attr, &buf->dma_attrs);
	} else {

		npages = buf->size >> PAGE_SHIFT;
	buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
		buf->page_size = PAGE_SIZE;
			&buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
	if (!buf->kvaddr) {
		DRM_ERROR("failed to allocate buffer.\n");
		return -ENOMEM;
	}
	}


	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		DRM_ERROR("failed to allocate sg table.\n");
		return -ENOMEM;
		ret = -ENOMEM;
		goto err_free_attrs;
	}
	}


	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
			buf->size);
	if (ret < 0) {
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		DRM_ERROR("failed to get sgtable.\n");
		kfree(buf->sgt);
		goto err_free_sgt;
		buf->sgt = NULL;
		return -ENOMEM;
	}
	}


	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
	npages = buf->sgt->nents;
			&buf->dma_addr, GFP_KERNEL);
	if (!buf->kvaddr) {
		DRM_ERROR("failed to allocate buffer.\n");
		ret = -ENOMEM;
		goto err1;
	}


	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
	if (!buf->pages) {
	if (!buf->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		ret = -ENOMEM;
		goto err2;
		goto err_free_table;
	}
	}


	sgl = buf->sgt->sgl;
	sgl = buf->sgt->sgl;
	start_addr = buf->dma_addr;

	while (i < npages) {
	while (i < npages) {
		buf->pages[i] = phys_to_page(start_addr);
		buf->pages[i] = sg_page(sgl);
		sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
		sg_dma_address(sgl) = start_addr;
		start_addr += buf->page_size;
		sgl = sg_next(sgl);
		sgl = sg_next(sgl);
		i++;
		i++;
	}
	}
@@ -108,14 +95,16 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
			buf->size);
			buf->size);


	return ret;
	return ret;
err2:

	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
err_free_table:
			(dma_addr_t)buf->dma_addr);
	buf->dma_addr = (dma_addr_t)NULL;
err1:
	sg_free_table(buf->sgt);
	sg_free_table(buf->sgt);
err_free_sgt:
	kfree(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;
	buf->sgt = NULL;
err_free_attrs:
	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;


	return ret;
	return ret;
}
}
@@ -125,16 +114,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);
	DRM_DEBUG_KMS("%s.\n", __FILE__);


	/*
	 * release only physically continuous memory and
	 * non-continuous memory would be released by exynos
	 * gem framework.
	 */
	if (IS_NONCONTIG_BUFFER(flags)) {
		DRM_DEBUG_KMS("not support allocation type.\n");
		return;
	}

	if (!buf->dma_addr) {
	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
		return;
@@ -150,11 +129,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
	kfree(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;
	buf->sgt = NULL;


	kfree(buf->pages);
	dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
	buf->pages = NULL;
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);

	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr);
	buf->dma_addr = (dma_addr_t)NULL;
	buf->dma_addr = (dma_addr_t)NULL;
}
}


+37 −50
Original line number Original line Diff line number Diff line
@@ -30,29 +30,31 @@


#include <linux/dma-buf.h>
#include <linux/dma-buf.h>


static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
		unsigned int page_size)
					struct exynos_drm_gem_buf *buf)
{
{
	struct sg_table *sgt = NULL;
	struct sg_table *sgt = NULL;
	struct scatterlist *sgl;
	int ret;
	int i, ret;


	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
	if (!sgt)
		goto out;
		goto out;


	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
	ret = sg_alloc_table(sgt, buf->sgt->nents, GFP_KERNEL);
	if (ret)
	if (ret)
		goto err_free_sgt;
		goto err_free_sgt;


	if (page_size < PAGE_SIZE)
	ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
		page_size = PAGE_SIZE;
				buf->dma_addr, buf->size);

	if (ret < 0) {
	for_each_sg(sgt->sgl, sgl, nr_pages, i)
		DRM_ERROR("failed to get sgtable.\n");
		sg_set_page(sgl, pages[i], page_size, 0);
		goto err_free_table;
	}


	return sgt;
	return sgt;


err_free_table:
	sg_free_table(sgt);
err_free_sgt:
err_free_sgt:
	kfree(sgt);
	kfree(sgt);
	sgt = NULL;
	sgt = NULL;
@@ -68,32 +70,31 @@ static struct sg_table *
	struct drm_device *dev = gem_obj->base.dev;
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct exynos_drm_gem_buf *buf;
	struct sg_table *sgt = NULL;
	struct sg_table *sgt = NULL;
	unsigned int npages;
	int nents;
	int nents;


	DRM_DEBUG_PRIME("%s\n", __FILE__);
	DRM_DEBUG_PRIME("%s\n", __FILE__);


	mutex_lock(&dev->struct_mutex);

	buf = gem_obj->buffer;
	buf = gem_obj->buffer;

	if (!buf) {
	/* there should always be pages allocated. */
		DRM_ERROR("buffer is null.\n");
	if (!buf->pages) {
		return sgt;
		DRM_ERROR("pages is null.\n");
		goto err_unlock;
	}
	}


	npages = buf->size / buf->page_size;
	mutex_lock(&dev->struct_mutex);


	sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
	sgt = exynos_get_sgt(dev, buf);
	if (!sgt) {
	if (!sgt)
		DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
		goto err_unlock;
		goto err_unlock;
	}

	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with iommu.\n");
		sgt = NULL;
		goto err_unlock;
	}


	DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
	DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
			npages, buf->size, buf->page_size);
			buf->size, buf->page_size);


err_unlock:
err_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->struct_mutex);
@@ -105,6 +106,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						enum dma_data_direction dir)
						enum dma_data_direction dir)
{
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	sg_free_table(sgt);
	kfree(sgt);
	kfree(sgt);
	sgt = NULL;
	sgt = NULL;
@@ -196,7 +198,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
	struct scatterlist *sgl;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;
	int ret;


	DRM_DEBUG_PRIME("%s\n", __FILE__);
	DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +234,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
		goto err_unmap_attach;
		goto err_unmap_attach;
	}
	}


	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		ret = -ENOMEM;
		goto err_free_pages;
		goto err_free_buffer;
	}
	}


	sgl = sgt->sgl;
	sgl = sgt->sgl;


	if (sgt->nents == 1) {
	buffer->size = dma_buf->size;
		buffer->dma_addr = sg_dma_address(sgt->sgl);
	buffer->dma_addr = sg_dma_address(sgl);
		buffer->size = sg_dma_len(sgt->sgl);


	if (sgt->nents == 1) {
		/* always physically continuous memory if sgt->nents is 1. */
		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
	} else {
		unsigned int i = 0;
		/*

		 * this case could be CONTIG or NONCONTIG type but for now
		buffer->dma_addr = sg_dma_address(sgl);
		 * sets NONCONTIG.
		while (i < sgt->nents) {
		 * TODO. we have to find a way that exporter can notify
			buffer->pages[i] = sg_page(sgl);
		 * the type of its own buffer to importer.
			buffer->size += sg_dma_len(sgl);
		 */
			sgl = sg_next(sgl);
			i++;
		}

		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}
	}


@@ -277,9 +267,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,


	return &exynos_gem_obj->base;
	return &exynos_gem_obj->base;


err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
err_free_buffer:
	kfree(buffer);
	kfree(buffer);
	buffer = NULL;
	buffer = NULL;
+20 −3
Original line number Original line Diff line number Diff line
@@ -40,6 +40,7 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_iommu.h"


#define DRIVER_NAME	"exynos"
#define DRIVER_NAME	"exynos"
#define DRIVER_DESC	"Samsung SoC DRM"
#define DRIVER_DESC	"Samsung SoC DRM"
@@ -66,6 +67,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
	INIT_LIST_HEAD(&private->pageflip_event_list);
	INIT_LIST_HEAD(&private->pageflip_event_list);
	dev->dev_private = (void *)private;
	dev->dev_private = (void *)private;


	/*
	 * create mapping to manage iommu table and set a pointer to iommu
	 * mapping structure to iommu_mapping of private data.
	 * also this iommu_mapping can be used to check if iommu is supported
	 * or not.
	 */
	ret = drm_create_iommu_mapping(dev);
	if (ret < 0) {
		DRM_ERROR("failed to create iommu mapping.\n");
		goto err_crtc;
	}

	drm_mode_config_init(dev);
	drm_mode_config_init(dev);


	/* init kms poll for handling hpd */
	/* init kms poll for handling hpd */
@@ -80,7 +93,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
	for (nr = 0; nr < MAX_CRTC; nr++) {
	for (nr = 0; nr < MAX_CRTC; nr++) {
		ret = exynos_drm_crtc_create(dev, nr);
		ret = exynos_drm_crtc_create(dev, nr);
		if (ret)
		if (ret)
			goto err_crtc;
			goto err_release_iommu_mapping;
	}
	}


	for (nr = 0; nr < MAX_PLANE; nr++) {
	for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +102,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)


		plane = exynos_plane_init(dev, possible_crtcs, false);
		plane = exynos_plane_init(dev, possible_crtcs, false);
		if (!plane)
		if (!plane)
			goto err_crtc;
			goto err_release_iommu_mapping;
	}
	}


	ret = drm_vblank_init(dev, MAX_CRTC);
	ret = drm_vblank_init(dev, MAX_CRTC);
	if (ret)
	if (ret)
		goto err_crtc;
		goto err_release_iommu_mapping;


	/*
	/*
	 * probe sub drivers such as display controller and hdmi driver,
	 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +139,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
	exynos_drm_device_unregister(dev);
	exynos_drm_device_unregister(dev);
err_vblank:
err_vblank:
	drm_vblank_cleanup(dev);
	drm_vblank_cleanup(dev);
err_release_iommu_mapping:
	drm_release_iommu_mapping(dev);
err_crtc:
err_crtc:
	drm_mode_config_cleanup(dev);
	drm_mode_config_cleanup(dev);
	kfree(private);
	kfree(private);
@@ -142,6 +157,8 @@ static int exynos_drm_unload(struct drm_device *dev)
	drm_vblank_cleanup(dev);
	drm_vblank_cleanup(dev);
	drm_kms_helper_poll_fini(dev);
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_mode_config_cleanup(dev);

	drm_release_iommu_mapping(dev);
	kfree(dev->dev_private);
	kfree(dev->dev_private);


	dev->dev_private = NULL;
	dev->dev_private = NULL;
Loading