
Commit 774e2ad3 authored by Olav Haugan, committed by Yiduo Wang

iommu: Add support for delayed unmapping of ion/dma_buf buffer

Add new APIs to allow clients to map and unmap dma_buf buffers created
by ION. The call to the unmap API does not actually unmap the buffer
from the IOMMU; the unmapping occurs when the ION/dma_buf buffer is
actually freed. This behavior can be disabled with
MSM_DMA_ATTR_NO_DELAYED_UNMAP.

Change-Id: Ic4dbd3b582eb0388020c650cab5fbb1dad67ae81
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
parent 125aa701
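
For orientation, here is a minimal client-side sketch of the new API. It is
illustrative only and not part of the patch: example_map() and its dev,
dmabuf and table arguments are placeholders, and it assumes the caller
already holds a reference to the dma_buf, has the buffer's sg_table, and
includes <linux/msm_dma_iommu_mapping.h>.

static int example_map(struct device *dev, struct dma_buf *dmabuf,
		       struct sg_table *table)
{
	int nents;

	/* Map with delayed unmapping (no MSM_DMA_ATTR_NO_DELAYED_UNMAP). */
	nents = msm_dma_map_sg_lazy(dev, table->sgl, table->nents,
				    DMA_BIDIRECTIONAL, dmabuf);
	if (nents <= 0)
		return nents ? nents : -ENOMEM;

	/* ... DMA to/from the buffer ... */

	/*
	 * Logically release the mapping. The actual IOMMU unmap is deferred
	 * until the ION/dma_buf buffer itself is freed.
	 */
	msm_dma_unmap_sg(dev, table->sgl, table->nents,
			 DMA_BIDIRECTIONAL, dmabuf);
	return 0;
}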
drivers/iommu/Makefile  +1 −0
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
+obj-$(CONFIG_IOMMU_API) += msm_dma_iommu_mapping.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
drivers/iommu/msm_dma_iommu_mapping.c  +384 −0 (new file)
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>

#include <linux/msm_dma_iommu_mapping.h>

/**
 * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
 * @lnode: list node to exist in the buffer's list of iommu mappings
 * @dev: Device this is mapped to. Used as key
 * @sgl: The scatterlist for this mapping
 * @nents: Number of entries in sgl
 * @dir: The direction for the unmap.
 * @meta: Backpointer to the meta this mapping belongs to.
 * @ref: for reference counting this mapping
 *
 * Represents a mapping of one dma_buf buffer to a particular device
 * and address range. There may exist other mappings of this buffer in
 * different devices. All mappings will have the same cacheability and security.
 */
struct msm_iommu_map {
	struct list_head lnode;
	struct rb_node node;
	struct device *dev;
	struct scatterlist sgl;
	unsigned int nents;
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
};

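/**
 * struct msm_iommu_meta - per-buffer bookkeeping for delayed unmapping
 * @node: rb-tree node linking this meta into the global tree, keyed by @buffer
 * @iommu_maps: list of msm_iommu_map entries, one per device the buffer is
 *              mapped to
 * @ref: reference count for this meta
 * @lock: protects @iommu_maps against concurrent map/unmap
 * @buffer: the underlying ION buffer (dma_buf->priv), used as the lookup key
 */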
struct msm_iommu_meta {
	struct rb_node node;
	struct list_head iommu_maps;
	struct kref ref;
	struct mutex lock;
	void *buffer;
};

static struct rb_root iommu_root;
static DEFINE_MUTEX(msm_iommu_map_mutex);

static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (meta->buffer < entry->buffer) {
			p = &(*p)->rb_left;
		} else if (meta->buffer > entry->buffer) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: dma_buf %p already exists\n", __func__,
				entry->buffer);
			BUG();
		}
	}

	rb_link_node(&meta->node, parent, p);
	rb_insert_color(&meta->node, root);
}

static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry = NULL;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static void msm_iommu_add(struct msm_iommu_meta *meta,
			  struct msm_iommu_map *iommu)
{
	struct msm_iommu_map *entry;

	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
		if (entry->dev == iommu->dev) {
			pr_err("%s: dma_buf %p already has mapping to device %p\n",
				__func__, meta->buffer, iommu->dev);
			BUG();
		}
	}
	INIT_LIST_HEAD(&iommu->lnode);
	list_add(&iommu->lnode, &meta->iommu_maps);
}

static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
					      struct device *dev)
{
	struct msm_iommu_map *entry;

	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
		if (entry->dev == dev)
			return entry;
	}

	return NULL;
}

static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
{
	struct msm_iommu_meta *meta;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&meta->iommu_maps);
	meta->buffer = dma_buf->priv;
	kref_init(&meta->ref);
	mutex_init(&meta->lock);
	msm_iommu_meta_add(meta);

	return meta;
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta);

static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf, int flags)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *iommu_meta = NULL;
	int ret = 0;
	bool extra_meta_ref_taken = false;
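	/* Unless the client opts out, defer the unmap until the buffer is freed. */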
	bool late_unmap = (flags & MSM_DMA_ATTR_NO_DELAYED_UNMAP) == 0;

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);

	if (!iommu_meta) {
		iommu_meta = msm_iommu_meta_create(dma_buf);

		if (IS_ERR(iommu_meta)) {
			mutex_unlock(&msm_iommu_map_mutex);
			ret = PTR_ERR(iommu_meta);
			goto out;
		}
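		/*
		 * With delayed unmap, the meta must outlive this call; take an
		 * extra reference that is dropped in msm_dma_buf_freed().
		 */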
		if (late_unmap) {
			kref_get(&iommu_meta->ref);
			extra_meta_ref_taken = true;
		}
	} else {
		kref_get(&iommu_meta->ref);
	}

	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&iommu_meta->lock);
	iommu_map = msm_iommu_lookup(iommu_meta, dev);
	if (!iommu_map) {
		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);

		if (!iommu_map) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		ret = dma_map_sg(dev, sg, nents, dir);
		if (ret != nents) {
			kfree(iommu_map);
			goto out_unlock;
		}

		kref_init(&iommu_map->ref);
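		/*
		 * With delayed unmap, hold a second reference on the mapping so
		 * it survives the client's msm_dma_unmap_sg(); it is dropped in
		 * msm_dma_buf_freed() when ION frees the buffer.
		 */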
		if (late_unmap)
			kref_get(&iommu_map->ref);
		iommu_map->meta = iommu_meta;
		iommu_map->sgl.dma_address = sg->dma_address;
		iommu_map->sgl.dma_length = sg->dma_length;
		iommu_map->nents = nents;
		iommu_map->dev = dev;
		msm_iommu_add(iommu_meta, iommu_map);

	} else {
		sg->dma_address = iommu_map->sgl.dma_address;
		sg->dma_length = iommu_map->sgl.dma_length;

		kref_get(&iommu_map->ref);
		/*
		 * Need to do cache operations here based on "dir" in the
		 * future if we go with coherent mappings.
		 */
		ret = nents;
	}
	mutex_unlock(&iommu_meta->lock);
	return ret;

out_unlock:
	mutex_unlock(&iommu_meta->lock);
out:
	if (!IS_ERR(iommu_meta)) {
		if (extra_meta_ref_taken)
			msm_iommu_meta_put(iommu_meta);
		msm_iommu_meta_put(iommu_meta);
	}
	return ret;
}

/*
 * We are not taking a reference to the dma_buf here. It is expected that
 * clients hold a reference to the dma_buf until they are done with mapping
 * and unmapping.
 */
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		   enum dma_data_direction dir, struct dma_buf *dma_buf,
		   int flags)
{
	int ret;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: dev pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(sg)) {
		pr_err("%s: sg table pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(dma_buf)) {
		pr_err("%s: dma_buf pointer is invalid\n", __func__);
		return -EINVAL;
	}

	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, flags);

	return ret;
}

static void msm_iommu_meta_destroy(struct kref *kref)
{
	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
						ref);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappings!\n", __func__,
			meta->buffer);
	}
	rb_erase(&meta->node, &iommu_root);
	kfree(meta);
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
{
	/*
	 * Need to lock here to prevent race against map/unmap
	 */
	mutex_lock(&msm_iommu_map_mutex);
	kref_put(&meta->ref, msm_iommu_meta_destroy);
	mutex_unlock(&msm_iommu_map_mutex);
}

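/*
 * kref release callback for a mapping: unlink it from the buffer's list and
 * perform the actual dma_unmap_sg().
 */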
static void msm_iommu_map_release(struct kref *kref)
{
	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
						ref);

	list_del(&map->lnode);
	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
	kfree(map);
}

void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(dma_buf->priv);
	if (!meta) {
		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
		mutex_unlock(&msm_iommu_map_mutex);
		goto out;

	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	iommu_map = msm_iommu_lookup(meta, dev);

	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
				dma_buf, dev);
		mutex_unlock(&meta->lock);
		goto out;
	}

	/*
	 * Save the direction for later use when we actually unmap.
	 * It is not used right now, but if we move to a coherent mapping
	 * API in the future we may want to call the appropriate cache
	 * maintenance API when the client asks to unmap.
	 */
	iommu_map->dir = dir;

	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);

out:
	return;
}

/*
 * Only to be called by ION code when a buffer is freed
 */
void msm_dma_buf_freed(void *buffer)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_map *iommu_map_next;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(buffer);
	if (!meta) {
		/* Already unmapped (assuming no late unmapping) */
		mutex_unlock(&msm_iommu_map_mutex);
		goto out;

	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);

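	/*
	 * Drop each remaining mapping's reference; the final kref_put invokes
	 * msm_iommu_map_release(), which does the real dma_unmap_sg().
	 */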
	list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
				 lnode)
		kref_put(&iommu_map->ref, msm_iommu_map_release);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappings!\n", __func__,
			meta->buffer);
	}

	INIT_LIST_HEAD(&meta->iommu_maps);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);
out:
	return;
}
drivers/staging/android/ion/ion.c  +6 −1
@@ -38,6 +38,7 @@
 #include <linux/dma-buf.h>
 #include <linux/idr.h>
 #include <linux/msm_ion.h>
+#include <linux/msm_dma_iommu_mapping.h>
 #include <trace/events/kmem.h>


@@ -314,7 +315,11 @@ static void ion_buffer_get(struct ion_buffer *buffer)

 static int ion_buffer_put(struct ion_buffer *buffer)
 {
-	return kref_put(&buffer->ref, _ion_buffer_destroy);
+	int ret = kref_put(&buffer->ref, _ion_buffer_destroy);
+
+	if (ret)
+		msm_dma_buf_freed(buffer);
+	return ret;
 }
 
 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
include/linux/msm_dma_iommu_mapping.h  +61 −0 (new file)
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MSM_DMA_IOMMU_MAPPING_H
#define _LINUX_MSM_DMA_IOMMU_MAPPING_H

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

enum msm_dma_map_attr {
	MSM_DMA_ATTR_NO_DELAYED_UNMAP = 0x1,
};

/*
 * This function does not take a reference to the dma_buf. It is expected
 * that clients hold a reference to the dma_buf until they are done with
 * mapping and unmapping.
 */
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		   enum dma_data_direction dir, struct dma_buf *dma_buf,
		   int flags);

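/*
 * Map with delayed unmapping: the IOMMU mapping is kept until the ION/dma_buf
 * buffer is freed, even after msm_dma_unmap_sg() has been called.
 */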
static inline int msm_dma_map_sg_lazy(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       struct dma_buf *dma_buf)
{
	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, 0);
}

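/*
 * Map without delayed unmapping: msm_dma_unmap_sg() removes the IOMMU mapping
 * immediately.
 */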
static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				  int nents, enum dma_data_direction dir,
				  struct dma_buf *dma_buf)
{
	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf,
			      MSM_DMA_ATTR_NO_DELAYED_UNMAP);
}

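/*
 * Release a mapping created by one of the map calls above. With delayed
 * unmapping the actual IOMMU unmap is deferred until the buffer is freed.
 */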
void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf);

/*
 * Below is a private function, only to be called by the framework (ION)
 * and not by clients.
 */

void msm_dma_buf_freed(void *buffer);

#endif