Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29defcc9 authored by Laura Abbott, committed by Patrick Daly
Browse files

ion: Snapshot for 4.8 kernel upgrade



Take a snapshot of all ion files for the 4.8 kernel upgrade as of
commit 05f15e3f74c903e874d227609821724e381f0aba
("iommu: msm: ensure lazy mappings are unmapped on detach")

Change-Id: Icc5b48bb4ef444d37043f8cf091fdce878868a95
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
parent ff0116ec
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -28,6 +28,7 @@
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/msm_dma_iommu_mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
@@ -2298,6 +2299,9 @@ static void __arm_iommu_detach_device(struct device *dev)
		return;
	}

	if (msm_dma_unmap_all_for_dev(dev))
		dev_warn(dev, "IOMMU detach with outstanding mappings\n");

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
+4 −0
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@
#include <asm/tlbflush.h>
#include <asm/dma-iommu.h>
#include <linux/dma-mapping-fast.h>
#include <linux/msm_dma_iommu_mapping.h>



@@ -2125,6 +2126,9 @@ void arm_iommu_detach_device(struct device *dev)
	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
					&s1_bypass);

	if (msm_dma_unmap_all_for_dev(dev))
		dev_warn(dev, "IOMMU detach with outstanding mappings\n");

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;
+12 −0
Original line number Diff line number Diff line
@@ -357,6 +357,18 @@ config ARM_SMMU_V3
	  Say Y here if your system includes an IOMMU device implementing
	  the ARM SMMUv3 architecture.

config QCOM_LAZY_MAPPING
	bool "Reference counted iommu-mapping support"
	depends on ION_MSM
	depends on IOMMU_API
	help
	  ION buffers may be shared between several software clients.
	  Reference counting the mapping may simplify coordination between
	  these clients, and decrease latency by preventing multiple
	  map/unmaps of the same region.

	  If unsure, say N here.

config S390_IOMMU
	def_bool y if S390 && PCI
	depends on S390 && PCI
+1 −1
Original line number Diff line number Diff line
@@ -2,7 +2,7 @@ obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_API) += msm_dma_iommu_mapping.o
obj-$(CONFIG_QCOM_LAZY_MAPPING) += msm_dma_iommu_mapping.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+35 −5
Original line number Diff line number Diff line
@@ -145,13 +145,14 @@ static void msm_iommu_meta_put(struct msm_iommu_meta *meta);

static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf, int flags)
				   struct dma_buf *dma_buf,
				   unsigned long attrs)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *iommu_meta = NULL;
	int ret = 0;
	bool extra_meta_ref_taken = false;
	bool late_unmap = (flags & MSM_DMA_ATTR_NO_DELAYED_UNMAP) == 0;
	int late_unmap = !(attrs & DMA_ATTR_NO_DELAYED_UNMAP);

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);
@@ -184,7 +185,7 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
			goto out_unlock;
		}

		ret = dma_map_sg(dev, sg, nents, dir);
		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
		if (ret != nents) {
			kfree(iommu_map);
			goto out_unlock;
@@ -232,7 +233,7 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
 */
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		   enum dma_data_direction dir, struct dma_buf *dma_buf,
		   int flags)
		   unsigned long attrs)
{
	int ret;

@@ -251,7 +252,7 @@ int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		return -EINVAL;
	}

	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, flags);
	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);

	return ret;
}
@@ -334,6 +335,35 @@ void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
}
EXPORT_SYMBOL(msm_dma_unmap_sg);

/*
 * msm_dma_unmap_all_for_dev() - drop every lazily-held IOMMU mapping
 * owned by @dev.
 *
 * Walks the global rb-tree of buffer metadata (iommu_root) and, for each
 * buffer, puts the reference on every mapping entry whose ->dev matches
 * @dev.  Called on IOMMU detach so no stale mappings outlive the
 * device's attachment (see the callers added in arm_iommu_detach_device).
 *
 * Return: 0 if every matching mapping was fully released; -EINVAL if at
 * least one mapping still held additional references after the put
 * (i.e. outstanding mappings remain for @dev).
 */
int msm_dma_unmap_all_for_dev(struct device *dev)
{
	int ret = 0;
	struct msm_iommu_meta *meta;
	struct rb_root *root;
	struct rb_node *meta_node;

	/* Serializes against concurrent map/unmap and rb-tree mutation. */
	mutex_lock(&msm_iommu_map_mutex);
	root = &iommu_root;
	meta_node = rb_first(root);
	while (meta_node) {
		struct msm_iommu_map *iommu_map;

		meta = rb_entry(meta_node, struct msm_iommu_meta, node);
		/* Per-buffer lock guards this buffer's iommu_maps list. */
		mutex_lock(&meta->lock);
		/*
		 * kref_put() returns 0 when the count did not reach zero,
		 * meaning another client still holds this mapping; report
		 * that to the caller as -EINVAL ("outstanding mappings").
		 *
		 * NOTE(review): if msm_iommu_map_release() unlinks and
		 * frees the entry (not visible here), continuing this
		 * plain list_for_each_entry() walk would read freed
		 * memory; list_for_each_entry_safe() would be required.
		 * Verify the release callback's semantics.
		 */
		list_for_each_entry(iommu_map, &meta->iommu_maps, lnode)
			if (iommu_map->dev == dev)
				if (!kref_put(&iommu_map->ref,
						msm_iommu_map_release))
					ret = -EINVAL;

		mutex_unlock(&meta->lock);
		meta_node = rb_next(meta_node);
	}
	mutex_unlock(&msm_iommu_map_mutex);

	return ret;
}

/*
 * Only to be called by ION code when a buffer is freed
 */
Loading