
Commit acbd53b4 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "iommu: msm: validate subsequent lazy map arguments"

parents 6dd27a37 a67eb07a
+43 −21
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -27,9 +27,11 @@
 * @dev - Device this is mapped to. Used as key
 * @sgl - The scatterlist for this mapping
 * @nents - Number of entries in sgl
- * @dir - The direction for the unmap.
+ * @dir - The direction for the map.
 * @meta - Backpointer to the meta this guy belongs to.
 * @ref - for reference counting this mapping
+ * @map_attrs - dma mapping attributes
+ * @buf_start_addr - address of start of buffer
 *
 * Represents a mapping of one dma_buf buffer to a particular device
 * and address range. There may exist other mappings of this buffer in
@@ -44,6 +46,8 @@ struct msm_iommu_map {
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
+	unsigned long map_attrs;
+	dma_addr_t buf_start_addr;
};

struct msm_iommu_meta {
@@ -199,9 +203,17 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
		iommu_map->sgl.dma_address = sg->dma_address;
		iommu_map->sgl.dma_length = sg->dma_length;
		iommu_map->dev = dev;
		iommu_map->dir = dir;
		iommu_map->nents = nents;
+		iommu_map->map_attrs = attrs;
+		iommu_map->buf_start_addr = sg_phys(sg);
		msm_iommu_add(iommu_meta, iommu_map);

	} else {
+		if (nents == iommu_map->nents &&
+		    dir == iommu_map->dir &&
+		    attrs == iommu_map->map_attrs &&
+		    sg_phys(sg) == iommu_map->buf_start_addr) {
			sg->dma_address = iommu_map->sgl.dma_address;
			sg->dma_length = iommu_map->sgl.dma_length;

@@ -214,6 +226,20 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				 */
				dmb(ish);
			ret = nents;
+		} else {
+			bool start_diff = (sg_phys(sg) !=
+					   iommu_map->buf_start_addr);
+
+			dev_err(dev, "lazy map request differs:\n"
+				"req dir:%d, original dir:%d\n"
+				"req nents:%d, original nents:%d\n"
+				"req map attrs:%lu, original map attrs:%lu\n"
+				"req buffer start address differs:%d\n",
+				dir, iommu_map->dir, nents,
+				iommu_map->nents, attrs, iommu_map->map_attrs,
+				start_diff);
+			ret = -EINVAL;
+		}
	}
	mutex_unlock(&iommu_meta->lock);
	return ret;
@@ -321,13 +347,9 @@ void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		goto out;
	}

-	/*
-	 * Save direction for later use when we actually unmap.
-	 * Not used right now but in the future if we go to coherent mapping
-	 * API we might want to call the appropriate API when client asks
-	 * to unmap
-	 */
-	iommu_map->dir = dir;
+	if (dir != iommu_map->dir)
+		WARN(1, "%s: (%pK) dir:%d differs from original dir:%d\n",
+		     __func__, dma_buf, dir, iommu_map->dir);

	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);
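For readability, the check added in __msm_dma_map_sg() above can be summarized as a single predicate. The sketch below is illustrative and not part of the patch: the struct msm_iommu_map fields and sg_phys() come from the diff, while the lazy_args_match() helper name is hypothetical.

/*
 * Hypothetical condensation of the validation added above: a repeated lazy
 * map of the same buffer is honoured only when every argument matches the
 * mapping cached by the first call; otherwise the request now fails with
 * -EINVAL and the mismatch is logged via dev_err().
 */
static bool lazy_args_match(const struct msm_iommu_map *iommu_map,
			    struct scatterlist *sg, int nents,
			    enum dma_data_direction dir, unsigned long attrs)
{
	return nents == iommu_map->nents &&
	       dir == iommu_map->dir &&
	       attrs == iommu_map->map_attrs &&
	       sg_phys(sg) == iommu_map->buf_start_addr;
}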
+16 −1
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,21 @@ int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		   enum dma_data_direction dir, struct dma_buf *dma_buf,
		   unsigned long attrs);

+/*
+ * This function takes an extra reference to the dma_buf.
+ * What this means is that calling msm_dma_unmap_sg will not result in the
+ * buffer's iommu mapping being removed, which means that subsequent calls
+ * to lazy map will simply re-use the existing iommu mapping.
+ * The iommu unmapping of the buffer will occur when the ION buffer is
+ * destroyed.
+ * Using lazy mapping can provide a performance benefit because subsequent
+ * mappings are faster.
+ *
+ * The limitations of using this API are that all subsequent iommu mappings
+ * must be the same as the original mapping, i.e. they must map the same
+ * part of the buffer with the same dma data direction. Also there can't be
+ * multiple mappings of different parts of the buffer.
+ */
static inline int msm_dma_map_sg_lazy(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
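To make the contract in the comment above concrete, here is a minimal caller-side sketch; it is illustrative and not part of this commit. The example_lazy_user() helper and the include path for the msm mapping header are assumptions, and msm_dma_map_sg_lazy() is assumed to take the dma_buf as its final argument (matching msm_dma_map_sg_attrs()), since its declaration is cut off in this view.

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/msm_dma_iommu_mapping.h>	/* assumed header path */

/* Illustrative lazy-mapping lifecycle for one device and one dma_buf. */
static int example_lazy_user(struct device *dev, struct dma_buf *dmabuf,
			     struct scatterlist *sg, int nents)
{
	int ret;

	/* First call creates the iommu mapping and takes the extra reference. */
	ret = msm_dma_map_sg_lazy(dev, sg, nents, DMA_TO_DEVICE, dmabuf);
	if (ret < 0)
		return ret;

	/*
	 * Drops only this caller's reference; the iommu mapping itself stays
	 * around until the ION buffer is destroyed.
	 */
	msm_dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE, dmabuf);

	/*
	 * A repeat map with identical nents, direction, attributes and buffer
	 * start address re-uses the cached mapping; with this patch, any
	 * mismatch (for example a different direction) is rejected with
	 * -EINVAL instead of silently returning the stale dma_address.
	 */
	return msm_dma_map_sg_lazy(dev, sg, nents, DMA_TO_DEVICE, dmabuf);
}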