
Commit 4e024506 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "staging: android: ion: Support iommu mappings with one segment" into msm-4.14

parents dfbe22fe 3ce275bf
+14 −2
@@ -236,8 +236,20 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		    (attrs & ~DMA_ATTR_SKIP_CPU_SYNC) ==
 		    (iommu_map->attrs & ~DMA_ATTR_SKIP_CPU_SYNC) &&
 		    sg_phys(sg) == iommu_map->buf_start_addr) {
-			sg->dma_address = iommu_map->sgl->dma_address;
-			sg->dma_length = iommu_map->sgl->dma_length;
+			struct scatterlist *sg_tmp = sg;
+			struct scatterlist *map_sg;
+			int i;
+
+			for_each_sg(iommu_map->sgl, map_sg, nents, i) {
+				sg_dma_address(sg_tmp) = sg_dma_address(map_sg);
+				sg_dma_len(sg_tmp) = sg_dma_len(map_sg);
+				if (sg_dma_len(map_sg) == 0)
+					break;
+
+				sg_tmp = sg_next(sg_tmp);
+				if (sg_tmp == NULL)
+					break;
+			}
 
 			kref_get(&iommu_map->ref);
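The old code copied only the first segment's DMA address and length into the client scatterlist; the loop added above copies every mapped segment out of the cached mapping, stopping at the first zero-length entry, which marks the end of the mapped region. Below is a minimal userspace model of that copy loop; the struct seg type, addresses, and sizes are invented stand-ins for struct scatterlist, sg_dma_address(), and sg_dma_len():

#include <stdio.h>
#include <stdint.h>

/* Invented stand-in for struct scatterlist: DMA fields only. */
struct seg {
	uint64_t dma_address;	/* models sg_dma_address() */
	unsigned int dma_len;	/* models sg_dma_len() */
};

int main(void)
{
	/* Cached mapping: one 1 MiB segment, remaining entries zeroed. */
	struct seg cached[4] = { { 0x80000000ull, 1 << 20 } };
	struct seg client[4] = { { 0, 0 } };	/* freshly duplicated table */
	int nents = 4, i;

	/* Mirror the kernel loop: copy address/length per segment and
	 * stop once the cached mapping runs out of mapped segments. */
	for (i = 0; i < nents; i++) {
		client[i].dma_address = cached[i].dma_address;
		client[i].dma_len = cached[i].dma_len;
		if (cached[i].dma_len == 0)
			break;
	}

	printf("first segment: addr=0x%llx len=%u\n",
	       (unsigned long long)client[0].dma_address, client[0].dma_len);
	return 0;
}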


+156 −61
@@ -254,7 +254,8 @@ static struct sg_table *dup_sg_table(struct sg_table *table)
 	new_sg = new_table->sgl;
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		memcpy(new_sg, sg, sizeof(*sg));
-		new_sg->dma_address = 0;
+		sg_dma_address(new_sg) = 0;
+		sg_dma_len(new_sg) = 0;
 		new_sg = sg_next(new_sg);
 	}
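The dup_sg_table() hunk above stops poking new_sg->dma_address directly and instead clears both DMA fields through the accessors. Two things are going on here: depending on CONFIG_NEED_SG_DMA_LENGTH the DMA length lives in a separate dma_length field rather than in sg->length, and a zeroed sg_dma_len() is exactly the end-of-mapping sentinel that the reuse loop in the first file keys off. Paraphrasing include/linux/scatterlist.h (not part of this diff):

#define sg_dma_address(sg)	((sg)->dma_address)

#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg)		((sg)->dma_length)
#else
#define sg_dma_len(sg)		((sg)->length)
#endif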


@@ -523,7 +524,7 @@ static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 {
 }
 
-static void ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
+static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
 			      unsigned int nents, unsigned long offset,
 			      unsigned long length,
 			      enum dma_data_direction dir, bool for_cpu)
@@ -531,10 +532,27 @@ static void ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
 	int i;
 	struct scatterlist *sg;
 	unsigned int len = 0;
+	dma_addr_t sg_dma_addr;
+
+	for_each_sg(sgl, sg, nents, i) {
+		if (sg_dma_len(sg) == 0)
+			break;
+
+		if (i > 0) {
+			pr_warn("Partial cmo only supported with 1 segment\n"
+				"is dma_set_max_seg_size being set on dev:%s\n",
+				dev_name(dev));
+			return -EINVAL;
+		}
+	}
+
 
 	for_each_sg(sgl, sg, nents, i) {
 		unsigned int sg_offset, sg_left, size = 0;
 
+		if (i == 0)
+			sg_dma_addr = sg_dma_address(sg);
+
 		len += sg->length;
 		if (len <= offset)
 			continue;
@@ -544,32 +562,42 @@ static void ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
 
 		size = (length < sg_left) ? length : sg_left;
 		if (for_cpu)
-			dma_sync_single_range_for_cpu(dev, sg->dma_address,
+			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
 						      sg_offset, size, dir);
 		else
-			dma_sync_single_range_for_device(dev, sg->dma_address,
+			dma_sync_single_range_for_device(dev, sg_dma_addr,
 							 sg_offset, size, dir);
 
 		offset += size;
 		length -= size;
+		sg_dma_addr += sg->length;
 
 		if (length == 0)
 			break;
 	}
+
+	return 0;
 }
 
-static void ion_sgl_sync_mapped(struct device *dev, struct scatterlist *sgl,
+static int ion_sgl_sync_mapped(struct device *dev, struct scatterlist *sgl,
 			       unsigned int nents, struct list_head *vmas,
 			       enum dma_data_direction dir, bool for_cpu)
 {
 	struct ion_vma_list *vma_list;
+	int ret = 0;
 
 	list_for_each_entry(vma_list, vmas, list) {
 		struct vm_area_struct *vma = vma_list->vma;
 
-		ion_sgl_sync_range(dev, sgl, nents, vma->vm_pgoff * PAGE_SIZE,
-				   vma->vm_end - vma->vm_start, dir, for_cpu);
+		ret = ion_sgl_sync_range(dev, sgl, nents,
+					 vma->vm_pgoff * PAGE_SIZE,
+					 vma->vm_end - vma->vm_start, dir,
+					 for_cpu);
+		if (ret)
+			break;
 	}
+
+	return ret;
 }


 static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
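With the one-segment rule enforced, ion_sgl_sync_range() can walk the CPU-side segments while tracking a single contiguous DMA address, advancing it by each segment's length. The sg_offset/sg_left arithmetic sits in context lines the diff view elides, so the userspace model below reconstructs the walk under that assumption; segment sizes, the window, and the base address are all made up, and printf stands in for dma_sync_single_range_for_{cpu,device}():

#include <stdio.h>

int main(void)
{
	unsigned int seg_len[] = { 4096, 4096, 8192 };	/* sg->length values */
	unsigned long offset = 1000, length = 7000;	/* partial sync window */
	unsigned long sg_dma_addr = 0x80000000ul;	/* segment 0 DMA base */
	unsigned int len = 0;
	int nents = 3, i;

	for (i = 0; i < nents; i++) {
		unsigned int sg_offset, sg_left, size;

		len += seg_len[i];
		if (len <= offset)
			continue;	/* window starts past this segment */

		sg_left = len - offset;	/* bytes from window start to segment end */
		sg_offset = seg_len[i] - sg_left;	/* start within segment */
		size = (length < sg_left) ? length : sg_left;

		/* Stand-in for dma_sync_single_range_for_{cpu,device}(). */
		printf("sync: dma=0x%lx offset=%u size=%u\n",
		       sg_dma_addr, sg_offset, size);

		offset += size;
		length -= size;
		sg_dma_addr += seg_len[i];

		if (length == 0)
			break;
	}
	return 0;
}

For the window above this prints two syncs, (offset=1000, size=3096) in the first segment and (offset=0, size=3904) in the second, which together cover exactly bytes 1000..7999 of the buffer.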
@@ -612,23 +640,31 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 		struct device *dev = buffer->heap->priv;
 		struct sg_table *table = buffer->sg_table;
 
-		trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
-						     true, true, direction,
-						     sync_only_mapped);
-
 		if (sync_only_mapped)
-			ion_sgl_sync_mapped(dev, table->sgl,
-					    table->nents, &buffer->vmas,
-					    direction, true);
+			ret = ion_sgl_sync_mapped(dev, table->sgl,
+						  table->nents, &buffer->vmas,
+						  direction, true);
 		else
 			dma_sync_sg_for_cpu(dev, table->sgl,
 					    table->nents, direction);
 
+		if (!ret)
+			trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
+							     true, true,
+							     direction,
+							     sync_only_mapped);
+		else
+			trace_ion_begin_cpu_access_cmo_skip(dev, dmabuf->name,
+							    true, true,
+							    direction,
+							    sync_only_mapped);
 		mutex_unlock(&buffer->lock);
 		goto out;
 	}
 
 	list_for_each_entry(a, &buffer->attachments, list) {
+		int tmp = 0;
+
 		if (!a->dma_mapped) {
 			trace_ion_begin_cpu_access_notmapped(a->dev,
 							     dmabuf->name,
@@ -638,17 +674,29 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 			continue;
 		}
 
-		trace_ion_begin_cpu_access_cmo_apply(a->dev, dmabuf->name,
-						     true, true, direction,
-						     sync_only_mapped);
-
 		if (sync_only_mapped)
-			ion_sgl_sync_mapped(a->dev, a->table->sgl,
-					    a->table->nents, &buffer->vmas,
-					    direction, true);
+			tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl,
+						  a->table->nents,
+						  &buffer->vmas,
+						  direction, true);
 		else
 			dma_sync_sg_for_cpu(a->dev, a->table->sgl,
 					    a->table->nents, direction);
 
+		if (!tmp) {
+			trace_ion_begin_cpu_access_cmo_apply(a->dev,
+							     dmabuf->name,
+							     true, true,
+							     direction,
+							     sync_only_mapped);
+		} else {
+			trace_ion_begin_cpu_access_cmo_skip(a->dev,
+							    dmabuf->name, true,
+							    true, direction,
+							    sync_only_mapped);
+			ret = tmp;
+		}
 	}
 	mutex_unlock(&buffer->lock);
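From here on the file repeats one pattern: do the sync first, then pick the apply or skip tracepoint based on the result. Note the asymmetry: ion_sgl_sync_mapped() breaks out at the first failing VMA, but the attachment loops record the failure in ret and still sync the remaining attachments. A compact userspace model of the attachment-loop behaviour; sync_one() and the segment counts are invented:

#include <stdio.h>
#include <errno.h>

/* Invented stand-in for the sync call: partial cache maintenance
 * fails with -EINVAL when the mapping has more than one segment. */
static int sync_one(int nsegs)
{
	return nsegs == 1 ? 0 : -EINVAL;
}

int main(void)
{
	int attachment_segs[] = { 1, 3, 1 };	/* made-up attachments */
	int ret = 0, i;

	for (i = 0; i < 3; i++) {
		int tmp = sync_one(attachment_segs[i]);

		if (!tmp) {
			printf("trace: cmo_apply, attachment %d\n", i);
		} else {
			printf("trace: cmo_skip, attachment %d\n", i);
			ret = tmp;	/* remember the failure, keep going */
		}
	}
	printf("return %d\n", ret);
	return 0;
}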


@@ -691,22 +739,30 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 		struct device *dev = buffer->heap->priv;
 		struct sg_table *table = buffer->sg_table;
 
-		trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
-						   true, true, direction,
-						   sync_only_mapped);
-
 		if (sync_only_mapped)
-			ion_sgl_sync_mapped(dev, table->sgl,
-					    table->nents, &buffer->vmas,
-					    direction, false);
+			ret = ion_sgl_sync_mapped(dev, table->sgl,
+						  table->nents, &buffer->vmas,
+						  direction, false);
 		else
 			dma_sync_sg_for_device(dev, table->sgl,
 					       table->nents, direction);
 
+		if (!ret)
+			trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
+							   true, true,
+							   direction,
+							   sync_only_mapped);
+		else
+			trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->name,
+							  true, true, direction,
+							  sync_only_mapped);
 		mutex_unlock(&buffer->lock);
 		goto out;
 	}
 
 	list_for_each_entry(a, &buffer->attachments, list) {
+		int tmp = 0;
+
 		if (!a->dma_mapped) {
 			trace_ion_end_cpu_access_notmapped(a->dev,
 							   dmabuf->name,
@@ -716,17 +772,26 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 			continue;
 		}
 
-		trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
-						   true, true, direction,
-						   sync_only_mapped);
-
 		if (sync_only_mapped)
-			ion_sgl_sync_mapped(a->dev, a->table->sgl,
-					    a->table->nents, &buffer->vmas,
-					    direction, false);
+			tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl,
+						  a->table->nents,
+						  &buffer->vmas, direction,
+						  false);
 		else
 			dma_sync_sg_for_device(a->dev, a->table->sgl,
 					       a->table->nents, direction);
 
+		if (!tmp) {
+			trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
+							   true, true,
+							   direction,
+							   sync_only_mapped);
+		} else {
+			trace_ion_end_cpu_access_cmo_skip(a->dev, dmabuf->name,
+							  true, true, direction,
+							  sync_only_mapped);
+			ret = tmp;
+		}
 	}
 	mutex_unlock(&buffer->lock);


@@ -798,18 +863,24 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 		struct device *dev = buffer->heap->priv;
 		struct sg_table *table = buffer->sg_table;
 
-		trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
-						     true, true, dir,
-						     false);
-
-		ion_sgl_sync_range(dev, table->sgl, table->nents,
-				   offset, len, dir, true);
-
+		ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
+					 offset, len, dir, true);
+
+		if (!ret)
+			trace_ion_begin_cpu_access_cmo_apply(dev, dmabuf->name,
+							     true, true, dir,
+							     false);
+		else
+			trace_ion_begin_cpu_access_cmo_skip(dev, dmabuf->name,
+							    true, true, dir,
+							    false);
 		mutex_unlock(&buffer->lock);
 		goto out;
 	}
 
 	list_for_each_entry(a, &buffer->attachments, list) {
+		int tmp = 0;
+
 		if (!a->dma_mapped) {
 			trace_ion_begin_cpu_access_notmapped(a->dev,
 							     dmabuf->name,
@@ -819,12 +890,22 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
 			continue;
 		}
 
-		trace_ion_begin_cpu_access_cmo_apply(a->dev, dmabuf->name,
-						     true, true, dir,
-						     false);
-
-		ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
-				   offset, len, dir, true);
+		tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
+					 offset, len, dir, true);
+
+		if (!tmp) {
+			trace_ion_begin_cpu_access_cmo_apply(a->dev,
+							     dmabuf->name,
+							     true, true, dir,
+							     false);
+		} else {
+			trace_ion_begin_cpu_access_cmo_skip(a->dev,
+							    dmabuf->name,
+							    true, true, dir,
+							    false);
+			ret = tmp;
+		}
 	}
 	mutex_unlock(&buffer->lock);


@@ -868,18 +949,25 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 		struct device *dev = buffer->heap->priv;
 		struct sg_table *table = buffer->sg_table;
 
-		trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
-						   true, true, direction,
-						   false);
-
-		ion_sgl_sync_range(dev, table->sgl, table->nents,
-				   offset, len, direction, false);
+		ret = ion_sgl_sync_range(dev, table->sgl, table->nents,
+					 offset, len, direction, false);
+
+		if (!ret)
+			trace_ion_end_cpu_access_cmo_apply(dev, dmabuf->name,
+							   true, true,
+							   direction, false);
+		else
+			trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->name,
+							  true, true,
+							  direction, false);
 
 		mutex_unlock(&buffer->lock);
 		goto out;
 	}
 
 	list_for_each_entry(a, &buffer->attachments, list) {
+		int tmp = 0;
+
 		if (!a->dma_mapped) {
 			trace_ion_end_cpu_access_notmapped(a->dev,
 							   dmabuf->name,
@@ -889,13 +977,20 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
 			continue;
 		}
 
-		trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
-						   true, true, direction,
-						   false);
-
-		ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
-				   offset, len, direction, false);
+		tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents,
+					 offset, len, direction, false);
+
+		if (!tmp) {
+			trace_ion_end_cpu_access_cmo_apply(a->dev, dmabuf->name,
+							   true, true,
+							   direction, false);
+		} else {
+			trace_ion_end_cpu_access_cmo_skip(a->dev, dmabuf->name,
+							  true, true, direction,
+							  false);
+			ret = tmp;
+		}
 	}
 	mutex_unlock(&buffer->lock);
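For context on how these hooks get exercised: userspace brackets CPU access to a dma-buf with DMA_BUF_IOCTL_SYNC, which lands in the begin_cpu_access/end_cpu_access callbacks patched above; the partial and sync-only-mapped variants are reached through vendor-specific ioctls on msm kernels and are not part of the mainline uapi. A minimal sketch, assuming fd is already a valid dma-buf file descriptor (for example from an ION allocation):

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int with_cpu_access(int fd)
{
	struct dma_buf_sync sync = {
		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
	};

	/* Triggers begin_cpu_access (cache maintenance as needed). */
	if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync))
		return -1;

	/* ... read or write the mmap()ed buffer here ... */

	/* Triggers end_cpu_access (hand the buffer back to the device). */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
}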