Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1b222ff2 authored by Sudarshan Rajagopalan
Browse files

iommu: msm: Update lazy in compliance with upstream dma ops



During lazy dma_map_sg, only some entries of the caller's sg list
are stored in the msm_iommu_map->sgl. And lazy dma_unmap_sg uses
this incomplete sgl to perform sg_list walk to determine the total
iova size to unmap (all entries/segments are mapped into a single
contiguous iova). Since sg->page_link is missing, the sg_list walk
ends in a NULL-pointer-dereference kernel crash:

	BUG: Unable to handle kernel NULL pointer dereference at virtual
	address 00000018
	PC is at iommu_dma_unmap_sg+0x4c/0xdc
	[...]
	iommu_dma_unmap_sg+0x4c/0xdc
	__iommu_unmap_sg_attrs+0x64/0x6c
	msm_iommu_map_release+0x154/0x164
	msm_dma_buf_freed+0x168/0x3c8
	_ion_buffer_destroy+0x30/0x88
	ion_buffer_put+0x40/0x50
	ion_handle_destroy+0xec/0x10c
	ion_handle_put_nolock+0x40/0x50
	ion_ioctl+0x2ec/0x4d4
	do_vfs_ioctl+0xd0/0x85c
	SyS_ioctl+0x90/0xa4
	el0_svc_naked+0x24/0x28

Hence, clone/duplicate the caller's sg list into msm_iommu_map->sgl.
Also, update lazy map/unmap_sg to check DMA_ATTR_SKIP_CPU_SYNC to
skip cache maintenance only if asked for.

Change-Id: Idb7bd52d84d27ad0c7873208a3e25129f20d07da
Signed-off-by: Sudarshan Rajagopalan <sudaraja@codeaurora.org>
parent 4c28966f
Loading
Loading
Loading
Loading
+55 −12
Original line number Diff line number Diff line
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,7 @@
 * @dir - The direction for the unmap.
 * @meta - Backpointer to the meta this guy belongs to.
 * @ref - for reference counting this mapping
 * @attrs - dma mapping attributes
 *
 * Represents a mapping of one dma_buf buffer to a particular device
 * and address range. There may exist other mappings of this buffer in
@@ -39,11 +40,12 @@ struct msm_iommu_map {
	struct list_head lnode;
	struct rb_node node;
	struct device *dev;
	struct scatterlist sgl;
	struct scatterlist *sgl;
	unsigned int nents;
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
	unsigned long attrs;
};

struct msm_iommu_meta {
@@ -144,6 +146,22 @@ static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)

static void msm_iommu_meta_put(struct msm_iommu_meta *meta);

/*
 * clone_sgl - duplicate the caller's scatterlist so the lazy-unmap path
 * owns a complete, independently walkable copy.
 *
 * @sg:    head of the source scatterlist (already dma-mapped by the caller)
 * @nents: number of entries to clone
 *
 * Returns the head of a freshly allocated scatterlist, or NULL on
 * allocation failure.  The caller is responsible for releasing it with
 * sg_free_table() (msm_iommu_map_release() rebuilds an sg_table around
 * the returned head to do so).
 *
 * Note: use sg_set_page() rather than a whole-struct copy (*next = *s);
 * a struct copy would overwrite the new table's page_link field and with
 * it the chain/end markers sg_alloc_table() set up, corrupting walks and
 * sg_free_table() when the source table's chaining layout differs.
 */
static struct scatterlist *clone_sgl(struct scatterlist *sg, int nents)
{
	struct scatterlist *next, *s;
	int i;
	struct sg_table table;

	if (sg_alloc_table(&table, nents, GFP_KERNEL))
		return NULL;
	next = table.sgl;
	for_each_sg(sg, s, nents, i) {
		/* Copy page/length/offset without touching next->page_link */
		sg_set_page(next, sg_page(s), s->length, s->offset);
		/* Preserve the dma mapping so lazy unmap sees real addresses */
		sg_dma_address(next) = sg_dma_address(s);
		sg_dma_len(next) = sg_dma_len(s);
		next = sg_next(next);
	}
	return table.sgl;
}

static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf,
@@ -187,25 +205,36 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
		}

		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
		if (ret != nents) {
		if (!ret) {
			kfree(iommu_map);
			goto out_unlock;
		}

		iommu_map->sgl = clone_sgl(sg, nents);
		if (!iommu_map->sgl) {
			kfree(iommu_map);
			ret = -ENOMEM;
			goto out_unlock;
		}
		iommu_map->nents = nents;
		iommu_map->dev = dev;

		kref_init(&iommu_map->ref);
		if (late_unmap)
			kref_get(&iommu_map->ref);
		iommu_map->meta = iommu_meta;
		iommu_map->sgl.dma_address = sg->dma_address;
		iommu_map->sgl.dma_length = sg->dma_length;
		iommu_map->dev = dev;
		msm_iommu_add(iommu_meta, iommu_map);

	} else {
		sg->dma_address = iommu_map->sgl.dma_address;
		sg->dma_length = iommu_map->sgl.dma_length;
		sg->dma_address = iommu_map->sgl->dma_address;
		sg->dma_length = iommu_map->sgl->dma_length;

		kref_get(&iommu_map->ref);

		if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			dma_sync_sg_for_device(dev, iommu_map->sgl,
					iommu_map->nents, iommu_map->dir);

		if (is_device_dma_coherent(dev))
			/*
			 * Ensure all outstanding changes for coherent
@@ -289,14 +318,24 @@ static void msm_iommu_map_release(struct kref *kref)
{
	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
						ref);
	struct sg_table table;

	table.nents = table.orig_nents = map->nents;
	table.sgl = map->sgl;
	list_del(&map->lnode);
	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);

	/* Skip an additional cache maintenance on the dma unmap path */
	if (!(map->attrs & DMA_ATTR_SKIP_CPU_SYNC))
		map->attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	dma_unmap_sg_attrs(map->dev, map->sgl, map->nents, map->dir,
			map->attrs);
	sg_free_table(&table);
	kfree(map);
}

void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf)
void msm_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction dir,
			    struct dma_buf *dma_buf, unsigned long attrs)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *meta;
@@ -329,6 +368,10 @@ void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
	 */
	iommu_map->dir = dir;

	if (attrs && ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0))
		dma_sync_sg_for_cpu(dev, iommu_map->sgl, iommu_map->nents, dir);

	iommu_map->attrs = attrs;
	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);

@@ -337,7 +380,7 @@ void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
out:
	return;
}
EXPORT_SYMBOL(msm_dma_unmap_sg);
EXPORT_SYMBOL(msm_dma_unmap_sg_attrs);

int msm_dma_unmap_all_for_dev(struct device *dev)
{
+4 −2
Original line number Diff line number Diff line
@@ -328,8 +328,10 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      enum dma_data_direction direction)
{
	if (attachment->dma_map_attrs & DMA_ATTR_DELAYED_UNMAP)
		msm_dma_unmap_sg(attachment->dev, table->sgl, table->nents,
				 direction, attachment->dmabuf);
		msm_dma_unmap_sg_attrs(attachment->dev, table->sgl,
				       table->nents, direction,
				       attachment->dmabuf,
				       attachment->dma_map_attrs);
	else
		dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
				   direction, attachment->dma_map_attrs);
+4 −3
Original line number Diff line number Diff line
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -46,8 +46,9 @@ static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, attrs);
}

void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf);
void msm_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction dir,
			    struct dma_buf *dma_buf, unsigned long attrs);

int msm_dma_unmap_all_for_dev(struct device *dev);