Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit aa010efb authored by Joerg Roedel's avatar Joerg Roedel
Browse files

dma-debug: be more careful when building reference entries



The current code is not very careful when it builds reference
dma_debug_entries which get passed to hash_bucket_find(). But since this
function changed to a best-fit algorithm these entries have to be more
accurate. This patch adds this higher level of accuracy.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent e5e8c5b9
Loading
Loading
Loading
Loading
+91 −43
Original line number Original line Diff line number Diff line
@@ -874,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
				"[addr=%p] [size=%llu]\n", addr, size);
				"[addr=%p] [size=%llu]\n", addr, size);
}
}


static void check_sync(struct device *dev, dma_addr_t addr,
static void check_sync(struct device *dev,
		       u64 size, u64 offset, int direction, bool to_cpu)
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
{
	struct dma_debug_entry ref = {
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};
	struct dma_debug_entry *entry;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	struct hash_bucket *bucket;
	unsigned long flags;
	unsigned long flags;


	bucket = get_hash_bucket(&ref, &flags);
	bucket = get_hash_bucket(ref, &flags);


	entry = hash_bucket_find(bucket, &ref);
	entry = hash_bucket_find(bucket, ref);


	if (!entry) {
	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
		err_printk(dev, NULL, "DMA-API: device driver tries "
				"to sync DMA memory it has not allocated "
				"to sync DMA memory it has not allocated "
				"[device address=0x%016llx] [size=%llu bytes]\n",
				"[device address=0x%016llx] [size=%llu bytes]\n",
				(unsigned long long)addr, size);
				(unsigned long long)ref->dev_addr, ref->size);
		goto out;
		goto out;
	}
	}


	if ((offset + size) > entry->size) {
	if (ref->size > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
		err_printk(dev, entry, "DMA-API: device driver syncs"
				" DMA memory outside allocated range "
				" DMA memory outside allocated range "
				"[device address=0x%016llx] "
				"[device address=0x%016llx] "
				"[allocation size=%llu bytes] [sync offset=%llu] "
				"[allocation size=%llu bytes] "
				"[sync size=%llu]\n", entry->dev_addr, entry->size,
				"[sync offset+size=%llu]\n",
				offset, size);
				entry->dev_addr, entry->size,
				ref->size);
	}
	}


	if (direction != entry->direction) {
	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"DMA memory with different direction "
				"DMA memory with different direction "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				"[mapped with %s] [synced with %s]\n",
				(unsigned long long)addr, entry->size,
				(unsigned long long)ref->dev_addr, entry->size,
				dir2name[entry->direction],
				dir2name[entry->direction],
				dir2name[direction]);
				dir2name[ref->direction]);
	}
	}


	if (entry->direction == DMA_BIDIRECTIONAL)
	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;
		goto out;


	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(direction == DMA_TO_DEVICE))
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"device read-only DMA memory for cpu "
				"device read-only DMA memory for cpu "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				"[mapped with %s] [synced with %s]\n",
				(unsigned long long)addr, entry->size,
				(unsigned long long)ref->dev_addr, entry->size,
				dir2name[entry->direction],
				dir2name[entry->direction],
				dir2name[direction]);
				dir2name[ref->direction]);


	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(direction == DMA_FROM_DEVICE))
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
		err_printk(dev, entry, "DMA-API: device driver syncs "
				"device write-only DMA memory to device "
				"device write-only DMA memory to device "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [synced with %s]\n",
				"[mapped with %s] [synced with %s]\n",
				(unsigned long long)addr, entry->size,
				(unsigned long long)ref->dev_addr, entry->size,
				dir2name[entry->direction],
				dir2name[entry->direction],
				dir2name[direction]);
				dir2name[ref->direction]);


out:
out:
	put_hash_bucket(bucket, &flags);
	put_hash_bucket(bucket, &flags);
@@ -1037,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
}
}
EXPORT_SYMBOL(debug_dma_map_sg);
EXPORT_SYMBOL(debug_dma_map_sg);


static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
{
	struct dma_debug_entry *entry, ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	struct hash_bucket *bucket;
	unsigned long flags;
	unsigned long flags;
	int mapped_ents;
	int mapped_ents;


	ref.dev      = dev;
	bucket       = get_hash_bucket(ref, &flags);
	ref.dev_addr = sg_dma_address(s);
	entry        = hash_bucket_find(bucket, ref);
	ref.size     = sg_dma_len(s),

	bucket       = get_hash_bucket(&ref, &flags);
	entry        = hash_bucket_find(bucket, &ref);
	mapped_ents  = 0;
	mapped_ents  = 0;


	if (entry)
	if (entry)
@@ -1084,7 +1077,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			break;
			break;


		if (!i)
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, s);
			mapped_ents = get_nr_mapped_entries(dev, &ref);


		check_unmap(&ref);
		check_unmap(&ref);
	}
	}
@@ -1139,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
				   size_t size, int direction)
{
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
	if (unlikely(global_disable))
		return;
		return;


	check_sync(dev, dma_handle, size, 0, direction, true);
	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);


@@ -1150,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
				      int direction)
{
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
	if (unlikely(global_disable))
		return;
		return;


	check_sync(dev, dma_handle, size, 0, direction, false);
	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
EXPORT_SYMBOL(debug_dma_sync_single_for_device);


@@ -1162,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 unsigned long offset, size_t size,
					 unsigned long offset, size_t size,
					 int direction)
					 int direction)
{
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
	if (unlikely(global_disable))
		return;
		return;


	check_sync(dev, dma_handle, size, offset, direction, true);
	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);


@@ -1174,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
					    unsigned long offset,
					    unsigned long offset,
					    size_t size, int direction)
					    size_t size, int direction)
{
{
	struct dma_debug_entry ref;

	if (unlikely(global_disable))
	if (unlikely(global_disable))
		return;
		return;


	check_sync(dev, dma_handle, size, offset, direction, false);
	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = offset + size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);


@@ -1191,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		return;
		return;


	for_each_sg(sg, s, nelems, i) {
	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, s);
			mapped_ents = get_nr_mapped_entries(dev, &ref);


		if (i >= mapped_ents)
		if (i >= mapped_ents)
			break;
			break;


		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
		check_sync(dev, &ref, true);
			   direction, true);
	}
	}
}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1213,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		return;
		return;


	for_each_sg(sg, s, nelems, i) {
	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};
		if (!i)
		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, s);
			mapped_ents = get_nr_mapped_entries(dev, &ref);


		if (i >= mapped_ents)
		if (i >= mapped_ents)
			break;
			break;


		check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
		check_sync(dev, &ref, false);
			   direction, false);
	}
	}
}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);