Commit cde14bbf authored by Jan Beulich, committed by Tony Luck

[IA64] swiotlb bug fixes



This patch fixes:
- marking pages written to by DMA as I-cache clean is now done only for IA64
- the broken multiple-inclusion guard in include/asm-x86_64/swiotlb.h (see
  the sketch below)
- a missing call to mark_clean in swiotlb_sync_sg()
- a (perhaps only theoretical) issue in swiotlb_dma_supported() when
  io_tlb_end is exactly at the end of memory
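
For context on the guard item: the header's #ifndef tested _ASM_SWIOTLB_H, but
the matching #define spelled it _ASM_SWTIOLB_H, so the tested macro was never
defined and a repeated #include re-expanded the whole header. A minimal sketch
of the broken pattern (header body elided):

	#ifndef _ASM_SWIOTLB_H		/* tests the correctly spelled name */
	#define _ASM_SWTIOLB_H 1	/* typo: defines a different macro */
	/* ... header body: re-expanded on every #include ... */
	#endif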

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 86afa9eb
arch/ia64/mm/init.c  +19 −0
@@ -129,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
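
Note how the new dma_mark_clean() handles partial pages: pg_addr is rounded up
with PAGE_ALIGN(), and the loop only runs while a whole page still fits before
end, so pages the DMA write covers only partially stay unmarked and still get
flushed by lazy_mmu_prot_update(). A stand-alone sketch of the loop's
arithmetic, assuming a hypothetical 16 KB PAGE_SIZE (it is configurable on
ia64); all numbers here are illustrative:

	#include <stdio.h>

	#define PAGE_SIZE	0x4000UL	/* hypothetical 16 KB page */
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		/* example buffer: spans 0x5000..0x10000, i.e. two whole
		 * pages (0x8000, 0xc000) plus a partial head page */
		unsigned long addr = 0x5000, size = 0xb000;
		unsigned long pg_addr = PAGE_ALIGN(addr);
		unsigned long end = addr + size;

		while (pg_addr + PAGE_SIZE <= end) {
			printf("would mark page at 0x%lx clean\n", pg_addr);
			pg_addr += PAGE_SIZE;
		}
		/* the partially covered page at 0x4000 stays unmarked and
		 * is still flushed if it is ever mapped executable */
		return 0;
	}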
include/asm-ia64/dma.h  +2 −0
@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
 
 #define free_dma(x)
 
+void dma_mark_clean(void *addr, size_t size);
+
 #endif /* _ASM_IA64_DMA_H */
include/asm-x86_64/swiotlb.h  +4 −3
@@ -1,6 +1,5 @@
 #ifndef _ASM_SWIOTLB_H
-#define _ASM_SWTIOLB_H 1
-
+#define _ASM_SWIOTLB_H 1
 
 #include <asm/dma-mapping.h>
 
@@ -52,4 +51,6 @@ extern int swiotlb;
 
 extern void pci_swiotlb_init(void);
 
-#endif /* _ASM_SWTIOLB_H */
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#endif /* _ASM_SWIOTLB_H */
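
The empty inline stub is what lets the generic swiotlb code below call
dma_mark_clean() unconditionally: on x86-64, where DMA is already coherent
with the i-cache, the call compiles away to nothing, while on ia64 it resolves
to the page-marking routine added above. The call-site pattern this enables
(simplified from the swiotlb_unmap_single() hunk below):

	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);	/* no-op on x86-64 */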
lib/swiotlb.c  +8 −25
@@ -557,25 +557,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	return dev_addr;
 }
 
-/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
-	unsigned long pg_addr, end;
-
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
-	}
-}
-
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
  * match what was provided for in a previous swiotlb_map_single call.  All
@@ -594,7 +575,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 /*
@@ -617,7 +598,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -648,7 +629,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -698,7 +679,6 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		dev_addr = virt_to_phys(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
-			sg->dma_address = virt_to_bus(map);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -707,6 +687,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 				sg[0].dma_length = 0;
 				return 0;
 			}
+			sg->dma_address = virt_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -730,7 +711,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 /*
@@ -752,6 +733,8 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			sync_single(hwdev, (void *) sg->dma_address,
 				    sg->dma_length, dir, target);
+		else if (dir == DMA_FROM_DEVICE)
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 void
@@ -783,7 +766,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-	return (virt_to_phys (io_tlb_end) - 1) <= mask;
+	return virt_to_phys(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
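
On the last hunk: the old expression translated io_tlb_end itself, a pointer
one byte past the bounce buffer, and only then subtracted 1. When the buffer
ends exactly at the end of memory, that asks virt_to_phys() about an address
outside RAM; the numeric result is often the same, which is why the commit
message calls it a perhaps only theoretical issue, but the translation itself
is not defined there. A toy model of the difference, with a hypothetical
identity virt_to_phys() that rejects out-of-range addresses (all names and
values here are illustrative, not kernel code):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MEM_END	0x100000000ULL		/* hypothetical 4 GB of RAM */

	static uint64_t virt_to_phys(uint64_t v)
	{
		assert(v < MEM_END);	/* one-past-end would trip this */
		return v;		/* identity map for the toy model */
	}

	int main(void)
	{
		uint64_t io_tlb_end = MEM_END;	/* buffer ends at end of memory */
		uint64_t mask = 0xffffffffULL;	/* 32-bit DMA mask */

		/* old code: virt_to_phys(io_tlb_end) - 1  -- translation of
		 * an out-of-range pointer; the assert above would fire */
		/* new code: translate the last valid byte instead */
		printf("supported: %d\n", virt_to_phys(io_tlb_end - 1) <= mask);
		return 0;
	}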