Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 063b8271 authored by Christoph Hellwig, committed by Konrad Rzeszutek Wilk
Browse files

swiotlb-xen: ensure we have a single callsite for xen_dma_map_page



Refactor the code a bit to make further changes easier.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 2e12dcee
Loading
Loading
Loading
Loading
+16 −15
Original line number Diff line number Diff line
@@ -388,13 +388,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
		(swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}
		swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
@@ -407,21 +402,27 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
		return DMA_MAPPING_ERROR;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
					dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dev_addr, size))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

	page = pfn_to_page(map >> PAGE_SHIFT);
	offset = map & ~PAGE_MASK;
done:
	/*
	 * we are not interested in the dma_addr returned by xen_dma_map_page,
	 * only in the potential cache flushes executed by the function.
	 */
	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All