
Commit 8b35d9fe authored by Christoph Hellwig, committed by Konrad Rzeszutek Wilk

swiotlb-xen: use ->map_page to implement ->map_sg



We can simply loop over the segments and map them, removing lots of
duplicate code.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent aca351cc
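
The idea in the commit message can be shown in isolation: a scatter-gather map operation is just the single-entry map applied in a loop, tearing down everything already mapped if one entry fails. Below is a minimal, self-contained C model of that pattern; the types and helper names (segment, map_one, unmap_range, map_sg) are invented for illustration and are not the kernel or Xen APIs touched by this patch.

#include <stdio.h>

#define MAP_ERROR ((unsigned long)-1)

struct segment {
	unsigned long dma_address;	/* filled in on success */
	unsigned int length;
};

/* Single-entry mapper; fails (for the demo) when the length is zero. */
static unsigned long map_one(const struct segment *seg)
{
	return seg->length ? 0x1000ul + seg->length : MAP_ERROR;
}

/* Undo the first n entries of a partially mapped list. */
static void unmap_range(struct segment *segs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		segs[i].dma_address = MAP_ERROR;
}

/* Map every entry, or undo everything and return 0 if one entry fails. */
static int map_sg(struct segment *segs, int nelems)
{
	int i;

	for (i = 0; i < nelems; i++) {
		segs[i].dma_address = map_one(&segs[i]);
		if (segs[i].dma_address == MAP_ERROR)
			goto out_unmap;
	}
	return nelems;

out_unmap:
	unmap_range(segs, i);	/* only the i entries mapped so far */
	return 0;
}

int main(void)
{
	struct segment segs[3] = { { 0, 64 }, { 0, 128 }, { 0, 0 } };

	printf("mapped %d of 3 segments\n", map_sg(segs, 3));
	return 0;
}

Like the patched function, the model returns 0 after undoing a partial mapping, so callers only ever see "all mapped" or "nothing mapped".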
+10 −58
@@ -514,24 +514,8 @@ xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
 }
 
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above xen_swiotlb_map_page
- * interface.  Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length.  They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
- * same here.
- */
 static int
-xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
 		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
@@ -540,50 +524,18 @@ xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		phys_addr_t paddr = sg_phys(sg);
-		dma_addr_t dev_addr = xen_phys_to_bus(paddr);
-
-		if (swiotlb_force == SWIOTLB_FORCE ||
-		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
-		    !dma_capable(hwdev, dev_addr, sg->length) ||
-		    range_straddles_page_boundary(paddr, sg->length)) {
-			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
-								 start_dma_addr,
-								 sg_phys(sg),
-								 sg->length,
-								 dir, attrs);
-			if (map == DMA_MAPPING_ERROR) {
-				dev_warn(hwdev, "swiotlb buffer is full\n");
-				/* Don't panic here, we expect map_sg users
-				   to do proper error handling. */
-				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-				xen_swiotlb_unmap_sg(hwdev, sgl, i, dir, attrs);
-				sg_dma_len(sgl) = 0;
-				return 0;
-			}
-			dev_addr = xen_phys_to_bus(map);
-			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
-						dev_addr,
-						map & ~PAGE_MASK,
-						sg->length,
-						dir,
-						attrs);
-			sg->dma_address = dev_addr;
-		} else {
-			/* we are not interested in the dma_addr returned by
-			 * xen_dma_map_page, only in the potential cache flushes executed
-			 * by the function. */
-			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
-						dev_addr,
-						paddr & ~PAGE_MASK,
-						sg->length,
-						dir,
-						attrs);
-			sg->dma_address = dev_addr;
-		}
+		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
+				sg->offset, sg->length, dir, attrs);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			goto out_unmap;
 		sg_dma_len(sg) = sg->length;
 	}
+
 	return nelems;
+out_unmap:
+	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	sg_dma_len(sgl) = 0;
+	return 0;
 }
 
 /*
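
The removed comment's note that "we expect map_sg users to do proper error handling" still applies after this patch: on failure the function unmaps whatever it already mapped, sets sg_dma_len(sgl) to 0 and returns 0, and drivers are expected to check that return value. A hedged, driver-side sketch of that convention follows; the helper name is invented for illustration, and dma_map_sg()/dma_unmap_sg() are the generic DMA API entry points that eventually dispatch to ops like the one changed here.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical driver helper: illustrates the caller-side convention only. */
static int example_map_for_device(struct device *dev, struct scatterlist *sgl,
				  int nelems)
{
	int count = dma_map_sg(dev, sgl, nelems, DMA_TO_DEVICE);

	if (count == 0)		/* nothing was mapped, nothing to undo */
		return -ENOMEM;

	/* ... hand the 'count' mapped entries to the hardware ... */

	dma_unmap_sg(dev, sgl, nelems, DMA_TO_DEVICE);
	return 0;
}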