Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b9394647 authored by FUJITA Tomonori
Browse files

swiotlb: use dma_capable()



This converts swiotlb to use dma_capable() instead of
swiotlb_arch_address_needs_mapping() and is_buffer_dma_capable().

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Becky Bruce <beckyb@kernel.crashing.org>
parent 9a937c91
Loading
Loading
Loading
Loading
+5 −19
Original line number Diff line number Diff line
@@ -131,12 +131,6 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}

/*
 * Weak default for the per-architecture hook: an address range needs
 * bounce buffering exactly when the device's DMA mask cannot reach it.
 * Architectures with stricter constraints may override this symbol.
 */
int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
					       dma_addr_t addr, size_t size)
{
	if (is_buffer_dma_capable(dma_get_mask(hwdev), addr, size))
		return 0;

	return 1;
}

static void swiotlb_print_info(unsigned long bytes)
{
	phys_addr_t pstart, pend;
@@ -297,12 +291,6 @@ cleanup1:
	return -ENOMEM;
}

/*
 * Thin wrapper: defer the "does this range need a bounce buffer?"
 * decision to the (possibly architecture-overridden) weak hook.
 */
static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
	int needs_bounce;

	needs_bounce = swiotlb_arch_address_needs_mapping(hwdev, addr, size);
	return needs_bounce;
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= virt_to_phys(io_tlb_start) &&
@@ -539,9 +527,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret &&
	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
				   size)) {
	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
		/*
		 * The allocated memory isn't reachable by the device.
		 */
@@ -563,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
	dev_addr = swiotlb_virt_to_bus(hwdev, ret);

	/* Confirm address can be DMA'd by device */
	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
	if (dev_addr + size > dma_mask) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)dma_mask,
		       (unsigned long long)dev_addr);
@@ -635,7 +621,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(dev, dev_addr, size) && !swiotlb_force)
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/*
@@ -652,7 +638,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(dev, dev_addr, size))
	if (!dma_capable(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
@@ -805,7 +791,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr);

		if (swiotlb_force ||
		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			void *map = map_single(hwdev, sg_phys(sg),
					       sg->length, dir);
			if (!map) {