
Commit e0138c26 authored by Konrad Rzeszutek Wilk

nouveau/ttm/PCIe: Use dma_addr if TTM has set it.



If the TTM layer has used the DMA API to set up pages that are
TTM_PAGE_FLAG_DMA32 (see the patch titled "ttm: Utilize the
DMA API for pages that have TTM_PAGE_FLAG_DMA32 set"), let's
use it when programming the GART in the PCIe type cards.

This patch skips doing the pci_map_page (and pci_unmap_page) if
a DMA address has been passed in for that page. If the dma_address
is zero (or DMA_ERROR_CODE), then we continue with our old
behaviour.
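
In sketch form, the per-page decision in nouveau_sgdma_populate() works as
below (a condensed illustration of the diff that follows, not the verbatim
patch; the loop index i and the explicit false assignment are editorial):

	/*
	 * Prefer the bus address TTM already set up via the DMA API;
	 * otherwise fall back to the old behaviour and map the page here.
	 */
	for (i = 0; i < num_pages; i++) {
		if (dma_addrs[i] != DMA_ERROR_CODE) {
			nvbe->pages[i] = dma_addrs[i];
			/* Mark it so teardown skips pci_unmap_page(). */
			nvbe->ttm_alloced[i] = true;
		} else {
			nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
						      PAGE_SIZE,
						      PCI_DMA_BIDIRECTIONAL);
			nvbe->ttm_alloced[i] = false;
		}
	}

The teardown path in nouveau_sgdma_clear() mirrors this: only pages the
driver mapped itself are passed to pci_unmap_page(); mappings owned by TTM
are left for TTM to release.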

[v2: Added a Reviewed-by tag]

Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
parent c39d3516
drivers/gpu/drm/nouveau/nouveau_sgdma.c  +21 −7

@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
+	bool *ttm_alloced;
 	unsigned nr_pages;
 
 	unsigned pte_start;
@@ -35,8 +36,17 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 	if (!nvbe->pages)
 		return -ENOMEM;
 
+	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+	if (!nvbe->ttm_alloced)
+		return -ENOMEM;
+
 	nvbe->nr_pages = 0;
 	while (num_pages--) {
-		nvbe->pages[nvbe->nr_pages] =
-			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
-				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+			nvbe->pages[nvbe->nr_pages] =
+					dma_addrs[nvbe->nr_pages];
+			nvbe->ttm_alloced[nvbe->nr_pages] = true;
+		} else {
+			nvbe->pages[nvbe->nr_pages] =
+				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
@@ -45,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-			be->func->clear(be);
-			return -EFAULT;
+				be->func->clear(be);
+				return -EFAULT;
+			}
 		}
 
 		nvbe->nr_pages++;
 	}
@@ -66,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 			be->func->unbind(be);
 
 		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			if (!nvbe->ttm_alloced[nvbe->nr_pages])
+				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 		kfree(nvbe->pages);
+		kfree(nvbe->ttm_alloced);
 		nvbe->pages = NULL;
+		nvbe->ttm_alloced = NULL;
 		nvbe->nr_pages = 0;
 	}
 }