Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c39d3516 authored by Konrad Rzeszutek Wilk
Browse files

radeon/ttm/PCIe: Use dma_addr if TTM has set it.



If the TTM layer has used the DMA API to setup pages that are
TTM_PAGE_FLAG_DMA32 (look at patch titled: "ttm: Utilize the dma_addr_t
array for pages that are to in DMA32 pool."), lets use it
when programming the GART in the PCIe type cards.

This patch skips doing the pci_map_page (and pci_unmap_page) if
there is a DMA address passed in for that page. If the dma_address
is zero (or DMA_ERROR_CODE), then we continue on with our old
behaviour.

[v2: Fixed an indentation problem, added reviewed-by tag]
[v3: Added Acked-by Jerome]

Acked-by: Jerome Glisse <j.glisse@gmail.com>
Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
parent 27e8b237
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -317,6 +317,7 @@ struct radeon_gart {
	union radeon_gart_table		table;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				*ttm_alloced;
	bool				ready;
};

@@ -329,7 +330,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist);
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr);


/*
+26 −10
Original line number Diff line number Diff line
@@ -149,6 +149,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			if (!rdev->gart.ttm_alloced[p])
				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
				       		PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			rdev->gart.pages[p] = NULL;
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}

int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist)
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
@@ -180,6 +181,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
		 * is requested. */
		if (dma_addr[i] != DMA_ERROR_CODE) {
			rdev->gart.ttm_alloced[p] = true;
			rdev->gart.pages_addr[p] = dma_addr[i];
		} else {
			/* we need to support large memory configurations */
			/* assume that unbind have already been call on the range */
			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
@@ -190,6 +197,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
				radeon_gart_unbind(rdev, offset, pages);
				return -ENOMEM;
			}
		}
		rdev->gart.pages[p] = pagelist[i];
		page_base = rdev->gart.pages_addr[p];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
					 rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.ttm_alloced == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
	rdev->gart.ready = false;
	kfree(rdev->gart.pages);
	kfree(rdev->gart.pages_addr);
	kfree(rdev->gart.ttm_alloced);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;
	rdev->gart.ttm_alloced = NULL;
}
+4 −1
Original line number Diff line number Diff line
@@ -647,6 +647,7 @@ struct radeon_ttm_backend {
	unsigned long			num_pages;
	struct page			**pages;
	struct page			*dummy_read_page;
	dma_addr_t			*dma_addrs;
	bool				populated;
	bool				bound;
	unsigned			offset;
@@ -662,6 +663,7 @@ static int radeon_ttm_backend_populate(struct ttm_backend *backend,

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->dma_addrs = dma_addrs;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
@@ -674,6 +676,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->dma_addrs = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
@@ -694,7 +697,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);