
Commit de1e7cd6 authored by Dave Airlie


Merge branch 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen into drm-next

* 'stable/ttm.pci-api.v5' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  ttm: Include the 'struct dev' when using the DMA API.
  nouveau/ttm/PCIe: Use dma_addr if TTM has set it.
  radeon/ttm/PCIe: Use dma_addr if TTM has set it.
  ttm: Expand (*populate) to support an array of DMA addresses.
  ttm: Utilize the DMA API for pages that have TTM_PAGE_FLAG_DMA32 set.
  ttm: Introduce a placeholder for DMA (bus) addresses.
parents 7811bddb 5a893fc2
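
In short, the series threads a per-page array of bus addresses through TTM's (*populate) hook: when TTM has already mapped a page through the DMA API (the TTM_PAGE_FLAG_DMA32 case), the corresponding slot holds a valid dma_addr_t; otherwise it holds DMA_ERROR_CODE and the driver maps the page itself. Below is a minimal sketch of that driver-side pattern against the 2.6.38-era TTM backend API; the example_* names and the pdev, bus_addrs, and ttm_alloced fields are illustrative, not part of the patch.

#include <linux/pci.h>
#include <linux/slab.h>
#include "ttm/ttm_bo_driver.h"	/* struct ttm_backend (2.6.38-era include path) */

struct example_backend {
	struct ttm_backend backend;
	struct pci_dev *pdev;
	dma_addr_t *bus_addrs;	/* one bus address per page */
	bool *ttm_alloced;	/* true if TTM supplied the mapping */
};

static int example_populate(struct ttm_backend *be, unsigned long num_pages,
			    struct page **pages, struct page *dummy_read_page,
			    dma_addr_t *dma_addrs)
{
	struct example_backend *eb =
		container_of(be, struct example_backend, backend);
	unsigned long i;

	eb->bus_addrs = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	eb->ttm_alloced = kcalloc(num_pages, sizeof(bool), GFP_KERNEL);
	if (!eb->bus_addrs || !eb->ttm_alloced)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		if (dma_addrs[i] != DMA_ERROR_CODE) {
			/* TTM mapped this page through the DMA API; reuse
			 * its address and skip pci_unmap_page() on teardown. */
			eb->bus_addrs[i] = dma_addrs[i];
			eb->ttm_alloced[i] = true;
		} else {
			/* Page not mapped by TTM; create our own mapping. */
			eb->bus_addrs[i] = pci_map_page(eb->pdev, pages[i], 0,
							PAGE_SIZE,
							PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(eb->pdev, eb->bus_addrs[i]))
				return -EFAULT;
			eb->ttm_alloced[i] = false;
		}
	}
	return 0;
}

Using DMA_ERROR_CODE as an in-band "not mapped by TTM" sentinel keeps the hook to a single extra array, at the cost of relying on an arch-defined constant; worth noting if you port the pattern, since not every architecture defines it.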
drivers/gpu/drm/nouveau/nouveau_mem.c +1 −0
@@ -409,6 +409,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	dev_priv->ttm.bdev.dev = dev->dev;
 	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
 				 dev_priv->ttm.bo_global_ref.ref.object,
 				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
drivers/gpu/drm/nouveau/nouveau_sgdma.c +23 −8
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
+	bool *ttm_alloced;
 	unsigned nr_pages;
 
 	u64 offset;
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
 
 static int
 nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
-		       struct page **pages, struct page *dummy_read_page)
+		       struct page **pages, struct page *dummy_read_page,
+		       dma_addr_t *dma_addrs)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
@@ -34,8 +36,17 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 	if (!nvbe->pages)
 		return -ENOMEM;
 
+	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+	if (!nvbe->ttm_alloced)
+		return -ENOMEM;
+
 	nvbe->nr_pages = 0;
 	while (num_pages--) {
+		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+			nvbe->pages[nvbe->nr_pages] =
+					dma_addrs[nvbe->nr_pages];
+			nvbe->ttm_alloced[nvbe->nr_pages] = true;
+		} else {
 			nvbe->pages[nvbe->nr_pages] =
 				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
 				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
@@ -44,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 				be->func->clear(be);
 				return -EFAULT;
 			}
+		}
 
 		nvbe->nr_pages++;
 	}
@@ -65,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 			be->func->unbind(be);
 
 		while (nvbe->nr_pages--) {
+			if (!nvbe->ttm_alloced[nvbe->nr_pages])
 				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 		kfree(nvbe->pages);
+		kfree(nvbe->ttm_alloced);
 		nvbe->pages = NULL;
+		nvbe->ttm_alloced = NULL;
 		nvbe->nr_pages = 0;
 	}
 }
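
Note what the ttm_alloced flags buy in nouveau_sgdma_clear() above: bus addresses that TTM handed in were obtained through the DMA API and are released by TTM itself, so the backend must not pci_unmap_page() them; only the mappings the backend created in the else branch of populate() are unmapped.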
drivers/gpu/drm/radeon/radeon.h +3 −1
@@ -328,6 +328,7 @@ struct radeon_gart {
 	union radeon_gart_table		table;
 	struct page			**pages;
 	dma_addr_t			*pages_addr;
+	bool				*ttm_alloced;
 	bool				ready;
 };
 
@@ -340,7 +341,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			int pages);
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist);
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr);
 
 
 /*
drivers/gpu/drm/radeon/radeon_gart.c +26 −10
@@ -149,6 +149,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
+			if (!rdev->gart.ttm_alloced[p])
 				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 }
 
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
 {
 	unsigned t;
 	unsigned p;
@@ -180,6 +181,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
+		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+		 * is requested. */
+		if (dma_addr[i] != DMA_ERROR_CODE) {
+			rdev->gart.ttm_alloced[p] = true;
+			rdev->gart.pages_addr[p] = dma_addr[i];
+		} else {
 			/* we need to support large memory configurations */
 			/* assume that unbind have already been call on the range */
 			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
@@ -190,6 +197,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 				radeon_gart_unbind(rdev, offset, pages);
 				return -ENOMEM;
 			}
+		}
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.ttm_alloced == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
+	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
+	rdev->gart.ttm_alloced = NULL;
 }
drivers/gpu/drm/radeon/radeon_ttm.c +7 −2
@@ -513,6 +513,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	if (r) {
 		return r;
 	}
+	rdev->mman.bdev.dev = rdev->dev;
 	/* No others user of address space so set it to 0 */
 	r = ttm_bo_device_init(&rdev->mman.bdev,
 			       rdev->mman.bo_global_ref.ref.object,
@@ -647,6 +648,7 @@ struct radeon_ttm_backend {
 	unsigned long			num_pages;
 	struct page			**pages;
 	struct page			*dummy_read_page;
+	dma_addr_t			*dma_addrs;
 	bool				populated;
 	bool				bound;
 	unsigned			offset;
@@ -655,12 +657,14 @@ struct radeon_ttm_backend {
 static int radeon_ttm_backend_populate(struct ttm_backend *backend,
 				       unsigned long num_pages,
 				       struct page **pages,
-				       struct page *dummy_read_page)
+				       struct page *dummy_read_page,
+				       dma_addr_t *dma_addrs)
 {
 	struct radeon_ttm_backend *gtt;
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = pages;
+	gtt->dma_addrs = dma_addrs;
 	gtt->num_pages = num_pages;
 	gtt->dummy_read_page = dummy_read_page;
 	gtt->populated = true;
@@ -673,6 +677,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
 
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->pages = NULL;
+	gtt->dma_addrs = NULL;
 	gtt->num_pages = 0;
 	gtt->dummy_read_page = NULL;
 	gtt->populated = false;
@@ -693,7 +698,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
 		     gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
-			     gtt->num_pages, gtt->pages);
+			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
 	if (r) {
 		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
 			  gtt->num_pages, gtt->offset);
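
Taken together, the radeon backend now captures dma_addrs at populate time and hands it through to radeon_gart_bind(), so GART entries can be programmed with whatever bus address the DMA API chose. Given that the branch comes through Konrad Wilk's Xen tree, the likely motivation is setups such as Xen dom0 with swiotlb, where a page's physical address need not equal its bus address.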