Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fd1496a0 authored by Alexandre Courbot, committed by Ben Skeggs
Browse files

drm/nouveau: map pages using DMA API



The DMA API is the recommended way to map pages no matter what the
underlying bus is. Use the DMA functions for page mapping and remove
currently existing wrappers.

Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 3967633d
Loading
Loading
Loading
Loading
+0 −25
Original line number Diff line number Diff line
@@ -487,31 +487,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
	}
}

/*
 * Map a single page for device DMA.
 *
 * For PCI devices the page is mapped through the PCI DMA API; a mapping
 * failure is reported by returning 0.  Non-PCI (platform) devices use the
 * page's physical address directly.
 *
 * Returns the bus/DMA address of the page, or 0 on mapping failure.
 */
dma_addr_t
nv_device_map_page(struct nouveau_device *device, struct page *page)
{
	dma_addr_t addr;

	/* Platform devices: no IOMMU/bounce handling, physical address is
	 * usable as-is. */
	if (!nv_device_is_pci(device))
		return page_to_phys(page);

	addr = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
			    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(device->pdev, addr))
		return 0;

	return addr;
}

/*
 * Release a DMA mapping created by nv_device_map_page().
 *
 * Only PCI mappings need to be torn down; platform devices hand out raw
 * physical addresses, so there is nothing to undo for them.
 */
void
nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
{
	if (!nv_device_is_pci(device))
		return;

	pci_unmap_page(device->pdev, addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

int
nv_device_get_irq(struct nouveau_device *device, bool stall)
{
+0 −6
Original line number Diff line number Diff line
@@ -174,12 +174,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
resource_size_t
nv_device_resource_len(struct nouveau_device *device, unsigned int bar);

dma_addr_t
nv_device_map_page(struct nouveau_device *device, struct page *page);

void
nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);

int
nv_device_get_irq(struct nouveau_device *device, bool stall);

+5 −2
Original line number Diff line number Diff line
@@ -250,7 +250,9 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,

	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (priv->r100c08_page) {
		priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
		priv->r100c08 = dma_map_page(nv_device_base(device),
					     priv->r100c08_page, 0, PAGE_SIZE,
					     DMA_BIDIRECTIONAL);
		if (!priv->r100c08)
			nv_warn(priv, "failed 0x100c08 page map\n");
	} else {
@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
	struct nv50_fb_priv *priv = (void *)object;

	if (priv->r100c08_page) {
		nv_device_unmap_page(device, priv->r100c08);
		dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(priv->r100c08_page);
	}

+5 −2
Original line number Diff line number Diff line
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
	struct nvc0_fb_priv *priv = (void *)object;

	if (priv->r100c10_page) {
		nv_device_unmap_page(device, priv->r100c10);
		dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(priv->r100c10_page);
	}

@@ -93,7 +94,9 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,

	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (priv->r100c10_page) {
		priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
		priv->r100c10 = dma_map_page(nv_device_base(device),
					     priv->r100c10_page, 0, PAGE_SIZE,
					     DMA_BIDIRECTIONAL);
		if (!priv->r100c10)
			return -EFAULT;
	}
+16 −6
Original line number Diff line number Diff line
@@ -1340,6 +1340,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1358,6 +1359,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
	drm = nouveau_bdev(ttm->bdev);
	device = nv_device(drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
@@ -1377,17 +1379,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = nv_device_map_page(device,
							     ttm->pages[i]);
		if (!ttm_dma->dma_address[i]) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			while (--i) {
				nv_device_unmap_page(device,
						     ttm_dma->dma_address[i]);
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}
@@ -1399,6 +1406,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

@@ -1408,6 +1416,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
	drm = nouveau_bdev(ttm->bdev);
	device = nv_device(drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
@@ -1425,7 +1434,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			nv_device_unmap_page(device, ttm_dma->dma_address[i]);
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}