
Commit a0d9a8fe authored by Ben Skeggs
drm/nouveau: remove allocations from gart populate() hook



Since some somewhat questionable changes a while back, TTM provides a
completely empty array of struct dma_address that stays around for the
entire lifetime of the TTM object.

Let's use this array, *always*, rather than wasting yet more memory on
another array whose purpose is identical, as well as yet another bool array
of the same size saying *which* of the previous two arrays to use...
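
In outline, the new scheme looks like the sketch below (a condensed
illustration, not the literal patch; sketch_populate is a hypothetical
name, the other identifiers are from nouveau_sgdma.c): populate() borrows
the TTM-owned dma_addrs array and maps pages straight into it, so the
backend allocates nothing of its own.

	/* Condensed sketch of the new populate() flow (illustration only;
	 * see the diff below for the real code).  TTM keeps 'dma_addrs'
	 * alive for the whole lifetime of the object, so the backend can
	 * simply borrow it instead of kmalloc()ing a duplicate array. */
	static int
	sketch_populate(struct nouveau_sgdma_be *nvbe, struct page **pages,
			unsigned long num_pages, dma_addr_t *dma_addrs)
	{
		struct drm_device *dev = nvbe->dev;
		unsigned long i;

		nvbe->pages = dma_addrs;	/* borrow TTM's array: no allocation */
		nvbe->nr_pages = num_pages;
		nvbe->unmap_pages = true;	/* clear() must undo our mappings */

		for (i = 0; i < num_pages; i++) {
			nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
						      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i]))
				return -EFAULT;	/* real code first unwinds via clear() */
		}
		return 0;
	}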

This change will also solve the high-order allocation failures seen by
some people while using nouveau.
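
For a sense of scale (illustrative numbers, not from the commit): mapping a
128 MiB buffer with 4 KiB pages means num_pages = 32768, so the old
populate() asked kmalloc() for 32768 * sizeof(dma_addr_t) = 256 KiB of
physically contiguous memory on a typical 64-bit config, an order-6
allocation (64 contiguous pages) that readily fails once memory is
fragmented, plus another 32 KiB for the bool array.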

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent a1484512
drivers/gpu/drm/nouveau/nouveau_sgdma.c +21 −45
@@ -12,8 +12,8 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
-	bool *ttm_alloced;
 	unsigned nr_pages;
+	bool unmap_pages;
 
 	u64 offset;
 	bool bound;
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
 	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
 
 	if (nvbe->pages)
 		return -EINVAL;
 
-	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
-	if (!nvbe->pages)
-		return -ENOMEM;
+	nvbe->pages = dma_addrs;
+	nvbe->nr_pages = num_pages;
+	nvbe->unmap_pages = true;
 
-	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-	if (!nvbe->ttm_alloced) {
-		kfree(nvbe->pages);
-		nvbe->pages = NULL;
-		return -ENOMEM;
+	/* this code path isn't called and is incorrect anyways */
+	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
+		nvbe->unmap_pages = false;
+		return 0;
 	}
 
-	nvbe->nr_pages = 0;
-	while (num_pages--) {
-		/* this code path isn't called and is incorrect anyways */
-		if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
-			nvbe->pages[nvbe->nr_pages] =
-					dma_addrs[nvbe->nr_pages];
-			nvbe->ttm_alloced[nvbe->nr_pages] = true;
-		} else {
-			nvbe->pages[nvbe->nr_pages] =
-				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+	for (i = 0; i < num_pages; i++) {
+		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
 					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(dev->pdev,
-						  nvbe->pages[nvbe->nr_pages])) {
+		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
+			nvbe->nr_pages = --i;
 			be->func->clear(be);
 			return -EFAULT;
 		}
-			nvbe->ttm_alloced[nvbe->nr_pages] = false;
-		}
-
-		nvbe->nr_pages++;
 	}
 
 	return 0;
@@ -72,25 +57,16 @@ static void
 nouveau_sgdma_clear(struct ttm_backend *be)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_device *dev;
-
-	if (nvbe && nvbe->pages) {
-		dev = nvbe->dev;
-		NV_DEBUG(dev, "\n");
+	struct drm_device *dev = nvbe->dev;
 
 	if (nvbe->bound)
 		be->func->unbind(be);
 
+	if (nvbe->unmap_pages) {
 		while (nvbe->nr_pages--) {
-			if (!nvbe->ttm_alloced[nvbe->nr_pages])
 			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
-		kfree(nvbe->pages);
-		kfree(nvbe->ttm_alloced);
-		nvbe->pages = NULL;
-		nvbe->ttm_alloced = NULL;
-		nvbe->nr_pages = 0;
 	}
 }