Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 325a7282 authored by Ben Skeggs
Browse files

drm/nouveau: consolidate handling of dma mask



Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent a220dd73
Loading
Loading
Loading
Loading
+1 −30
Original line number Diff line number Diff line
@@ -253,7 +253,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
@@ -263,34 +262,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->client.device)->dma_bits;
	if (nvxx_device(&drm->client.device)->func->pci) {
		if (drm->agp.bridge)
			bits = 32;
	} else if (device->func->tegra) {
		struct nvkm_device_tegra *tegra = device->func->tegra(device);

		/*
		 * If the platform can use a IOMMU, then the addressable DMA
		 * space is constrained by the IOMMU bit
		 */
		if (tegra->func->iommu_bit)
			bits = min(bits, tegra->func->iommu_bit);

	}

	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret && bits != 32) {
		bits = 32;
		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
	}
	if (ret)
		return ret;

	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
	if (ret)
		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;
@@ -300,7 +271,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  bits <= 32 ? true : false);
				  drm->client.mmu.dmabits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
+12 −12
Original line number Diff line number Diff line
@@ -1627,7 +1627,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
	const struct nvkm_device_pci_vendor *pciv;
	const char *name = NULL;
	struct nvkm_device_pci *pdev;
	int ret;
	int ret, bits;

	ret = pci_enable_device(pci_dev);
	if (ret)
@@ -1679,17 +1679,17 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
	if (ret)
		return ret;

	/*
	 * Set a preliminary DMA mask based on the .dma_bits member of the
	 * MMU subdevice. This allows other subdevices to create DMA mappings
	 * in their init() or oneinit() methods, which may be called before the
	 * TTM layer sets the DMA mask definitively.
	 * This is necessary for platforms where the default DMA mask of 32
	 * does not cover any system memory, i.e., when all RAM is > 4 GB.
	 */
	if (pdev->device.mmu)
		dma_set_mask_and_coherent(&pci_dev->dev,
				DMA_BIT_MASK(pdev->device.mmu->dma_bits));
	/* Set DMA mask based on capabilities reported by the MMU subdev. */
	if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
		bits = pdev->device.mmu->dma_bits;
	else
		bits = 32;

	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
	if (ret && bits != 32) {
		dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
		pdev->device.mmu->dma_bits = 32;
	}

	return 0;
}
+0 −2
Original line number Diff line number Diff line
@@ -309,8 +309,6 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,

	/**
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 * This will be refined in nouveau_ttm_init but we need to do it early
	 * for instmem to behave properly
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)