Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 83f56106 authored by Ben Skeggs
Browse files

drm/nouveau/mmu: switch to device pri macros



Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 25e3a463
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -154,7 +154,8 @@ static void
gf100_vm_flush(struct nvkm_vm *vm)
{
	struct nvkm_mmu *mmu = (void *)vm->mmu;
	struct nvkm_bar *bar = nvkm_bar(mmu);
	struct nvkm_device *device = mmu->subdev.device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_vm_pgd *vpgd;
	u32 type;

@@ -171,16 +172,16 @@ gf100_vm_flush(struct nvkm_vm *vm)
		 */
		if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
			nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
				 nv_rd32(mmu, 0x100c80), type);
				 nvkm_rd32(device, 0x100c80), type);
		}

		nv_wr32(mmu, 0x100cb8, vpgd->obj->addr >> 8);
		nv_wr32(mmu, 0x100cbc, 0x80000000 | type);
		nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
		nvkm_wr32(device, 0x100cbc, 0x80000000 | type);

		/* wait for flush to be queued? */
		if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
			nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
				 nv_rd32(mmu, 0x100c80), type);
				 nvkm_rd32(device, 0x100c80), type);
		}
	}
	mutex_unlock(&nv_subdev(mmu)->mutex);
+8 −6
Original line number Diff line number Diff line
@@ -65,14 +65,15 @@ static void
nv41_vm_flush(struct nvkm_vm *vm)
{
	struct nv04_mmu *mmu = (void *)vm->mmu;
	struct nvkm_device *device = mmu->base.subdev.device;

	mutex_lock(&nv_subdev(mmu)->mutex);
	nv_wr32(mmu, 0x100810, 0x00000022);
	nvkm_wr32(device, 0x100810, 0x00000022);
	if (!nv_wait(mmu, 0x100810, 0x00000020, 0x00000020)) {
		nv_warn(mmu, "flush timeout, 0x%08x\n",
			nv_rd32(mmu, 0x100810));
			nvkm_rd32(device, 0x100810));
	}
	nv_wr32(mmu, 0x100810, 0x00000000);
	nvkm_wr32(device, 0x100810, 0x00000000);
	mutex_unlock(&nv_subdev(mmu)->mutex);
}

@@ -131,6 +132,7 @@ static int
nv41_mmu_init(struct nvkm_object *object)
{
	struct nv04_mmu *mmu = (void *)object;
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
	int ret;

@@ -138,9 +140,9 @@ nv41_mmu_init(struct nvkm_object *object)
	if (ret)
		return ret;

	nv_wr32(mmu, 0x100800, dma->addr | 0x00000002);
	nv_mask(mmu, 0x10008c, 0x00000100, 0x00000100);
	nv_wr32(mmu, 0x100820, 0x00000000);
	nvkm_wr32(device, 0x100800, dma->addr | 0x00000002);
	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x100820, 0x00000000);
	return 0;
}

+15 −13
Original line number Diff line number Diff line
@@ -140,11 +140,12 @@ static void
nv44_vm_flush(struct nvkm_vm *vm)
{
	struct nv04_mmu *mmu = (void *)vm->mmu;
	nv_wr32(mmu, 0x100814, mmu->base.limit - NV44_GART_PAGE);
	nv_wr32(mmu, 0x100808, 0x00000020);
	struct nvkm_device *device = mmu->base.subdev.device;
	nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
	nvkm_wr32(device, 0x100808, 0x00000020);
	if (!nv_wait(mmu, 0x100808, 0x00000001, 0x00000001))
		nv_error(mmu, "timeout: 0x%08x\n", nv_rd32(mmu, 0x100808));
	nv_wr32(mmu, 0x100808, 0x00000000);
		nv_error(mmu, "timeout: 0x%08x\n", nvkm_rd32(device, 0x100808));
	nvkm_wr32(device, 0x100808, 0x00000000);
}

/*******************************************************************************
@@ -208,6 +209,7 @@ static int
nv44_mmu_init(struct nvkm_object *object)
{
	struct nv04_mmu *mmu = (void *)object;
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
	u32 addr;
	int ret;
@@ -220,17 +222,17 @@ nv44_mmu_init(struct nvkm_object *object)
	 * allocated on 512KiB alignment, and not exceed a total size
	 * of 512KiB for this to work correctly
	 */
	addr  = nv_rd32(mmu, 0x10020c);
	addr  = nvkm_rd32(device, 0x10020c);
	addr -= ((gart->addr >> 19) + 1) << 19;

	nv_wr32(mmu, 0x100850, 0x80000000);
	nv_wr32(mmu, 0x100818, mmu->null);
	nv_wr32(mmu, 0x100804, NV44_GART_SIZE);
	nv_wr32(mmu, 0x100850, 0x00008000);
	nv_mask(mmu, 0x10008c, 0x00000200, 0x00000200);
	nv_wr32(mmu, 0x100820, 0x00000000);
	nv_wr32(mmu, 0x10082c, 0x00000001);
	nv_wr32(mmu, 0x100800, addr | 0x00000010);
	nvkm_wr32(device, 0x100850, 0x80000000);
	nvkm_wr32(device, 0x100818, mmu->null);
	nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
	nvkm_wr32(device, 0x100850, 0x00008000);
	nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
	nvkm_wr32(device, 0x100820, 0x00000000);
	nvkm_wr32(device, 0x10082c, 0x00000001);
	nvkm_wr32(device, 0x100800, addr | 0x00000010);
	return 0;
}

+3 −2
Original line number Diff line number Diff line
@@ -146,7 +146,8 @@ static void
nv50_vm_flush(struct nvkm_vm *vm)
{
	struct nvkm_mmu *mmu = (void *)vm->mmu;
	struct nvkm_bar *bar = nvkm_bar(mmu);
	struct nvkm_device *device = mmu->subdev.device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_engine *engine;
	int i, vme;

@@ -180,7 +181,7 @@ nv50_vm_flush(struct nvkm_vm *vm)
			continue;
		}

		nv_wr32(mmu, 0x100c80, (vme << 16) | 1);
		nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
		if (!nv_wait(mmu, 0x100c80, 0x00000001, 0x00000000))
			nv_error(mmu, "vm flush timeout: engine %d\n", vme);
	}