Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cd821077 authored by Ben Skeggs
Browse files

drm/nouveau/mmu: switch to gpuobj accessor macros



Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 19187075
Loading
Loading
Loading
Loading
+16 −8
Original line number Diff line number Diff line
@@ -79,8 +79,10 @@ gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
	if (pgt[1])
		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

	nv_wo32(pgd, (index * 8) + 0, pde[0]);
	nv_wo32(pgd, (index * 8) + 4, pde[1]);
	nvkm_kmap(pgd);
	nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
	nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
	nvkm_done(pgd);
}

static inline u64
@@ -114,12 +116,14 @@ gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
		ltc->tags_clear(ltc, tag, cnt);
	}

	nvkm_kmap(pgt);
	while (cnt--) {
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
		phys += next;
		pte  += 8;
	}
	nvkm_done(pgt);
}

static void
@@ -130,24 +134,28 @@ gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	/* compressed storage types are invalid for system memory */
	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];

	nvkm_kmap(pgt);
	pte <<= 3;
	while (cnt--) {
		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
	nvkm_done(pgt);
}

/* Clear "cnt" page-table entries starting at index "pte".
 *
 * GF100 PTEs are 8 bytes wide, so the entry index is converted to a byte
 * offset with "pte <<= 3" and each entry is cleared as two 32-bit words.
 * The gpuobj must be mapped with nvkm_kmap() before the nvkm_wo32()
 * accessor may be used, and released with nvkm_done() afterwards.
 *
 * NOTE(review): the captured diff contained both the removed nv_wo32()
 * calls and their nvkm_wo32() replacements; only the post-commit
 * accessor sequence is kept here — nv_wo32() no longer exists.
 */
static void
gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	nvkm_kmap(pgt);
	pte <<= 3;
	while (cnt--) {
		nvkm_wo32(pgt, pte + 0, 0x00000000);	/* low 32 bits */
		nvkm_wo32(pgt, pte + 4, 0x00000000);	/* high 32 bits */
		pte += 8;
	}
	nvkm_done(pgt);
}

static void
+10 −4
Original line number Diff line number Diff line
@@ -37,26 +37,30 @@ nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte = 0x00008 + (pte * 4);
	nvkm_kmap(pgt);
	while (cnt) {
		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
		u32 phys = (u32)*list++;
		while (cnt && page--) {
			nv_wo32(pgt, pte, phys | 3);
			nvkm_wo32(pgt, pte, phys | 3);
			phys += NV04_PDMA_PAGE;
			pte += 4;
			cnt -= 1;
		}
	}
	nvkm_done(pgt);
}

/* Clear "cnt" page-table entries starting at index "pte".
 *
 * NV04 PTEs are 4 bytes wide and the table data begins 0x8 bytes into
 * the object, hence the "0x00008 + (pte * 4)" offset computation.
 * The gpuobj must be bracketed by nvkm_kmap()/nvkm_done() while the
 * nvkm_wo32() accessor is used.
 *
 * NOTE(review): the captured diff contained both the removed nv_wo32()
 * call and its nvkm_wo32() replacement; only the post-commit accessor
 * is kept here.
 */
static void
nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte = 0x00008 + (pte * 4);
	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}
	nvkm_done(pgt);
}

static void
@@ -118,8 +122,10 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	if (ret)
		return ret;

	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
	nvkm_kmap(dma);
	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
	nvkm_done(dma);
	return 0;
}

+6 −2
Original line number Diff line number Diff line
@@ -39,26 +39,30 @@ nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte = pte * 4;
	nvkm_kmap(pgt);
	while (cnt) {
		u32 page = PAGE_SIZE / NV41_GART_PAGE;
		u64 phys = (u64)*list++;
		while (cnt && page--) {
			nv_wo32(pgt, pte, (phys >> 7) | 1);
			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
			phys += NV41_GART_PAGE;
			pte += 4;
			cnt -= 1;
		}
	}
	nvkm_done(pgt);
}

/* Clear "cnt" GART page-table entries starting at index "pte".
 *
 * NV41 PTEs are 4 bytes wide; the index is converted to a byte offset
 * with "pte * 4".  The gpuobj must be bracketed by nvkm_kmap()/
 * nvkm_done() while the nvkm_wo32() accessor is used.
 *
 * NOTE(review): the captured diff contained both the removed nv_wo32()
 * call and its nvkm_wo32() replacement; only the post-commit accessor
 * is kept here.
 */
static void
nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte = pte * 4;
	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}
	nvkm_done(pgt);
}

static void
+20 −16
Original line number Diff line number Diff line
@@ -41,10 +41,10 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
	u32 base = (pte << 2) & ~0x0000000f;
	u32 tmp[4];

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	tmp[0] = nvkm_ro32(pgt, base + 0x0);
	tmp[1] = nvkm_ro32(pgt, base + 0x4);
	tmp[2] = nvkm_ro32(pgt, base + 0x8);
	tmp[3] = nvkm_ro32(pgt, base + 0xc);

	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -74,10 +74,10 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
		}
	}

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
	nvkm_wo32(pgt, base + 0x0, tmp[0]);
	nvkm_wo32(pgt, base + 0x4, tmp[1]);
	nvkm_wo32(pgt, base + 0x8, tmp[2]);
	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
}

static void
@@ -88,6 +88,7 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	u32 tmp[4];
	int i;

	nvkm_kmap(pgt);
	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
@@ -100,15 +101,16 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		nv_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
		nvkm_wo32(pgt, pte++ * 4, tmp[0] >>  0 | tmp[1] << 27);
		nvkm_wo32(pgt, pte++ * 4, tmp[1] >>  5 | tmp[2] << 22);
		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
		cnt -= 4;
	}

	if (cnt)
		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
	nvkm_done(pgt);
}

static void
@@ -116,6 +118,7 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);

	nvkm_kmap(pgt);
	if (pte & 3) {
		u32  max = 4 - (pte & 3);
		u32 part = (cnt > max) ? max : cnt;
@@ -125,15 +128,16 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nv_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
		cnt -= 4;
	}

	if (cnt)
		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
	nvkm_done(pgt);
}

static void
+16 −8
Original line number Diff line number Diff line
@@ -53,8 +53,10 @@ nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
			phys |= 0x20;
	}

	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
	nvkm_kmap(pgd);
	nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
	nvkm_done(pgd);
}

static inline u64
@@ -89,6 +91,7 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
	pte <<= 3;
	cnt <<= 3;

	nvkm_kmap(pgt);
	while (cnt) {
		u32 offset_h = upper_32_bits(phys);
		u32 offset_l = lower_32_bits(phys);
@@ -109,12 +112,13 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
		}

		while (block) {
			nv_wo32(pgt, pte + 0, offset_l);
			nv_wo32(pgt, pte + 4, offset_h);
			nvkm_wo32(pgt, pte + 0, offset_l);
			nvkm_wo32(pgt, pte + 4, offset_h);
			pte += 8;
			block -= 8;
		}
	}
	nvkm_done(pgt);
}

static void
@@ -123,23 +127,27 @@ nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
	pte <<= 3;
	nvkm_kmap(pgt);
	while (cnt--) {
		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
	nvkm_done(pgt);
}

/* Clear "cnt" page-table entries starting at index "pte".
 *
 * NV50 PTEs are 8 bytes wide, so the entry index is converted to a byte
 * offset with "pte <<= 3" and each entry is cleared as two 32-bit words.
 * The gpuobj must be bracketed by nvkm_kmap()/nvkm_done() while the
 * nvkm_wo32() accessor is used.
 *
 * NOTE(review): the captured diff contained both the removed nv_wo32()
 * calls and their nvkm_wo32() replacements; only the post-commit
 * accessor sequence is kept here.
 */
static void
nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte + 0, 0x00000000);	/* low 32 bits */
		nvkm_wo32(pgt, pte + 4, 0x00000000);	/* high 32 bits */
		pte += 8;
	}
	nvkm_done(pgt);
}

static void