Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1f5bffca authored by Ben Skeggs
Browse files

drm/nouveau/mmu: cosmetic changes



This is purely preparation for upcoming commits, there should be no
code changes here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 2ca0ddbc
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -38,7 +38,7 @@ struct nvkm_vm {
};

struct nvkm_mmu {
	struct nvkm_subdev base;
	struct nvkm_subdev subdev;

	u64 limit;
	u8  dma_bits;
@@ -69,11 +69,11 @@ nvkm_mmu(void *obj)
#define nvkm_mmu_create(p,e,o,i,f,d)                                      \
	nvkm_subdev_create((p), (e), (o), 0, (i), (f), (d))
#define nvkm_mmu_destroy(p)                                               \
	nvkm_subdev_destroy(&(p)->base)
	nvkm_subdev_destroy(&(p)->subdev)
#define nvkm_mmu_init(p)                                                  \
	nvkm_subdev_init(&(p)->base)
	nvkm_subdev_init(&(p)->subdev)
#define nvkm_mmu_fini(p,s)                                                \
	nvkm_subdev_fini(&(p)->base, (s))
	nvkm_subdev_fini(&(p)->subdev, (s))

#define _nvkm_mmu_dtor _nvkm_subdev_dtor
#define _nvkm_mmu_init _nvkm_subdev_init
+1 −1
Original line number Diff line number Diff line
@@ -221,7 +221,7 @@ nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu_priv *priv = (void *)mmu;
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
+2 −2
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
	}

	if (priv->clone) {
		struct nv04_mmu_priv *mmu = nv04_mmu(dmaobj);
		struct nv04_mmu *mmu = nv04_mmu(dmaobj);
		struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
		if (!dmaobj->start)
			return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
@@ -86,7 +86,7 @@ nv04_dmaobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		 struct nvkm_object **pobject)
{
	struct nvkm_dmaeng *dmaeng = (void *)engine;
	struct nv04_mmu_priv *mmu = nv04_mmu(engine);
	struct nv04_mmu *mmu = nv04_mmu(engine);
	struct nv04_dmaobj_priv *priv;
	int ret;

+26 −31
Original line number Diff line number Diff line
@@ -29,11 +29,6 @@

#include <core/gpuobj.h>

struct gf100_mmu_priv {
	struct nvkm_mmu base;
};


/* Map from compressed to corresponding uncompressed storage type.
 * The value 0xff represents an invalid storage type.
 */
@@ -158,8 +153,8 @@ gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
static void
gf100_vm_flush(struct nvkm_vm *vm)
{
	struct gf100_mmu_priv *priv = (void *)vm->mmu;
	struct nvkm_bar *bar = nvkm_bar(priv);
	struct nvkm_mmu *mmu = (void *)vm->mmu;
	struct nvkm_bar *bar = nvkm_bar(mmu);
	struct nvkm_vm_pgd *vpgd;
	u32 type;

@@ -169,26 +164,26 @@ gf100_vm_flush(struct nvkm_vm *vm)
	if (atomic_read(&vm->engref[NVDEV_SUBDEV_BAR]))
		type |= 0x00000004; /* HUB_ONLY */

	mutex_lock(&nv_subdev(priv)->mutex);
	mutex_lock(&nv_subdev(mmu)->mutex);
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		/* looks like maybe a "free flush slots" counter, the
		 * faster you write to 0x100cbc to more it decreases
		 */
		if (!nv_wait_ne(priv, 0x100c80, 0x00ff0000, 0x00000000)) {
			nv_error(priv, "vm timeout 0: 0x%08x %d\n",
				 nv_rd32(priv, 0x100c80), type);
		if (!nv_wait_ne(mmu, 0x100c80, 0x00ff0000, 0x00000000)) {
			nv_error(mmu, "vm timeout 0: 0x%08x %d\n",
				 nv_rd32(mmu, 0x100c80), type);
		}

		nv_wr32(priv, 0x100cb8, vpgd->obj->addr >> 8);
		nv_wr32(priv, 0x100cbc, 0x80000000 | type);
		nv_wr32(mmu, 0x100cb8, vpgd->obj->addr >> 8);
		nv_wr32(mmu, 0x100cbc, 0x80000000 | type);

		/* wait for flush to be queued? */
		if (!nv_wait(priv, 0x100c80, 0x00008000, 0x00008000)) {
			nv_error(priv, "vm timeout 1: 0x%08x %d\n",
				 nv_rd32(priv, 0x100c80), type);
		if (!nv_wait(mmu, 0x100c80, 0x00008000, 0x00008000)) {
			nv_error(mmu, "vm timeout 1: 0x%08x %d\n",
				 nv_rd32(mmu, 0x100c80), type);
		}
	}
	mutex_unlock(&nv_subdev(priv)->mutex);
	mutex_unlock(&nv_subdev(mmu)->mutex);
}

static int
@@ -203,25 +198,25 @@ gf100_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct gf100_mmu_priv *priv;
	struct nvkm_mmu *mmu;
	int ret;

	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "vm", &priv);
	*pobject = nv_object(priv);
	ret = nvkm_mmu_create(parent, engine, oclass, "VM", "mmu", &mmu);
	*pobject = nv_object(mmu);
	if (ret)
		return ret;

	priv->base.limit = 1ULL << 40;
	priv->base.dma_bits = 40;
	priv->base.pgt_bits  = 27 - 12;
	priv->base.spg_shift = 12;
	priv->base.lpg_shift = 17;
	priv->base.create = gf100_vm_create;
	priv->base.map_pgt = gf100_vm_map_pgt;
	priv->base.map = gf100_vm_map;
	priv->base.map_sg = gf100_vm_map_sg;
	priv->base.unmap = gf100_vm_unmap;
	priv->base.flush = gf100_vm_flush;
	mmu->limit = 1ULL << 40;
	mmu->dma_bits = 40;
	mmu->pgt_bits  = 27 - 12;
	mmu->spg_shift = 12;
	mmu->lpg_shift = 17;
	mmu->create = gf100_vm_create;
	mmu->map_pgt = gf100_vm_map_pgt;
	mmu->map = gf100_vm_map;
	mmu->map_sg = gf100_vm_map_sg;
	mmu->unmap = gf100_vm_unmap;
	mmu->flush = gf100_vm_flush;
	return 0;
}

+27 −27
Original line number Diff line number Diff line
@@ -84,37 +84,37 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	      struct nvkm_oclass *oclass, void *data, u32 size,
	      struct nvkm_object **pobject)
{
	struct nv04_mmu_priv *priv;
	struct nv04_mmu *mmu;
	struct nvkm_gpuobj *dma;
	int ret;

	ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
			      "pcigart", &priv);
	*pobject = nv_object(priv);
			      "mmu", &mmu);
	*pobject = nv_object(mmu);
	if (ret)
		return ret;

	priv->base.create = nv04_vm_create;
	priv->base.limit = NV04_PDMA_SIZE;
	priv->base.dma_bits = 32;
	priv->base.pgt_bits = 32 - 12;
	priv->base.spg_shift = 12;
	priv->base.lpg_shift = 12;
	priv->base.map_sg = nv04_vm_map_sg;
	priv->base.unmap = nv04_vm_unmap;
	priv->base.flush = nv04_vm_flush;

	ret = nvkm_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
			     &priv->vm);
	mmu->base.create = nv04_vm_create;
	mmu->base.limit = NV04_PDMA_SIZE;
	mmu->base.dma_bits = 32;
	mmu->base.pgt_bits = 32 - 12;
	mmu->base.spg_shift = 12;
	mmu->base.lpg_shift = 12;
	mmu->base.map_sg = nv04_vm_map_sg;
	mmu->base.unmap = nv04_vm_unmap;
	mmu->base.flush = nv04_vm_flush;

	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096,
			     &mmu->vm);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL,
	ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
			      16, NVOBJ_FLAG_ZERO_ALLOC,
			      &priv->vm->pgt[0].obj[0]);
	dma = priv->vm->pgt[0].obj[0];
	priv->vm->pgt[0].refcount[0] = 1;
			      &mmu->vm->pgt[0].obj[0]);
	dma = mmu->vm->pgt[0].obj[0];
	mmu->vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

@@ -126,16 +126,16 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
void
nv04_mmu_dtor(struct nvkm_object *object)
{
	struct nv04_mmu_priv *priv = (void *)object;
	if (priv->vm) {
		nvkm_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
		nvkm_vm_ref(NULL, &priv->vm, NULL);
	struct nv04_mmu *mmu = (void *)object;
	if (mmu->vm) {
		nvkm_gpuobj_ref(NULL, &mmu->vm->pgt[0].obj[0]);
		nvkm_vm_ref(NULL, &mmu->vm, NULL);
	}
	if (priv->nullp) {
		pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
				    priv->nullp, priv->null);
	if (mmu->nullp) {
		pci_free_consistent(nv_device(mmu)->pdev, 16 * 1024,
				    mmu->nullp, mmu->null);
	}
	nvkm_mmu_destroy(&priv->base);
	nvkm_mmu_destroy(&mmu->base);
}

struct nvkm_oclass
Loading