Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 73a60c0d authored by Ben Skeggs
Browse files

drm/nouveau/gpuobj: remove flags for vm-mappings



Having GPUOBJ and VM intertwined like this makes it *really* hard to
continue porting to the new driver architecture, split it out in
favour of requiring explicit maps be done by the caller.

It's more flexible and obvious this way anyway...

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 18c9b959
Loading
Loading
Loading
Loading
+25 −11
Original line number Diff line number Diff line
@@ -38,27 +38,38 @@ struct nvc0_copy_engine {
	u32 ctx;
};

/*
 * Per-channel state for the PCOPY engine: the context object's
 * backing instance memory and its mapping into the channel's VM.
 */
struct nvc0_copy_chan {
	struct nouveau_gpuobj *mem;	/* context object backing store */
	struct nouveau_vma vma;		/* mapping of @mem into the channel's VM */
};

/*
 * nvc0_copy_context_new - allocate per-channel PCOPY engine context
 *
 * Allocates the channel's copy-engine state, creates a 256-byte,
 * zero-initialised context object, explicitly maps it into the
 * channel's VM, and writes the mapping's virtual address into the
 * engine's context slot in the channel's instance memory (RAMIN).
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): on allocation/map failure, @cctx and @cctx->mem are
 * left in chan->engctx[engine] for the caller's teardown path to
 * release — confirm against the engine context_del path.
 */
static int
nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct nvc0_copy_chan *cctx;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	int ret;

	cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
	if (!cctx)
		return -ENOMEM;

	/* context object is mapped explicitly below, not via gpuobj
	 * VM flags (this is the point of the surrounding change) */
	ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
				 NVOBJ_FLAG_ZERO_ALLOC, &cctx->mem);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(cctx->mem, NV_MEM_ACCESS_RW, chan->vm,
				    &cctx->vma);
	if (ret)
		return ret;

	/* point the engine's RAMIN context slot at the VM address */
	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset));
	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset));
	dev_priv->engine.instmem.flush(dev);
	return 0;
}

@@ -73,7 +84,7 @@ static void
nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct nvc0_copy_chan *cctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	u32 inst;

@@ -93,9 +104,12 @@ nvc0_copy_context_del(struct nouveau_channel *chan, int engine)

	nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
	nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
	nouveau_gpuobj_ref(NULL, &ctx);

	chan->engctx[engine] = ctx;
	nouveau_gpuobj_unmap(&cctx->vma);
	nouveau_gpuobj_ref(NULL, &cctx->mem);

	kfree(cctx);
	chan->engctx[engine] = NULL;
}

static int
+50 −21
Original line number Diff line number Diff line
@@ -161,50 +161,68 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i = 0, gpc, tp, ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, 0, &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->unk408004_vma);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, 0, &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->unk40800c_vma);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, 0,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW,
				    chan->vm, &grch->unk418810_vma);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, 0, &grch->mmio);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->mmio_vma);
	if (ret)
		return ret;

	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004_vma.offset >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c_vma.offset >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810_vma.offset >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810_vma.offset >> 12);

	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c_vma.offset >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
	nv_wo32(grch->mmio, i++ * 4, 0x00000000);

	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
	nv_wo32(grch->mmio, i++ * 4, grch->unk408004_vma.offset >> 8);
	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
	nv_wo32(grch->mmio, i++ * 4, 0x80000018);

@@ -262,19 +280,25 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
		return -ENOMEM;
	chan->engctx[NVOBJ_ENGINE_GR] = grch;

	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
	ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, 0,
				 &grch->grctx);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->grctx_vma);
	if (ret)
		return ret;

	grctx = grch->grctx;

	ret = nvc0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
	pinstmem->flush(dev);

	if (!priv->grctx_vals) {
@@ -288,13 +312,13 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)

	if (!nouveau_ctxfw) {
		nv_wo32(grctx, 0x00, grch->mmio_nr);
		nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
		nv_wo32(grctx, 0x04, grch->mmio_vma.offset >> 8);
	} else {
		nv_wo32(grctx, 0xf4, 0);
		nv_wo32(grctx, 0xf8, 0);
		nv_wo32(grctx, 0x10, grch->mmio_nr);
		nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
		nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio_vma.offset));
		nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio_vma.offset));
		nv_wo32(grctx, 0x1c, 1);
		nv_wo32(grctx, 0x20, 0);
		nv_wo32(grctx, 0x28, 0);
@@ -313,6 +337,11 @@ nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nvc0_graph_chan *grch = chan->engctx[engine];

	nouveau_gpuobj_unmap(&grch->mmio_vma);
	nouveau_gpuobj_unmap(&grch->unk418810_vma);
	nouveau_gpuobj_unmap(&grch->unk40800c_vma);
	nouveau_gpuobj_unmap(&grch->unk408004_vma);
	nouveau_gpuobj_unmap(&grch->grctx_vma);
	nouveau_gpuobj_ref(NULL, &grch->mmio);
	nouveau_gpuobj_ref(NULL, &grch->unk418810);
	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+5 −0
Original line number Diff line number Diff line
@@ -62,10 +62,15 @@ struct nvc0_graph_priv {

/*
 * Per-channel PGRAPH context state.  Each nouveau_gpuobj holds the
 * object's backing storage; the matching nouveau_vma is its explicit
 * mapping into the channel's virtual address space.
 */
struct nvc0_graph_chan {
	struct nouveau_gpuobj *grctx;	/* graphics context image */
	struct nouveau_vma     grctx_vma;
	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
	struct nouveau_vma     unk408004_vma;
	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
	struct nouveau_vma     unk40800c_vma;
	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
	struct nouveau_vma     unk418810_vma;
	struct nouveau_gpuobj *mmio;	/* list of (register, value) init pairs */
	struct nouveau_vma     mmio_vma;
	int mmio_nr;			/* number of entries in @mmio */
};

+50 −20
Original line number Diff line number Diff line
@@ -137,24 +137,43 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
	int gpc;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
				 &grch->unk408004);
	ret = nouveau_gpuobj_new(dev, NULL, 0x3000, 256, 0, &grch->unk408004);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
				 &grch->unk40800c);
	ret = nouveau_gpuobj_map_vm(grch->unk408004, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->unk408004_vma);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, 0, &grch->unk40800c);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(grch->unk40800c, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->unk40800c_vma);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, 0,
				 &grch->unk418810);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
				 &grch->mmio);
	ret = nouveau_gpuobj_map_vm(grch->unk418810, NV_MEM_ACCESS_RW,
				    chan->vm, &grch->unk418810_vma);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, 0, &grch->mmio);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_map_vm(grch->mmio, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->mmio_vma);
	if (ret)
		return ret;

@@ -163,18 +182,18 @@ nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
	nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v));                     \
	grch->mmio_nr++;                                                       \
} while (0)
	mmio(0x40800c, grch->unk40800c->linst >> 8);
	mmio(0x40800c, grch->unk40800c_vma.offset >> 8);
	mmio(0x408010, 0x80000000);
	mmio(0x419004, grch->unk40800c->linst >> 8);
	mmio(0x419004, grch->unk40800c_vma.offset >> 8);
	mmio(0x419008, 0x00000000);
	mmio(0x4064cc, 0x80000000);
	mmio(0x408004, grch->unk408004->linst >> 8);
	mmio(0x408004, grch->unk408004_vma.offset >> 8);
	mmio(0x408008, 0x80000030);
	mmio(0x418808, grch->unk408004->linst >> 8);
	mmio(0x418808, grch->unk408004_vma.offset >> 8);
	mmio(0x41880c, 0x80000030);
	mmio(0x4064c8, 0x01800600);
	mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
	mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
	mmio(0x418810, 0x80000000 | grch->unk418810_vma.offset >> 12);
	mmio(0x419848, 0x10000000 | grch->unk418810_vma.offset >> 12);
	mmio(0x405830, 0x02180648);
	mmio(0x4064c4, 0x0192ffff);

@@ -214,19 +233,25 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
		return -ENOMEM;
	chan->engctx[NVOBJ_ENGINE_GR] = grch;

	ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
	ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, 0,
				 &grch->grctx);
	if (ret)
		goto error;

	ret = nouveau_gpuobj_map_vm(grch->grctx, NV_MEM_ACCESS_RW |
				    NV_MEM_ACCESS_SYS, chan->vm,
				    &grch->grctx_vma);
	if (ret)
		return ret;

	grctx = grch->grctx;

	ret = nve0_graph_create_context_mmio_list(chan);
	if (ret)
		goto error;

	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
	pinstmem->flush(dev);

	if (!priv->grctx_vals) {
@@ -240,8 +265,8 @@ nve0_graph_context_new(struct nouveau_channel *chan, int engine)
	nv_wo32(grctx, 0xf4, 0);
	nv_wo32(grctx, 0xf8, 0);
	nv_wo32(grctx, 0x10, grch->mmio_nr);
	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio_vma.offset));
	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio_vma.offset));
	nv_wo32(grctx, 0x1c, 1);
	nv_wo32(grctx, 0x20, 0);
	nv_wo32(grctx, 0x28, 0);
@@ -260,6 +285,11 @@ nve0_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nve0_graph_chan *grch = chan->engctx[engine];

	nouveau_gpuobj_unmap(&grch->mmio_vma);
	nouveau_gpuobj_unmap(&grch->unk418810_vma);
	nouveau_gpuobj_unmap(&grch->unk40800c_vma);
	nouveau_gpuobj_unmap(&grch->unk408004_vma);
	nouveau_gpuobj_unmap(&grch->grctx_vma);
	nouveau_gpuobj_ref(NULL, &grch->mmio);
	nouveau_gpuobj_ref(NULL, &grch->unk418810);
	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+5 −0
Original line number Diff line number Diff line
@@ -62,10 +62,15 @@ struct nve0_graph_priv {

/*
 * Per-channel PGRAPH context state (NVE0).  Each nouveau_gpuobj holds
 * the object's backing storage; the matching nouveau_vma is its
 * explicit mapping into the channel's virtual address space.
 */
struct nve0_graph_chan {
	struct nouveau_gpuobj *grctx;	/* graphics context image */
	struct nouveau_vma     grctx_vma;
	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
	struct nouveau_vma     unk408004_vma;
	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
	struct nouveau_vma     unk40800c_vma;
	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
	struct nouveau_vma     unk418810_vma;
	struct nouveau_gpuobj *mmio;	/* list of (register, value) init pairs */
	struct nouveau_vma     mmio_vma;
	int mmio_nr;			/* number of entries in @mmio */
};

Loading