
Commit 69aa40e2 authored by Ben Skeggs

drm/nouveau/fifo/gk104: cosmetic engine->runlist changes



Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent acdf7d4f
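A quick sketch of the structural change, pieced together from the hunks below (not part of the original commit message): the named per-engine bookkeeping array in gk104.h becomes an anonymous per-runlist struct, and the related fields and parameters are renamed from "engine" to "runl".

	/* before */
	struct gk104_fifo_engn {
		struct nvkm_memory *runlist[2];
		int cur_runlist;
		wait_queue_head_t wait;
		struct list_head chan;
	};
	/* ... */
	struct gk104_fifo_engn engine[7];

	/* after */
	struct {
		struct nvkm_memory *mem[2];
		int next;
		wait_queue_head_t wait;
		struct list_head chan;
	} runlist[7];

Callers follow the rename: gk104_fifo_runlist_commit() now takes int runl instead of u32 engine, and struct gk104_fifo_chan carries int runl in place of int engine.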
+1 −1
@@ -7,7 +7,7 @@
 struct gk104_fifo_chan {
 	struct nvkm_fifo_chan base;
 	struct gk104_fifo *fifo;
-	int engine;
+	int runl;
 
 	struct list_head head;
 	bool killed;
+35 −32
@@ -47,38 +47,41 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
 }
 
 void
-gk104_fifo_runlist_commit(struct gk104_fifo *fifo, u32 engine)
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 {
-	struct gk104_fifo_engn *engn = &fifo->engine[engine];
 	struct gk104_fifo_chan *chan;
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_memory *cur;
+	struct nvkm_memory *mem;
 	int nr = 0;
 	int target;
 
 	mutex_lock(&subdev->mutex);
-	cur = engn->runlist[engn->cur_runlist];
-	engn->cur_runlist = !engn->cur_runlist;
+	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
+	fifo->runlist[runl].next = !fifo->runlist[runl].next;
 
-	nvkm_kmap(cur);
-	list_for_each_entry(chan, &engn->chan, head) {
-		nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
-		nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
+	nvkm_kmap(mem);
+	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
+		nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
+		nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
 		nr++;
 	}
-	nvkm_done(cur);
+	nvkm_done(mem);
 
-	target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;
+	if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
+		target = 0;
+	else
+		target = 3;
 
-	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
+	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
 				    (target << 28));
-	nvkm_wr32(device, 0x002274, (engine << 20) | nr);
+	nvkm_wr32(device, 0x002274, (runl << 20) | nr);
 
-	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
-			       (engine * 0x08)) & 0x00100000),
+	if (wait_event_timeout(fifo->runlist[runl].wait,
+			       !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
+				       & 0x00100000),
 			       msecs_to_jiffies(2000)) == 0)
-		nvkm_error(subdev, "runlist %d update timeout\n", engine);
+		nvkm_error(subdev, "runlist %d update timeout\n", runl);
 	mutex_unlock(&subdev->mutex);
 }
 
@@ -94,7 +97,7 @@ void
 gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
 {
 	mutex_lock(&fifo->base.engine.subdev.mutex);
-	list_add_tail(&chan->head, &fifo->engine[chan->engine].chan);
+	list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
 	mutex_unlock(&fifo->base.engine.subdev.mutex);
 }
 
@@ -199,7 +202,7 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 	u32 engn;
 
 	spin_lock_irqsave(&fifo->base.lock, flags);
-	for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
+	for (engn = 0; engn < ARRAY_SIZE(fifo->runlist); engn++) {
 		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
 		u32 busy = (stat & 0x80000000);
 		u32 next = (stat & 0x0fff0000) >> 16;
@@ -211,7 +214,7 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
 		(void)save;
 
 		if (busy && chsw) {
-			list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
+			list_for_each_entry(chan, &fifo->runlist[engn].chan, head) {
 				if (chan->base.chid == chid) {
 					engine = gk104_fifo_engine(fifo, engn);
 					if (!engine)
@@ -541,10 +544,10 @@ gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	u32 mask = nvkm_rd32(device, 0x002a00);
 	while (mask) {
-		u32 engn = __ffs(mask);
-		wake_up(&fifo->engine[engn].wait);
-		nvkm_wr32(device, 0x002a00, 1 << engn);
-		mask &= ~(1 << engn);
+		int runl = __ffs(mask);
+		wake_up(&fifo->runlist[runl].wait);
+		nvkm_wr32(device, 0x002a00, 1 << runl);
+		mask &= ~(1 << runl);
 	}
 }
 
@@ -669,21 +672,21 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
 	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
 
-	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+	for (i = 0; i < ARRAY_SIZE(fifo->runlist); i++) {
 		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 				      0x8000, 0x1000, false,
-				      &fifo->engine[i].runlist[0]);
+				      &fifo->runlist[i].mem[0]);
 		if (ret)
 			return ret;
 
 		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 				      0x8000, 0x1000, false,
-				      &fifo->engine[i].runlist[1]);
+				      &fifo->runlist[i].mem[1]);
 		if (ret)
 			return ret;
 
-		init_waitqueue_head(&fifo->engine[i].wait);
-		INIT_LIST_HEAD(&fifo->engine[i].chan);
+		init_waitqueue_head(&fifo->runlist[i].wait);
+		INIT_LIST_HEAD(&fifo->runlist[i].chan);
 	}
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
@@ -739,9 +742,9 @@ gk104_fifo_dtor(struct nvkm_fifo *base)
 	nvkm_vm_put(&fifo->user.bar);
 	nvkm_memory_del(&fifo->user.mem);
 
-	for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
-		nvkm_memory_del(&fifo->engine[i].runlist[1]);
-		nvkm_memory_del(&fifo->engine[i].runlist[0]);
+	for (i = 0; i < ARRAY_SIZE(fifo->runlist); i++) {
+		nvkm_memory_del(&fifo->runlist[i].mem[1]);
+		nvkm_memory_del(&fifo->runlist[i].mem[0]);
 	}
 
 	return fifo;
+8 −9
@@ -6,13 +6,6 @@
 #include <subdev/mmu.h>
 
 struct gk104_fifo_chan;
-struct gk104_fifo_engn {
-	struct nvkm_memory *runlist[2];
-	int cur_runlist;
-	wait_queue_head_t wait;
-	struct list_head chan;
-};
-
 struct gk104_fifo {
 	struct nvkm_fifo base;
 
@@ -23,7 +16,13 @@ struct gk104_fifo {
 
 	int pbdma_nr;
 
-	struct gk104_fifo_engn engine[7];
+	struct {
+		struct nvkm_memory *mem[2];
+		int next;
+		wait_queue_head_t wait;
+		struct list_head chan;
+	} runlist[7];
+
 	struct {
 		struct nvkm_memory *mem;
 		struct nvkm_vma bar;
@@ -41,7 +40,7 @@ void gk104_fifo_uevent_init(struct nvkm_fifo *);
 void gk104_fifo_uevent_fini(struct nvkm_fifo *);
 void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
-void gk104_fifo_runlist_commit(struct gk104_fifo *, u32 engine);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
 
 static inline u64
 gk104_fifo_engine_subdev(int engine)
+6 −6
@@ -155,7 +155,7 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
 		gk104_fifo_runlist_remove(fifo, chan);
 		nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
 		gk104_fifo_gpfifo_kick(chan);
-		gk104_fifo_runlist_commit(fifo, chan->engine);
+		gk104_fifo_runlist_commit(fifo, chan->runl);
 	}
 
 	nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -170,13 +170,13 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
 	u32 addr = chan->base.inst->addr >> 12;
 	u32 coff = chan->base.chid * 8;
 
-	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->engine << 16);
+	nvkm_mask(device, 0x800004 + coff, 0x000f0000, chan->runl << 16);
 	nvkm_wr32(device, 0x800000 + coff, 0x80000000 | addr);
 
 	if (list_empty(&chan->head) && !chan->killed) {
 		gk104_fifo_runlist_insert(fifo, chan);
 		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
-		gk104_fifo_runlist_commit(fifo, chan->engine);
+		gk104_fifo_runlist_commit(fifo, chan->runl);
 		nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
 	}
 }
@@ -227,7 +227,7 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		return ret;
 
 	/* determine which downstream engines are present */
-	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->engine); i++) {
+	for (i = 0, engines = 0; i < ARRAY_SIZE(fifo->runlist); i++) {
 		u64 subdevs = gk104_fifo_engine_subdev(i);
 		if (!nvkm_device_engine(device, __ffs64(subdevs)))
 			continue;
@@ -255,12 +255,12 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
 		return -ENOMEM;
 	*pobject = &chan->base.object;
 	chan->fifo = fifo;
-	chan->engine = __ffs(args->v0.engine);
+	chan->runl = __ffs(args->v0.engine);
 	INIT_LIST_HEAD(&chan->head);
 
 	ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
 				  0x1000, 0x1000, true, args->v0.vm, 0,
-				  gk104_fifo_engine_subdev(chan->engine),
+				  gk104_fifo_engine_subdev(chan->runl),
 				  1, fifo->user.bar.offset, 0x200,
 				  oclass, &chan->base);
 	if (ret)