
Commit ca97a366 authored by Ben Skeggs

drm/nv50-/vm: take mutex rather than irqsave spinlock



These operations can take quite some time, and we really don't want to
have to hold a spinlock for too long.

Now that the lock ordering for vm and the gr/nv84 hw bug workaround has
been reversed, it's possible to use a mutex here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 464d636b
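
The reasoning above follows a common kernel locking pattern: once a critical section no longer has to run in atomic context, an irqsave spinlock (which disables local interrupts and forbids sleeping for the whole flush) can be replaced with a mutex, which lets the holder sleep while the hardware catches up. Below is a minimal sketch of that pattern, using made-up demo_* names rather than anything from the nouveau tree:

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Hypothetical driver state, for illustration only. */
struct demo_priv {
	spinlock_t lock;	/* old scheme */
	struct mutex mutex;	/* new scheme */
};

/* Old scheme: the whole (potentially slow) flush runs with local
 * interrupts off and must not sleep while it polls the hardware. */
static void demo_flush_spinlock(struct demo_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* ... busy-wait on a flush-status register ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* New scheme: interrupts stay enabled and the holder may sleep,
 * so a long hardware wait no longer stalls the CPU. */
static void demo_flush_mutex(struct demo_priv *priv)
{
	mutex_lock(&priv->mutex);
	/* ... wait (possibly sleeping) on a flush-status register ... */
	mutex_unlock(&priv->mutex);
}

In the patch itself no new mutex is introduced: the flush paths reuse the existing per-subdev mutex (nv_subdev(priv)->mutex) instead of a private lock, which is also why the spin_lock_init() calls disappear from the constructors.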
+2 −5
@@ -31,7 +31,6 @@
 
 struct nv50_vmmgr_priv {
 	struct nouveau_vmmgr base;
-	spinlock_t lock;
 };
 
 static void
@@ -153,10 +152,9 @@ nv50_vm_flush(struct nouveau_vm *vm)
 {
 	struct nv50_vmmgr_priv *priv = (void *)vm->vmm;
 	struct nouveau_engine *engine;
-	unsigned long flags;
 	int i, vme;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	mutex_lock(&nv_subdev(priv)->mutex);
 	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
 		if (!atomic_read(&vm->engref[i]))
 			continue;
@@ -182,7 +180,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
 		if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
 			nv_error(priv, "vm flush timeout: engine %d\n", vme);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static int
@@ -220,7 +218,6 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->base.map_sg = nv50_vm_map_sg;
 	priv->base.unmap = nv50_vm_unmap;
 	priv->base.flush = nv50_vm_flush;
-	spin_lock_init(&priv->lock);
 	return 0;
 }
 
+2 −5
@@ -32,7 +32,6 @@
 
 struct nvc0_vmmgr_priv {
 	struct nouveau_vmmgr base;
-	spinlock_t lock;
 };
 
 
@@ -164,12 +163,11 @@ void
 nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
 {
 	struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
-	unsigned long flags;
 
 	/* looks like maybe a "free flush slots" counter, the
 	 * faster you write to 0x100cbc to more it decreases
 	 */
-	spin_lock_irqsave(&priv->lock, flags);
+	mutex_lock(&nv_subdev(priv)->mutex);
 	if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
 		nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
 			 nv_rd32(subdev, 0x100c80), type);
@@ -183,7 +181,7 @@ nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
 		nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
 			 nv_rd32(subdev, 0x100c80), type);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
 static void
@@ -227,7 +225,6 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	priv->base.map_sg = nvc0_vm_map_sg;
 	priv->base.unmap = nvc0_vm_unmap;
 	priv->base.flush = nvc0_vm_flush;
-	spin_lock_init(&priv->lock);
 	return 0;
 }
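
As for the lock-ordering remark in the commit message: a mutex can only be used here if it is never taken while a spinlock is already held, because mutex_lock() may sleep and sleeping in atomic context is a bug. A rough sketch of that constraint, with purely illustrative lock names standing in for the vm lock and the gr/nv84 hw bug workaround lock:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_vm_mutex);		/* stands in for the vm flush lock */
static DEFINE_SPINLOCK(demo_workaround_lock);	/* stands in for a hw-bug workaround lock */

/* Broken ordering: taking a mutex inside a spinlock section risks
 * sleeping in atomic context. */
static void demo_bad_order(void)
{
	spin_lock(&demo_workaround_lock);
	mutex_lock(&demo_vm_mutex);	/* BUG: may sleep under a spinlock */
	mutex_unlock(&demo_vm_mutex);
	spin_unlock(&demo_workaround_lock);
}

/* Reversed ordering: take the sleeping lock first, the spinlock inside it. */
static void demo_good_order(void)
{
	mutex_lock(&demo_vm_mutex);
	spin_lock(&demo_workaround_lock);
	spin_unlock(&demo_workaround_lock);
	mutex_unlock(&demo_vm_mutex);
}

Reversing that ordering in an earlier change is what makes the conversion in this commit safe.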