Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 79ca2770 authored by Ben Skeggs
Browse files

drm/nouveau/core: rework event interface



This is a lot of prep-work for being able to send event notifications
back to userspace.  Events now contain data, rather than a "something
just happened" signal.

Handler data is now embedded into a containing structure, rather than
being kmalloc()'d, and can optionally have the notify routine handled
in a workqueue.

Various races between suspend/unload with display HPD/DP IRQ handlers
automagically solved as a result.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 4d681b66
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@ nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
nouveau-y += core/core/mm.o
nouveau-y += core/core/namedb.o
nouveau-y += core/core/notify.o
nouveau-y += core/core/object.o
nouveau-y += core/core/option.o
nouveau-y += core/core/parent.o
+40 −136
Original line number Diff line number Diff line
/*
 * Copyright 2013 Red Hat Inc.
 * Copyright 2013-2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -24,173 +24,77 @@
#include <core/event.h>

void
nouveau_event_put(struct nouveau_eventh *handler)
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
	struct nouveau_event *event = handler->event;
	unsigned long flags;
	u32 m, t;

	if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
		return;

	spin_lock_irqsave(&event->refs_lock, flags);
	for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
		if (!--event->refs[handler->index * event->types_nr + t]) {
			if (event->disable)
				event->disable(event, 1 << t, handler->index);
	BUG_ON(!spin_is_locked(&event->refs_lock));
	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (--event->refs[index * event->types_nr + type] == 0) {
			if (event->func->fini)
				event->func->fini(event, 1 << type, index);
		}

	}
	spin_unlock_irqrestore(&event->refs_lock, flags);
}

void
nouveau_event_get(struct nouveau_eventh *handler)
{
	struct nouveau_event *event = handler->event;
	unsigned long flags;
	u32 m, t;

	if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
		return;

	spin_lock_irqsave(&event->refs_lock, flags);
	for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
		if (!event->refs[handler->index * event->types_nr + t]++) {
			if (event->enable)
				event->enable(event, 1 << t, handler->index);
		}

	}
	spin_unlock_irqrestore(&event->refs_lock, flags);
}

static void
nouveau_event_fini(struct nouveau_eventh *handler)
{
	struct nouveau_event *event = handler->event;
	unsigned long flags;
	nouveau_event_put(handler);
	spin_lock_irqsave(&event->list_lock, flags);
	list_del(&handler->head);
	spin_unlock_irqrestore(&event->list_lock, flags);
}

static int
nouveau_event_init(struct nouveau_event *event, u32 types, int index,
		   int (*func)(void *, u32, int), void *priv,
		   struct nouveau_eventh *handler)
{
	unsigned long flags;

	if (types & ~((1 << event->types_nr) - 1))
		return -EINVAL;
	if (index >= event->index_nr)
		return -EINVAL;

	handler->event = event;
	handler->flags = 0;
	handler->types = types;
	handler->index = index;
	handler->func = func;
	handler->priv = priv;

	spin_lock_irqsave(&event->list_lock, flags);
	list_add_tail(&handler->head, &event->list[index]);
	spin_unlock_irqrestore(&event->list_lock, flags);
	return 0;
}

int
nouveau_event_new(struct nouveau_event *event, u32 types, int index,
		  int (*func)(void *, u32, int), void *priv,
		  struct nouveau_eventh **phandler)
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
	struct nouveau_eventh *handler;
	int ret = -ENOMEM;

	if (event->check) {
		ret = event->check(event, types, index);
		if (ret)
			return ret;
	BUG_ON(!spin_is_locked(&event->refs_lock));
	while (types) {
		int type = __ffs(types); types &= ~(1 << type);
		if (++event->refs[index * event->types_nr + type] == 1) {
			if (event->func->init)
				event->func->init(event, 1 << type, index);
		}

	handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (handler) {
		ret = nouveau_event_init(event, types, index, func, priv, handler);
		if (ret)
			kfree(handler);
	}

	return ret;
}

void
nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
{
	BUG_ON(handler != NULL);
	if (*ref) {
		nouveau_event_fini(*ref);
		kfree(*ref);
	}
	*ref = handler;
}

void
nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
nvkm_event_send(struct nvkm_event *event, u32 types, int index,
		void *data, u32 size)
{
	struct nouveau_eventh *handler;
	struct nvkm_notify *notify;
	unsigned long flags;

	if (WARN_ON(index >= event->index_nr))
	if (!event->refs || WARN_ON(index >= event->index_nr))
		return;

	spin_lock_irqsave(&event->list_lock, flags);
	list_for_each_entry(handler, &event->list[index], head) {
		if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
			continue;
		if (!(handler->types & types))
	list_for_each_entry(notify, &event->list, head) {
		if (notify->index == index && (notify->types & types)) {
			if (event->func->send) {
				event->func->send(data, size, notify);
				continue;
		if (handler->func(handler->priv, handler->types & types, index)
				!= NVKM_EVENT_DROP)
			continue;
		nouveau_event_put(handler);
			}
			nvkm_notify_send(notify, data, size);
		}
	}
	spin_unlock_irqrestore(&event->list_lock, flags);
}

void
nouveau_event_destroy(struct nouveau_event **pevent)
nvkm_event_fini(struct nvkm_event *event)
{
	struct nouveau_event *event = *pevent;
	if (event) {
		kfree(event);
		*pevent = NULL;
	if (event->refs) {
		kfree(event->refs);
		event->refs = NULL;
	}
}

int
nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
		struct nvkm_event *event)
{
	struct nouveau_event *event;
	int i;

	event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
				  sizeof(event->refs[0]), GFP_KERNEL);
	if (!event)
	event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr,
			      GFP_KERNEL);
	if (!event->refs)
		return -ENOMEM;

	event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
	if (!event->list) {
		kfree(event);
		return -ENOMEM;
	}

	spin_lock_init(&event->list_lock);
	spin_lock_init(&event->refs_lock);
	for (i = 0; i < index_nr; i++)
		INIT_LIST_HEAD(&event->list[i]);
	event->func = func;
	event->types_nr = types_nr;
	event->index_nr = index_nr;
	spin_lock_init(&event->refs_lock);
	spin_lock_init(&event->list_lock);
	INIT_LIST_HEAD(&event->list);
	return 0;
}
+164 −0
Original line number Diff line number Diff line
/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */

#include <core/os.h>
#include <core/event.h>
#include <core/notify.h>

/* Drop the notifier's hold on event delivery; caller must hold
 * event->refs_lock.  Only the transition out of the armed state
 * (block 0 -> 1) releases the underlying event reference.
 */
static inline void
nvkm_notify_put_locked(struct nvkm_notify *notify)
{
	int was_armed = (notify->block++ == 0);

	if (was_armed)
		nvkm_event_put(notify->event, notify->types, notify->index);
}

/* User-facing disable: clear NVKM_NOTIFY_USER and block delivery.
 * Idempotent — only the caller that actually clears the bit performs
 * the put.  When the callback runs from a workqueue, also wait for
 * any in-flight work item to finish before returning.
 */
void
nvkm_notify_put(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	if (unlikely(!event))
		return;
	if (!test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags))
		return;

	spin_lock_irqsave(&event->refs_lock, flags);
	nvkm_notify_put_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);

	/* Ensure a queued/running work item has completed. */
	if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
		flush_work(&notify->work);
}

/* Re-arm the notifier; caller must hold event->refs_lock.  Only the
 * transition into the armed state (block 1 -> 0) takes the underlying
 * event reference.
 */
static inline void
nvkm_notify_get_locked(struct nvkm_notify *notify)
{
	notify->block--;
	if (notify->block == 0)
		nvkm_event_get(notify->event, notify->types, notify->index);
}

/* User-facing enable: set NVKM_NOTIFY_USER and unblock delivery.
 * Idempotent — only the caller that actually sets the bit performs
 * the get.
 */
void
nvkm_notify_get(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	if (unlikely(!event))
		return;
	if (test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags))
		return;

	spin_lock_irqsave(&event->refs_lock, flags);
	nvkm_notify_get_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);
}

/* Run the notifier's callback and decide whether it stays armed.
 *
 * Reached from nvkm_notify_send() (direct call) or nvkm_notify_work()
 * (workqueue); by then nvkm_notify_send() has already moved the
 * notifier to the blocked state via nvkm_notify_put_locked().
 *
 * Re-arm (get_locked) when either:
 *  - the callback returned NVKM_NOTIFY_KEEP (caller wants more events); or
 *  - NVKM_NOTIFY_USER was already clear, i.e. a concurrent
 *    nvkm_notify_put() raced with this delivery.
 * NOTE(review): the second leg appears to rebalance the block count
 * against that concurrent put — confirm against nvkm_notify_put()/
 * nvkm_notify_send() before changing the condition or its ordering.
 */
static inline void
nvkm_notify_func(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	int ret = notify->func(notify);
	unsigned long flags;
	if ((ret == NVKM_NOTIFY_KEEP) ||
	    !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}

/* Workqueue trampoline: recover the owning notifier from the embedded
 * work_struct and invoke its callback in process context.
 */
static void
nvkm_notify_work(struct work_struct *work)
{
	struct nvkm_notify *notify =
		container_of(work, typeof(*notify), work);

	nvkm_notify_func(notify);
}

/* Deliver one event occurrence to a single notifier.
 *
 * Caller must hold event->list_lock (taken by nvkm_event_send() while
 * walking event->list).  The notifier is moved to the blocked state
 * before its callback runs; nvkm_notify_func() re-arms it afterwards
 * if the callback returned NVKM_NOTIFY_KEEP.
 *
 * @notify: target notifier
 * @data:   event payload
 * @size:   payload size; must equal notify->size as set by the ctor
 */
void
nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	/* assert_spin_locked() is correct on both SMP and UP kernels;
	 * BUG_ON(!spin_is_locked(...)) would always fire on UP builds,
	 * where spin_is_locked() unconditionally returns 0.
	 */
	assert_spin_locked(&event->list_lock);
	BUG_ON(size != notify->size);

	spin_lock_irqsave(&event->refs_lock, flags);
	if (notify->block) {
		/* Already blocked: silently drop this occurrence. */
		spin_unlock_irqrestore(&event->refs_lock, flags);
		return;
	}
	nvkm_notify_put_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);

	if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
		/* Deferred delivery: stash the payload in the buffer
		 * allocated by nvkm_notify_init() and run the callback
		 * from a workqueue.
		 */
		memcpy((void *)notify->data, data, size);
		schedule_work(&notify->work);
	} else {
		/* Immediate delivery in the caller's context. */
		notify->data = data;
		nvkm_notify_func(notify);
		notify->data = NULL;
	}
}

void
nvkm_notify_fini(struct nvkm_notify *notify)
{
	unsigned long flags;
	if (notify->event) {
		nvkm_notify_put(notify);
		spin_lock_irqsave(&notify->event->list_lock, flags);
		list_del(&notify->head);
		spin_unlock_irqrestore(&notify->event->list_lock, flags);
		kfree((void *)notify->data);
		notify->event = NULL;
	}
}

/* Initialise a notifier against an event source.
 *
 * @event: event source; must have been set up by nvkm_event_init()
 *         (event->refs non-NULL), otherwise -ENODEV
 * @func:  callback invoked on delivery; returns NVKM_NOTIFY_KEEP/DROP
 * @work:  true to run @func from a workqueue instead of send context
 * @data:  implementation-specific request, validated by event->func->ctor
 * @size:  size of @data
 * @reply: expected notification payload size; must match what the ctor
 *         selected (notify->size), otherwise -EINVAL
 * @notify: notifier to initialise; starts blocked (block == 1)
 *
 * Returns 0 on success; on any failure notify->event is left NULL so
 * nvkm_notify_fini() is safe to call regardless.
 */
int
nvkm_notify_init(struct nvkm_event *event, int (*func)(struct nvkm_notify *),
		 bool work, void *data, u32 size, u32 reply,
		 struct nvkm_notify *notify)
{
	unsigned long flags;
	int ret = -ENODEV;

	notify->event = event;
	if (event->refs) {
		ret = event->func->ctor(data, size, notify);
		if (ret == 0) {
			if (notify->size != reply) {
				ret = -EINVAL;
			} else {
				notify->flags = 0;
				notify->block = 1;
				notify->func = func;
				notify->data = NULL;
				ret = 0;
				if (work) {
					INIT_WORK(&notify->work,
						  nvkm_notify_work);
					set_bit(NVKM_NOTIFY_WORK,
						&notify->flags);
					/* Deferred delivery needs its own
					 * payload buffer.
					 */
					notify->data = kmalloc(reply,
							       GFP_KERNEL);
					if (!notify->data)
						ret = -ENOMEM;
				}
			}
		}
		if (ret == 0) {
			spin_lock_irqsave(&event->list_lock, flags);
			list_add_tail(&notify->head, &event->list);
			spin_unlock_irqrestore(&event->list_lock, flags);
		}
	}
	if (ret)
		notify->event = NULL;
	return ret;
}
+1 −1
Original line number Diff line number Diff line
@@ -33,7 +33,7 @@ nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
	struct acpi_bus_event *info = data;

	if (!strcmp(info->device_class, "ac_adapter"))
		nouveau_event_trigger(device->ntfy, 1, NVKM_DEVICE_NTFY_POWER);
		nvkm_event_send(&device->event, 1, 0, NULL, 0);

	return NOTIFY_DONE;
}
+21 −2
Original line number Diff line number Diff line
@@ -364,12 +364,30 @@ nouveau_devobj_ofuncs = {
/******************************************************************************
 * nouveau_device: engine functions
 *****************************************************************************/

/* Object classes exposed by the device engine; 0x0080 is handled by
 * nouveau_devobj_ofuncs (presumably the device object class — naming
 * suggests so).  The empty entry terminates the table.
 */
static struct nouveau_oclass
nouveau_device_sclass[] = {
	{ 0x0080, &nouveau_devobj_ofuncs },
	{}
};

/* nvkm_event ctor for the device event: accepts only an empty request
 * (size == 0) and selects a zero-byte reply on type bit 0, index 0.
 * Returns -EINVAL (with a WARN) for any non-empty request.
 */
static int
nouveau_device_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
{
	if (WARN_ON(size != 0))
		return -EINVAL;

	notify->size  = 0;
	notify->types = 1;
	notify->index = 0;
	return 0;
}

/* Event implementation for the device: only a ctor is provided; the
 * optional init/fini (enable/disable) and send hooks are left NULL.
 */
static const struct nvkm_event_func
nouveau_device_event_func = {
	.ctor = nouveau_device_event_ctor,
};

static int
nouveau_device_fini(struct nouveau_object *object, bool suspend)
{
@@ -445,7 +463,7 @@ nouveau_device_dtor(struct nouveau_object *object)
{
	struct nouveau_device *device = (void *)object;

	nouveau_event_destroy(&device->ntfy);
	nvkm_event_fini(&device->event);

	mutex_lock(&nv_devices_mutex);
	list_del(&device->head);
@@ -545,7 +563,8 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
	nv_engine(device)->sclass = nouveau_device_sclass;
	list_add(&device->head, &nv_devices);

	ret = nouveau_event_create(1, NVKM_DEVICE_NTFY, &device->ntfy);
	ret = nvkm_event_init(&nouveau_device_event_func, 1, 1,
			      &device->event);
done:
	mutex_unlock(&nv_devices_mutex);
	return ret;
Loading