Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 67cb4056 authored by Jeremy Fitzhardinge
Browse files

Merge branch 'xen/dev-evtchn' into upstream/evtchn

* xen/dev-evtchn:
  xen/evtchn: add missing static
  xen/evtchn: Fix name of Xen event-channel device
  xen/evtchn: don't do unbind_from_irqhandler under spinlock
  xen/evtchn: remove spurious barrier
  xen/evtchn: ports start enabled
  xen/evtchn: dynamically allocate port_user array
  xen/evtchn: track enabled state for each port
parents bc7fc5e3 70697d54
Loading
Loading
Loading
Loading
+74 −22
Original line number Diff line number Diff line
@@ -69,20 +69,51 @@ struct per_user_data {
	const char *name;
};

/* Who's bound to each port? */
static struct per_user_data *port_user[NR_EVENT_CHANNELS];
/*
 * Who's bound to each port?  This is logically an array of struct
 * per_user_data *, but we encode the current enabled-state in bit 0.
 */
static unsigned long *port_user;
static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */

irqreturn_t evtchn_interrupt(int irq, void *data)
/*
 * Decode the per_user_data pointer bound to @port, stripping the
 * enabled flag kept in bit 0 of the entry.
 */
static inline struct per_user_data *get_port_user(unsigned port)
{
	unsigned long entry = port_user[port];

	return (struct per_user_data *)(entry & ~1UL);
}

/*
 * Record @u as the owner of @port.  Storing the raw pointer value also
 * leaves the port marked disabled (bit 0 clear, assuming the pointer is
 * at least 2-byte aligned as the bit-0 encoding requires).
 */
static inline void set_port_user(unsigned port, struct per_user_data *u)
{
	unsigned long entry = (unsigned long)u;

	port_user[port] = entry;
}

/* Is @port currently enabled?  The flag lives in bit 0 of the entry. */
static inline bool get_port_enabled(unsigned port)
{
	return (port_user[port] & 1) != 0;
}

/*
 * Set or clear the enabled flag (bit 0) of @port's entry, leaving the
 * owner pointer bits untouched.  Not atomic; callers hold
 * port_user_lock per the comment on port_user.
 */
static inline void set_port_enabled(unsigned port, bool enabled)
{
	unsigned long entry = port_user[port];

	if (enabled)
		entry |= 1;
	else
		entry &= ~1UL;

	port_user[port] = entry;
}

static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	unsigned int port = (unsigned long)data;
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	u = port_user[port];
	u = get_port_user(port);

	WARN(!get_port_enabled(port),
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     port, u);

	disable_irq_nosync(irq);
	set_port_enabled(port, false);

	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
@@ -92,9 +123,8 @@ irqreturn_t evtchn_interrupt(int irq, void *data)
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else {
	} else
		u->ring_overflow = 1;
	}

	spin_unlock(&port_user_lock);

@@ -198,9 +228,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
		goto out;

	spin_lock_irq(&port_user_lock);
	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
			enable_irq(irq_from_evtchn(kbuf[i]));

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		unsigned port = kbuf[i];

		if (port < NR_EVENT_CHANNELS &&
		    get_port_user(port) == u &&
		    !get_port_enabled(port)) {
			set_port_enabled(port, true);
			enable_irq(irq_from_evtchn(port));
		}
	}

	spin_unlock_irq(&port_user_lock);

	rc = count;
@@ -222,8 +261,9 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */
	BUG_ON(port_user[port] != NULL);
	port_user[port] = u;
	BUG_ON(get_port_user(port) != NULL);
	set_port_user(port, u);
	set_port_enabled(port, true); /* start enabled */

	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
				       u->name, (void *)(unsigned long)port);
@@ -239,10 +279,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)

	unbind_from_irqhandler(irq, (void *)(unsigned long)port);

	/* make sure we unbind the irq handler before clearing the port */
	barrier();

	port_user[port] = NULL;
	set_port_user(port, NULL);
}

static long evtchn_ioctl(struct file *file,
@@ -333,15 +370,17 @@ static long evtchn_ioctl(struct file *file,
		spin_lock_irq(&port_user_lock);

		rc = -ENOTCONN;
		if (port_user[unbind.port] != u) {
		if (get_port_user(unbind.port) != u) {
			spin_unlock_irq(&port_user_lock);
			break;
		}

		evtchn_unbind_from_user(u, unbind.port);
		disable_irq(irq_from_evtchn(unbind.port));

		spin_unlock_irq(&port_user_lock);

		evtchn_unbind_from_user(u, unbind.port);

		rc = 0;
		break;
	}
@@ -355,7 +394,7 @@ static long evtchn_ioctl(struct file *file,

		if (notify.port >= NR_EVENT_CHANNELS) {
			rc = -EINVAL;
		} else if (port_user[notify.port] != u) {
		} else if (get_port_user(notify.port) != u) {
			rc = -ENOTCONN;
		} else {
			notify_remote_via_evtchn(notify.port);
@@ -444,14 +483,21 @@ static int evtchn_release(struct inode *inode, struct file *filp)
	free_page((unsigned long)u->ring);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (port_user[i] != u)
		if (get_port_user(i) != u)
			continue;

		evtchn_unbind_from_user(port_user[i], i);
		disable_irq(irq_from_evtchn(i));
	}

	spin_unlock_irq(&port_user_lock);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (get_port_user(i) != u)
			continue;

		evtchn_unbind_from_user(get_port_user(i), i);
	}

	kfree(u->name);
	kfree(u);

@@ -472,7 +518,7 @@ static const struct file_operations evtchn_fops = {

static struct miscdevice evtchn_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "evtchn",
	.name         = "xen/evtchn",
	.fops         = &evtchn_fops,
};
static int __init evtchn_init(void)
@@ -482,8 +528,11 @@ static int __init evtchn_init(void)
	if (!xen_domain())
		return -ENODEV;

	port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL);
	if (port_user == NULL)
		return -ENOMEM;

	spin_lock_init(&port_user_lock);
	memset(port_user, 0, sizeof(port_user));

	/* Create '/dev/misc/evtchn'. */
	err = misc_register(&evtchn_miscdev);
@@ -499,6 +548,9 @@ static int __init evtchn_init(void)

static void __exit evtchn_cleanup(void)
{
	kfree(port_user);
	port_user = NULL;

	misc_deregister(&evtchn_miscdev);
}