Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1b9dc91e authored by Lars-Peter Clausen's avatar Lars-Peter Clausen Committed by Jonathan Cameron
Browse files

iio: events: Make iio_push_event() IRQ context safe



Currently it is not safe to call iio_push_event() from hard IRQ context since
the IIO event code uses spin_lock()/spin_unlock() and it is not safe to mix
calls to spin_lock()/spin_unlock() from different contexts on the same lock.
E.g. if the lock is being held in iio_event_chrdev_read() and an interrupt
kicks in and the interrupt handler calls iio_push_event() we end up with a
deadlock.

This patch updates iio_push_event() to use spin_lock_irqsave()/
spin_unlock_irqrestore(), since it can be called from both IRQ and non-IRQ
context. All other users of the lock, which always run in non-IRQ
context, are updated to spin_lock_irq()/spin_unlock_irq().

Signed-off-by: default avatarLars-Peter Clausen <lars@metafoo.de>
Signed-off-by: default avatarJonathan Cameron <jic23@kernel.org>
parent b9606e2a
Loading
Loading
Loading
Loading
+15 −14
Original line number Original line Diff line number Diff line
@@ -46,10 +46,11 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	struct iio_event_data ev;
	unsigned long flags;
	int copied;
	int copied;


	/* Does anyone care? */
	/* Does anyone care? */
	spin_lock(&ev_int->wait.lock);
	spin_lock_irqsave(&ev_int->wait.lock, flags);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {


		ev.id = ev_code;
		ev.id = ev_code;
@@ -59,7 +60,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
		if (copied != 0)
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	}
	spin_unlock(&ev_int->wait.lock);
	spin_unlock_irqrestore(&ev_int->wait.lock, flags);


	return 0;
	return 0;
}
}
@@ -76,10 +77,10 @@ static unsigned int iio_event_poll(struct file *filep,


	poll_wait(filep, &ev_int->wait, wait);
	poll_wait(filep, &ev_int->wait, wait);


	spin_lock(&ev_int->wait.lock);
	spin_lock_irq(&ev_int->wait.lock);
	if (!kfifo_is_empty(&ev_int->det_events))
	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;
		events = POLLIN | POLLRDNORM;
	spin_unlock(&ev_int->wait.lock);
	spin_unlock_irq(&ev_int->wait.lock);


	return events;
	return events;
}
}
@@ -96,14 +97,14 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
	if (count < sizeof(struct iio_event_data))
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;
		return -EINVAL;


	spin_lock(&ev_int->wait.lock);
	spin_lock_irq(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			ret = -EAGAIN;
			goto error_unlock;
			goto error_unlock;
		}
		}
		/* Blocking on device; waiting for something to be there */
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible_locked(ev_int->wait,
		ret = wait_event_interruptible_locked_irq(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
		if (ret)
			goto error_unlock;
			goto error_unlock;
@@ -113,7 +114,7 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);


error_unlock:
error_unlock:
	spin_unlock(&ev_int->wait.lock);
	spin_unlock_irq(&ev_int->wait.lock);


	return ret ? ret : copied;
	return ret ? ret : copied;
}
}
@@ -122,7 +123,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
{
	struct iio_event_interface *ev_int = filep->private_data;
	struct iio_event_interface *ev_int = filep->private_data;


	spin_lock(&ev_int->wait.lock);
	spin_lock_irq(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	/*
	 * In order to maintain a clean state for reopening,
	 * In order to maintain a clean state for reopening,
@@ -130,7 +131,7 @@ static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
	 * any new __iio_push_event calls running.
	 * any new __iio_push_event calls running.
	 */
	 */
	kfifo_reset_out(&ev_int->det_events);
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock(&ev_int->wait.lock);
	spin_unlock_irq(&ev_int->wait.lock);


	return 0;
	return 0;
}
}
@@ -151,18 +152,18 @@ int iio_event_getfd(struct iio_dev *indio_dev)
	if (ev_int == NULL)
	if (ev_int == NULL)
		return -ENODEV;
		return -ENODEV;


	spin_lock(&ev_int->wait.lock);
	spin_lock_irq(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock(&ev_int->wait.lock);
		spin_unlock_irq(&ev_int->wait.lock);
		return -EBUSY;
		return -EBUSY;
	}
	}
	spin_unlock(&ev_int->wait.lock);
	spin_unlock_irq(&ev_int->wait.lock);
	fd = anon_inode_getfd("iio:event",
	fd = anon_inode_getfd("iio:event",
				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
				&iio_event_chrdev_fileops, ev_int, O_RDONLY);
	if (fd < 0) {
	if (fd < 0) {
		spin_lock(&ev_int->wait.lock);
		spin_lock_irq(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock(&ev_int->wait.lock);
		spin_unlock_irq(&ev_int->wait.lock);
	}
	}
	return fd;
	return fd;
}
}