Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cdd1cf79 authored by Chris Wilson, committed by Daniel Vetter
Browse files

drm: Make drm_read() more robust against multithreaded races



The current implementation of drm_read() faces a number of issues:

1. Upon an error, it consumes the event which may lead to the client
blocking.
2. Upon an error, it forgets about events already copied
3. If it fails to copy a single event with O_NONBLOCK it falls into an
infinite loop of reporting EAGAIN.
4. There is a race between multiple waiters and blocking reads of the
events list.

Here, we inline drm_dequeue_event() into drm_read() so that we can take
the spinlock around the list walking and event copying, and importantly
reorder the error handling to avoid the issues above.

Cc: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Takashi Iwai <tiwai@suse.de>
Testcase: igt/drm_read
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 01934c2a
Loading
Loading
Loading
Loading
+42 −47
Original line number Diff line number Diff line
@@ -478,64 +478,59 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);

static bool
drm_dequeue_event(struct drm_file *file_priv,
		  size_t total, size_t max, struct drm_pending_event **out)
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e;
	unsigned long flags;
	bool ret = false;
	ssize_t ret = 0;

	spin_lock_irqsave(&dev->event_lock, flags);

	*out = NULL;
	if (list_empty(&file_priv->event_list))
		goto out;
	e = list_first_entry(&file_priv->event_list,
			     struct drm_pending_event, link);
	if (e->event->length + total > max)
		goto out;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;

	file_priv->event_space += e->event->length;
	list_del(&e->link);
	*out = e;
	ret = true;
	spin_lock_irq(&dev->event_lock);
	for (;;) {
		if (list_empty(&file_priv->event_list)) {
			if (ret)
				break;

out:
	spin_unlock_irqrestore(&dev->event_lock, flags);
	return ret;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_pending_event *e;
	size_t total;
	ssize_t ret;

	if ((filp->f_flags & O_NONBLOCK) == 0) {
			spin_unlock_irq(&dev->event_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			spin_lock_irq(&dev->event_lock);
			if (ret < 0)
			return ret;
	}
				break;

			ret = 0;
		} else {
			struct drm_pending_event *e;

	total = 0;
	while (drm_dequeue_event(file_priv, total, count, &e)) {
		if (copy_to_user(buffer + total,
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			if (e->event->length + ret > count)
				break;

			if (__copy_to_user_inatomic(buffer + ret,
						    e->event, e->event->length)) {
			total = -EFAULT;
			e->destroy(e);
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

		total += e->event->length;
			file_priv->event_space += e->event->length;
			ret += e->event->length;
			list_del(&e->link);
			e->destroy(e);
		}
	}
	spin_unlock_irq(&dev->event_lock);

	return total ?: -EAGAIN;
	return ret;
}
EXPORT_SYMBOL(drm_read);