
Commit 27dd7350 authored by Dave Airlie

Merge branch 'msm-fixes-4.8' of git://people.freedesktop.org/~robclark/linux into drm-fixes

copy from user fixes.

* 'msm-fixes-4.8' of git://people.freedesktop.org/~robclark/linux:
  drm/msm: protect against faults from copy_from_user() in submit ioctl
  drm/msm: fix use of copy_from_user() while holding spinlock
parents 8509eb19 d78d383a
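
The second commit listed above exists because copy_from_user() can fault and therefore sleep, which is not allowed while a spinlock is held; submit_lookup_objects() was doing exactly that under file->table_lock. The fix, visible in the last file of this diff, is to disable page faults and use the atomic copy variant while locked, then fall back to a normal copy with the lock dropped if the atomic copy fails. Below is a minimal sketch of that pattern, not the driver's code: demo_copy_entry() and its caller-held lock are hypothetical, and it uses the 4.8-era access_ok(VERIFY_READ, ...) form seen in the diff.

#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
 * Hypothetical helper: copy 'len' bytes from userspace into 'dst' while the
 * caller holds 'lock'.  Fast path: page faults disabled, atomic copy, never
 * sleeps.  Slow path: drop the lock, do a normal copy_from_user() that may
 * fault and sleep, then retake the lock before returning.
 */
static int demo_copy_entry(spinlock_t *lock, void *dst,
			   const void __user *src, size_t len)
{
	unsigned long left = len;

	pagefault_disable();
	if (access_ok(VERIFY_READ, src, len))
		left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	if (likely(!left))
		return 0;

	/* Atomic copy faulted: retry with faults allowed, lock released. */
	spin_unlock(lock);
	left = copy_from_user(dst, src, len);
	spin_lock(lock);

	return left ? -EFAULT : 0;
}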
drivers/gpu/drm/msm/msm_drv.h  +6 −0
@@ -157,6 +157,12 @@ struct msm_drm_private {
 	struct shrinker shrinker;

 	struct msm_vblank_ctrl vblank_ctrl;
+
+	/* task holding struct_mutex.. currently only used in submit path
+	 * to detect and reject faults from copy_from_user() for submit
+	 * ioctl.
+	 */
+	struct task_struct *struct_mutex_task;
 };

 struct msm_format {
drivers/gpu/drm/msm/msm_gem.c  +9 −0
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
 	struct page **pages;
 	unsigned long pfn;
 	pgoff_t pgoff;
 	int ret;

+	/* This should only happen if userspace tries to pass a mmap'd
+	 * but unfaulted gem bo vaddr into submit ioctl, triggering
+	 * a page fault while struct_mutex is already held.  This is
+	 * not a valid use-case so just bail.
+	 */
+	if (priv->struct_mutex_task == current)
+		return VM_FAULT_SIGBUS;
+
 	/* Make sure we don't parallel update on a fault, nor move or remove
 	 * something from beneath our feet
 	 */
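
Taken together, the struct_mutex_task field added above and this check in msm_gem_fault() form a recursion guard: the submit path records which task currently holds struct_mutex, and a page fault raised by that same task (i.e. from a copy_from_user() inside the submit ioctl) is answered with SIGBUS instead of letting the fault handler deadlock trying to take struct_mutex again. A stripped-down sketch of the idea, with a hypothetical struct demo_dev and a simplified fault-handler signature:

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Hypothetical device state: remembers which task holds 'lock'. */
struct demo_dev {
	struct mutex lock;
	struct task_struct *lock_holder;	/* set only while 'lock' is held */
};

/*
 * If the fault was triggered by the task that already holds the lock
 * (e.g. via a copy_from_user() done in an ioctl), servicing it would
 * require taking the same lock again, so fail the fault rather than
 * deadlock.
 */
static int demo_fault(struct demo_dev *dev, struct vm_fault *vmf)
{
	if (dev->lock_holder == current)
		return VM_FAULT_SIGBUS;

	mutex_lock(&dev->lock);
	/* ... normal fault handling: look up pages, insert the PFN ... */
	mutex_unlock(&dev->lock);

	return VM_FAULT_NOPAGE;
}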
drivers/gpu/drm/msm/msm_gem_submit.c  +25 −5
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
 	kfree(submit);
 }

+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+	if (access_ok(VERIFY_READ, from, n))
+		return __copy_from_user_inatomic(to, from, n);
+	return -EFAULT;
+}
+
 static int submit_lookup_objects(struct msm_gem_submit *submit,
 		struct drm_msm_gem_submit *args, struct drm_file *file)
 {
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 	int ret = 0;

 	spin_lock(&file->table_lock);
+	pagefault_disable();

 	for (i = 0; i < args->nr_bos; i++) {
 		struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 		 */
 		submit->bos[i].flags = 0;

-		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
-		if (ret) {
-			ret = -EFAULT;
-			goto out_unlock;
+		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+		if (unlikely(ret)) {
+			pagefault_enable();
+			spin_unlock(&file->table_lock);
+			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+			if (ret)
+				goto out;
+			spin_lock(&file->table_lock);
+			pagefault_disable();
 		}

 		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
 	}

 out_unlock:
-	submit->nr_bos = i;
+	pagefault_enable();
 	spin_unlock(&file->table_lock);

+out:
+	submit->nr_bos = i;
+
 	return ret;
 }

@@ -377,6 +394,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;

+	priv->struct_mutex_task = current;
+
 	submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
 	if (!submit) {
 		ret = -ENOMEM;
@@ -468,6 +487,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		msm_gem_submit_free(submit);
 out_unlock:
+	priv->struct_mutex_task = NULL;
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
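
The last two hunks are the other half of that guard: msm_ioctl_gem_submit() stores current in struct_mutex_task once struct_mutex is held and clears it again before unlocking, so the SIGBUS path in msm_gem_fault() only fires for faults raised from inside the submit path. Continuing the hypothetical demo_dev sketch above (same includes and types), the ioctl-side bracketing looks roughly like this:

/* Ioctl-side bracketing for the recursion guard sketched earlier. */
static int demo_submit(struct demo_dev *dev)
{
	int ret;

	ret = mutex_lock_interruptible(&dev->lock);
	if (ret)
		return ret;
	dev->lock_holder = current;

	/*
	 * ... copy in and validate the submission; a copy_from_user() here
	 * that faults on one of this device's own mmap'd buffers hits the
	 * guard in demo_fault() above instead of deadlocking ...
	 */
	ret = 0;

	dev->lock_holder = NULL;
	mutex_unlock(&dev->lock);

	return ret;
}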