Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 81bec8af authored by shaohanlin
Browse files

Merge SPL 2021-03-05 into mp

* origin/8901-fp3-q-mp-release-spl-202103:
  msm: kgsl: Don't allow re-importing memory owned by KGSL
  gpu: drm: msm: add event to event_list after register is successful
Change-Id: I836eb52f01ccc09c0f12f3183dbcf0e7f2063d6f
parents 030346b1 799f48d0
Loading
Loading
Loading
Loading
+12 −9
Original line number Diff line number Diff line
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
@@ -1295,24 +1295,27 @@ static int msm_ioctl_register_event(struct drm_device *dev, void *data,
	 * calls add to client list and return.
	 */
	count = msm_event_client_count(dev, req_event, false);
	if (count) {
		/* Add current client to list */
		spin_lock_irqsave(&dev->event_lock, flag);
		list_add_tail(&client->base.link, &priv->client_event_list);
		spin_unlock_irqrestore(&dev->event_lock, flag);

	if (count)
		return 0;
	}

	ret = msm_register_event(dev, req_event, file, true);
	if (ret) {
		DRM_ERROR("failed to enable event %x object %x object id %d\n",
			req_event->event, req_event->object_type,
			req_event->object_id);
		kfree(client);
	} else {
		/* Add current client to list */
		spin_lock_irqsave(&dev->event_lock, flag);
		list_del(&client->base.link);
		list_add_tail(&client->base.link, &priv->client_event_list);
		spin_unlock_irqrestore(&dev->event_lock, flag);
		kfree(client);
	}

	return ret;
}

+26 −31
Original line number Diff line number Diff line
@@ -2090,14 +2090,6 @@ long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
	return ret;
}

/*
 * Check whether the region [start, start + size) runs past a buffer of
 * len bytes.  Returns nonzero when the region overflows the buffer,
 * zero when it fits entirely.  The arithmetic is done in 64 bits so a
 * 32-bit start + size cannot wrap before the comparison.
 */
static inline int _check_region(unsigned long start, unsigned long size,
				uint64_t len)
{
	uint64_t region_end = (uint64_t)start + size;

	return region_end > len;
}

static int check_vma_flags(struct vm_area_struct *vma,
		unsigned int flags)
{
@@ -2112,23 +2104,27 @@ static int check_vma_flags(struct vm_area_struct *vma,
	return -EFAULT;
}

static int check_vma(struct vm_area_struct *vma, struct file *vmfile,
		struct kgsl_memdesc *memdesc)
static int check_vma(unsigned long hostptr, u64 size)
{
	if (vma == NULL || vma->vm_file != vmfile)
		return -EINVAL;
	struct vm_area_struct *vma;
	unsigned long cur = hostptr;

	/* userspace may not know the size, in which case use the whole vma */
	if (memdesc->size == 0)
		memdesc->size = vma->vm_end - vma->vm_start;
	/* range checking */
	if (vma->vm_start != memdesc->useraddr ||
		(memdesc->useraddr + memdesc->size) != vma->vm_end)
		return -EINVAL;
	return check_vma_flags(vma, memdesc->flags);
	while (cur < (hostptr + size)) {
		vma = find_vma(current->mm, cur);
		if (!vma)
			return false;

		/* Don't remap memory that we already own */
		if (vma->vm_file && vma->vm_file->f_op == &kgsl_fops)
			return false;

		cur = vma->vm_end;
	}

static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
	return true;
}

static int memdesc_sg_virt(struct kgsl_memdesc *memdesc)
{
	int ret = 0;
	long npages = 0, i;
@@ -2151,18 +2147,17 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile)
	}

	down_read(&current->mm->mmap_sem);
	/* If we have vmfile, make sure we map the correct vma and map it all */
	if (vmfile != NULL)
		ret = check_vma(find_vma(current->mm, memdesc->useraddr),
				vmfile, memdesc);
	if (!check_vma(memdesc->useraddr, memdesc->size)) {
		up_read(&current->mm->mmap_sem);
		ret = -EFAULT;
		goto out;
	}

	if (ret == 0) {
	npages = get_user_pages(memdesc->useraddr,
				sglen, write, pages, NULL);
		ret = (npages < 0) ? (int)npages : 0;
	}
	up_read(&current->mm->mmap_sem);

	ret = (npages < 0) ? (int)npages : 0;
	if (ret)
		goto out;

@@ -2213,7 +2208,7 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
		entry->memdesc.gpuaddr = (uint64_t)  entry->memdesc.useraddr;
	}

	return memdesc_sg_virt(&entry->memdesc, NULL);
	return memdesc_sg_virt(&entry->memdesc);
}

#ifdef CONFIG_DMA_SHARED_BUFFER