Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 06a660ad authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull media updates from Mauro Carvalho Chehab:
 "A series of patches that move part of the code used to allocate memory
  from the media subsystem to the mm subsystem"

[ The mm parts have been acked by VM people, and the series was
  apparently in -mm for a while   - Linus ]

* tag 'media/v4.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media:
  [media] drm/exynos: Convert g2d_userptr_get_dma_addr() to use get_vaddr_frames()
  [media] media: vb2: Remove unused functions
  [media] media: vb2: Convert vb2_dc_get_userptr() to use frame vector
  [media] media: vb2: Convert vb2_vmalloc_get_userptr() to use frame vector
  [media] media: vb2: Convert vb2_dma_sg_get_userptr() to use frame vector
  [media] vb2: Provide helpers for mapping virtual addresses
  [media] media: omap_vout: Convert omap_vout_uservirt_to_phys() to use get_vaddr_pfns()
  [media] mm: Provide new get_vaddr_frames() helper
  [media] vb2: Push mmap_sem down to memops
parents d9b44fe3 63540f01
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
config DRM_EXYNOS_G2D
	bool "Exynos DRM G2D"
	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
	select FRAME_VECTOR
	help
	  Choose this option if you want to use Exynos G2D for DRM.

+29 −60
Original line number Diff line number Diff line
@@ -194,10 +194,8 @@ struct g2d_cmdlist_userptr {
	dma_addr_t		dma_addr;
	unsigned long		userptr;
	unsigned long		size;
	struct page		**pages;
	unsigned int		npages;
	struct frame_vector	*vec;
	struct sg_table		*sgt;
	struct vm_area_struct	*vma;
	atomic_t		refcount;
	bool			in_pool;
	bool			out_of_list;
@@ -367,6 +365,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
{
	struct g2d_cmdlist_userptr *g2d_userptr =
					(struct g2d_cmdlist_userptr *)obj;
	struct page **pages;

	if (!obj)
		return;
@@ -386,19 +385,21 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
					DMA_BIDIRECTIONAL);

	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
					g2d_userptr->npages,
					g2d_userptr->vma);
	pages = frame_vector_pages(g2d_userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

	exynos_gem_put_vma(g2d_userptr->vma);
		for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(g2d_userptr->vec);
	frame_vector_destroy(g2d_userptr->vec);

	if (!g2d_userptr->out_of_list)
		list_del_init(&g2d_userptr->list);

	sg_free_table(g2d_userptr->sgt);
	kfree(g2d_userptr->sgt);

	drm_free_large(g2d_userptr->pages);
	kfree(g2d_userptr);
}

@@ -412,9 +413,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_cmdlist_userptr *g2d_userptr;
	struct g2d_data *g2d;
	struct page **pages;
	struct sg_table	*sgt;
	struct vm_area_struct *vma;
	unsigned long start, end;
	unsigned int npages, offset;
	int ret;
@@ -460,65 +459,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
		return ERR_PTR(-ENOMEM);

	atomic_set(&g2d_userptr->refcount, 1);
	g2d_userptr->size = size;

	start = userptr & PAGE_MASK;
	offset = userptr & ~PAGE_MASK;
	end = PAGE_ALIGN(userptr + size);
	npages = (end - start) >> PAGE_SHIFT;
	g2d_userptr->npages = npages;

	pages = drm_calloc_large(npages, sizeof(struct page *));
	if (!pages) {
		DRM_ERROR("failed to allocate pages.\n");
	g2d_userptr->vec = frame_vector_create(npages);
	if (!g2d_userptr->vec) {
		ret = -ENOMEM;
		goto err_free;
	}

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, userptr);
	if (!vma) {
		up_read(&current->mm->mmap_sem);
		DRM_ERROR("failed to get vm region.\n");
	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
	if (ret != npages) {
		DRM_ERROR("failed to get user pages from userptr.\n");
		if (ret < 0)
			goto err_destroy_framevec;
		ret = -EFAULT;
		goto err_free_pages;
		goto err_put_framevec;
	}

	if (vma->vm_end < userptr + size) {
		up_read(&current->mm->mmap_sem);
		DRM_ERROR("vma is too small.\n");
	if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
		ret = -EFAULT;
		goto err_free_pages;
		goto err_put_framevec;
	}

	g2d_userptr->vma = exynos_gem_get_vma(vma);
	if (!g2d_userptr->vma) {
		up_read(&current->mm->mmap_sem);
		DRM_ERROR("failed to copy vma.\n");
		ret = -ENOMEM;
		goto err_free_pages;
	}

	g2d_userptr->size = size;

	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
						npages, pages, vma);
	if (ret < 0) {
		up_read(&current->mm->mmap_sem);
		DRM_ERROR("failed to get user pages from userptr.\n");
		goto err_put_vma;
	}

	up_read(&current->mm->mmap_sem);
	g2d_userptr->pages = pages;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_free_userptr;
		goto err_put_framevec;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
					size, GFP_KERNEL);
	ret = sg_alloc_table_from_pages(sgt,
					frame_vector_pages(g2d_userptr->vec),
					npages, offset, size, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to get sgt from pages.\n");
		goto err_free_sgt;
@@ -553,16 +527,11 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
err_free_sgt:
	kfree(sgt);

err_free_userptr:
	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
					g2d_userptr->npages,
					g2d_userptr->vma);

err_put_vma:
	exynos_gem_put_vma(g2d_userptr->vma);
err_put_framevec:
	put_vaddr_frames(g2d_userptr->vec);

err_free_pages:
	drm_free_large(pages);
err_destroy_framevec:
	frame_vector_destroy(g2d_userptr->vec);

err_free:
	kfree(g2d_userptr);
+0 −97
Original line number Diff line number Diff line
@@ -366,103 +366,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
	return 0;
}

/*
 * Duplicate a VMA descriptor so it can outlive the owning task's mm.
 * Invokes the vm_ops->open() callback (so driver-private per-VMA state
 * gains a user) and takes a reference on the backing file, if any.
 * The copy is detached from any mm (vm_mm/vm_next/vm_prev cleared), so
 * it is only usable for bookkeeping — it must never be inserted into
 * an address space.  Returns the copy, or NULL on allocation failure.
 * Pair with exynos_gem_put_vma() to release.
 */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	/* Tell the mapping's owner that another user now exists. */
	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	/* Pin the mapped file so the copy's vm_file stays valid. */
	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	/* Detach the copy from the source mm's VMA list. */
	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

/*
 * Release a VMA copy obtained from exynos_gem_get_vma(): invoke the
 * vm_ops->close() callback, drop the file reference taken by the copy,
 * and free the duplicate itself.  NULL is tolerated as a no-op.
 */
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	/* Mirror the vm_ops->open() done when the copy was created. */
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	/* Drop the get_file() reference taken by exynos_gem_get_vma(). */
	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

/*
 * Populate @pages with the struct pages backing @npages pages of the
 * user mapping starting at @start (a page-aligned user virtual address
 * inside @vma).
 *
 * For VM_PFNMAP/VM_IO mappings the PFNs are resolved directly via
 * follow_pfn() and no page references are taken.  For ordinary
 * mappings get_user_pages() pins each page (write=1, force=1); those
 * references must later be dropped via
 * exynos_gem_put_pages_to_userptr().
 *
 * NOTE(review): callers appear to hold current->mm->mmap_sem for read
 * around this call — confirm at the call sites.
 *
 * Returns 0 on success or a negative errno.
 */
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region mmaped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		/*
		 * The loop either fills all npages entries or returns
		 * early on error, so the former post-loop
		 * "if (i != npages)" check was unreachable dead code
		 * and has been removed.
		 */
		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		/* Drop references on the pages we did manage to pin. */
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

/*
 * Release pages previously obtained via
 * exynos_gem_get_pages_from_userptr().  For ordinary (non-VM_PFNMAP)
 * mappings each page is marked dirty — the device may have written to
 * it — and the get_user_pages() reference is dropped.  I/O mappings
 * took no page references, so nothing is done for them.
 */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			/* Data may have been DMA'd into the page. */
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
+1 −0
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@ config VIDEO_OMAP2_VOUT
	select OMAP2_DSS if HAS_IOMEM && ARCH_OMAP2PLUS
	select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
	select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
	select FRAME_VECTOR
	default n
	---help---
	  V4L2 Display driver support for OMAP2/3 based boards.
+31 −38
Original line number Diff line number Diff line
@@ -195,46 +195,34 @@ static int omap_vout_try_format(struct v4l2_pix_format *pix)
}

/*
 * omap_vout_uservirt_to_phys: This inline function is used to convert user
 * space virtual address to physical address.
 * omap_vout_get_userptr: Convert user space virtual address to physical
 * address.
 */
static unsigned long omap_vout_uservirt_to_phys(unsigned long virtp)
static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
				 u32 *physp)
{
	unsigned long physp = 0;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct frame_vector *vec;
	int ret;

	/* For kernel direct-mapped memory, take the easy way */
	if (virtp >= PAGE_OFFSET)
		return virt_to_phys((void *) virtp);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(mm, virtp);
	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
		/* this will catch, kernel-allocated, mmaped-to-usermode
		   addresses */
		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
		up_read(&current->mm->mmap_sem);
	} else {
		/* otherwise, use get_user_pages() for general userland pages */
		int res, nr_pages = 1;
		struct page *pages;

		res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
				0, &pages, NULL);
		up_read(&current->mm->mmap_sem);

		if (res == nr_pages) {
			physp =  __pa(page_address(&pages[0]) +
					(virtp & ~PAGE_MASK));
		} else {
			printk(KERN_WARNING VOUT_NAME
					"get_user_pages failed\n");
	if (virtp >= PAGE_OFFSET) {
		*physp = virt_to_phys((void *)virtp);
		return 0;
	}

	vec = frame_vector_create(1);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(virtp, 1, true, false, vec);
	if (ret != 1) {
		frame_vector_destroy(vec);
		return -EINVAL;
	}
	*physp = __pfn_to_phys(frame_vector_pfns(vec)[0]);
	vb->priv = vec;

	return physp;
	return 0;
}

/*
@@ -784,11 +772,15 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
	 * address of the buffer
	 */
	if (V4L2_MEMORY_USERPTR == vb->memory) {
		int ret;

		if (0 == vb->baddr)
			return -EINVAL;
		/* Physical address */
		vout->queued_buf_addr[vb->i] = (u8 *)
			omap_vout_uservirt_to_phys(vb->baddr);
		ret = omap_vout_get_userptr(vb, vb->baddr,
				(u32 *)&vout->queued_buf_addr[vb->i]);
		if (ret < 0)
			return ret;
	} else {
		unsigned long addr, dma_addr;
		unsigned long size;
@@ -834,12 +826,13 @@ static void omap_vout_buffer_queue(struct videobuf_queue *q,
static void omap_vout_buffer_release(struct videobuf_queue *q,
			    struct videobuf_buffer *vb)
{
	struct omap_vout_device *vout = q->priv_data;

	vb->state = VIDEOBUF_NEEDS_INIT;
	if (vb->memory == V4L2_MEMORY_USERPTR && vb->priv) {
		struct frame_vector *vec = vb->priv;

	if (V4L2_MEMORY_MMAP != vout->memory)
		return;
		put_vaddr_frames(vec);
		frame_vector_destroy(vec);
	}
}

/*
Loading