Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 69a834c2 authored by Rob Clark
Browse files

drm/msm: deal with exhausted vmap space better



Some, but not all, callers of obj->vmap() would check whether the return
value IS_ERR().  So let's actually return an error if vmap() fails, and fix up
the call sites that were not handling this properly.

Signed-off-by: Rob Clark <robdclark@gmail.com>
parent ab3ab684
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
	}

	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
	if (!adreno_gpu->memptrs) {
	if (IS_ERR(adreno_gpu->memptrs)) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}
+4 −0
Original line number Diff line number Diff line
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
	dev->mode_config.fb_base = paddr;

	fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
	if (IS_ERR(fbi->screen_base)) {
		ret = PTR_ERR(fbi->screen_base);
		goto fail_unlock;
	}
	fbi->screen_size = fbdev->bo->size;
	fbi->fix.smem_start = paddr;
	fbi->fix.smem_len = fbdev->bo->size;
+2 −0
Original line number Diff line number Diff line
@@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL)
			return ERR_PTR(-ENOMEM);
	}
	return msm_obj->vaddr;
}
+3 −0
Original line number Diff line number Diff line
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
		struct msm_gem_object *obj = submit->bos[idx].obj;
		const char *buf = msm_gem_vaddr_locked(&obj->base);

		if (IS_ERR(buf))
			continue;

		buf += iova - submit->bos[idx].iova;

		rd_write_section(rd, RD_GPUADDR,
+4 −0
Original line number Diff line number Diff line
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
	}

	ring->start = msm_gem_vaddr_locked(ring->bo);
	if (IS_ERR(ring->start)) {
		ret = PTR_ERR(ring->start);
		goto fail;
	}
	ring->end   = ring->start + (size / 4);
	ring->cur   = ring->start;