Commit 1d263474 authored by Dan Carpenter, committed by Alex Deucher

drm/amdgpu: unwind properly in amdgpu_cs_parser_init()



The amdgpu_cs_parser_init() function doesn't clean up after itself;
instead, on failure, the caller uses a free-everything function,
amdgpu_cs_parser_fini().  This style of error handling is often buggy.
In this example, we call "drm_free_large(parser->chunks[i].kdata);" when
it is an uninitialized pointer or when "parser->chunks" is NULL.
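
To see the hazard in isolation, here is a compilable userspace sketch of
the same pattern (parser_init(), parser_fini(), and the struct layout
are invented for illustration, with plain malloc() standing in for the
kernel allocators; this is not the driver's code).  A catch-all cleanup
function cannot know how far init got before failing:

#include <stdlib.h>

struct chunk {
	void *kdata;
};

struct parser {
	struct chunk *chunks;	/* still NULL if init fails early */
	int nchunks;
};

/* Hypothetical init: records nchunks, then may fail, relying on a
 * catch-all cleanup function instead of undoing its own work. */
int parser_init(struct parser *p, int nchunks)
{
	p->nchunks = nchunks;
	p->chunks = malloc(nchunks * sizeof(*p->chunks));
	if (!p->chunks)
		return -1;	/* p->chunks is NULL but nchunks is already set */
	/* malloc() does not zero memory, so each chunks[i].kdata is
	 * garbage until it is individually assigned later on. */
	return 0;
}

/* Hypothetical catch-all cleanup, called even after a failed init. */
void parser_fini(struct parser *p)
{
	for (int i = 0; i < p->nchunks; i++)
		free(p->chunks[i].kdata);	/* NULL deref, or free() of garbage */
	free(p->chunks);
}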

I fixed this bug by adding unwind code so that it frees everything that
it allocates.
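
The unwind style is the kernel's usual goto ladder: labels are laid out
in reverse order of acquisition, and each error path jumps to the label
that releases exactly what has been set up so far.  A minimal sketch of
the idiom with invented names (three plain malloc() calls standing in
for the real allocations; see the actual function in the diff below):

#include <stdlib.h>

static int setup(void **a_out, void **b_out, void **c_out)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		return -1;	/* nothing allocated yet, nothing to undo */

	b = malloc(16);
	if (!b)
		goto free_a;	/* undo only step one */

	c = malloc(16);
	if (!c)
		goto free_b;	/* undo steps two and one, in that order */

	*a_out = a;
	*b_out = b;
	*c_out = c;
	return 0;

free_b:
	free(b);
free_a:
	free(a);
	return -1;
}

int main(void)
{
	void *a, *b, *c;

	if (setup(&a, &b, &c) == 0) {
		free(c);
		free(b);
		free(a);
	}
	return 0;
}

Because the labels fall through, jumping to free_b also runs free_a,
which is what keeps partial unwinding correct as more steps are added.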

I also made some other very minor changes:
1) Renamed "r" to "ret".
2) Moved the chunk_array allocation to the start of the function.
3) Removed some initializers which are no longer needed.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5a6adfa2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  +51 −34
@@ -154,42 +154,41 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
-	uint64_t *chunk_array = NULL;
+	uint64_t *chunk_array;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	unsigned size, i;
-	int r = 0;
+	int ret;
 
-	if (!cs->in.num_chunks)
-		goto out;
+	if (cs->in.num_chunks == 0)
+		return 0;
+
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
 
 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 	if (!p->ctx) {
-		r = -EINVAL;
-		goto out;
+		ret = -EINVAL;
+		goto free_chunk;
 	}
+
 	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
-	if (chunk_array == NULL) {
-		r = -ENOMEM;
-		goto out;
-	}
-
 	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
-		r = -EFAULT;
-		goto out;
+		ret = -EFAULT;
+		goto put_bo_list;
 	}
 
 	p->nchunks = cs->in.num_chunks;
 	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
-	if (p->chunks == NULL) {
-		r = -ENOMEM;
-		goto out;
+	if (!p->chunks) {
+		ret = -ENOMEM;
+		goto put_bo_list;
 	}
 
 	for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +199,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		chunk_ptr = (void __user *)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			i--;
+			goto free_partial_kdata;
 		}
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
 		p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		if (p->chunks[i].kdata == NULL) {
-			r = -ENOMEM;
-			goto out;
+			ret = -ENOMEM;
+			i--;
+			goto free_partial_kdata;
 		}
 		size *= sizeof(uint32_t);
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			goto free_partial_kdata;
 		}
 
 		switch (p->chunks[i].chunk_id) {
@@ -238,15 +239,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 				gobj = drm_gem_object_lookup(p->adev->ddev,
 							     p->filp, handle);
 				if (gobj == NULL) {
-					r = -EINVAL;
-					goto out;
+					ret = -EINVAL;
+					goto free_partial_kdata;
 				}
 
 				p->uf.bo = gem_to_amdgpu_bo(gobj);
 				p->uf.offset = fence_data->offset;
 			} else {
-				r = -EINVAL;
-				goto out;
+				ret = -EINVAL;
+				goto free_partial_kdata;
 			}
 			break;
 
@@ -254,19 +255,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			break;
 
 		default:
-			r = -EINVAL;
-			goto out;
+			ret = -EINVAL;
+			goto free_partial_kdata;
 		}
 	}
 
 
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs)
-		r = -ENOMEM;
+	if (!p->ibs) {
+		ret = -ENOMEM;
+		goto free_all_kdata;
+	}
 
-out:
 	kfree(chunk_array);
-	return r;
+	return 0;
+
+free_all_kdata:
+	i = p->nchunks - 1;
+free_partial_kdata:
+	for (; i >= 0; i--)
+		drm_free_large(p->chunks[i].kdata);
+	kfree(p->chunks);
+put_bo_list:
+	if (p->bo_list)
+		amdgpu_bo_list_put(p->bo_list);
+	amdgpu_ctx_put(p->ctx);
+free_chunk:
+	kfree(chunk_array);
+
+	return ret;
 }
 
 /* Returns how many bytes TTM can move per IB.
@@ -810,7 +827,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		kfree(parser);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;