Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 92c60d9c authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: bind BOs to TTM only once



No need to do this on every round.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
parent fc9c8f54
Loading
Loading
Loading
Loading
+29 −41
Original line number Diff line number Diff line
@@ -252,29 +252,15 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 	new_mem->mm_node = NULL;
 }
 
-static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
-			       struct drm_mm_node *mm_node,
-			       struct ttm_mem_reg *mem,
-			       uint64_t *addr)
+static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+				    struct drm_mm_node *mm_node,
+				    struct ttm_mem_reg *mem)
 {
-	int r;
-
-	switch (mem->mem_type) {
-	case TTM_PL_TT:
-		r = amdgpu_ttm_bind(bo, mem);
-		if (r)
-			return r;
-
-	case TTM_PL_VRAM:
-		*addr = mm_node->start << PAGE_SHIFT;
-		*addr += bo->bdev->man[mem->mem_type].gpu_offset;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", mem->mem_type);
-		return -EINVAL;
-	}
+	uint64_t addr;
 
-	return 0;
+	addr = mm_node->start << PAGE_SHIFT;
+	addr += bo->bdev->man[mem->mem_type].gpu_offset;
+	return addr;
 }
 
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
@@ -298,18 +284,25 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 		return -EINVAL;
 	}
 
-	old_mm = old_mem->mm_node;
-	r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
-	if (r)
-		return r;
-	old_size = old_mm->size;
+	if (old_mem->mem_type == TTM_PL_TT) {
+		r = amdgpu_ttm_bind(bo, old_mem);
+		if (r)
+			return r;
+	}
 
-	new_mm = new_mem->mm_node;
-	r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
-	if (r)
-		return r;
+	old_mm = old_mem->mm_node;
+	old_size = old_mm->size;
+	old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
+
+	if (new_mem->mem_type == TTM_PL_TT) {
+		r = amdgpu_ttm_bind(bo, new_mem);
+		if (r)
+			return r;
+	}
 
+	new_mm = new_mem->mm_node;
 	new_size = new_mm->size;
+	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
 
 	num_pages = new_mem->num_pages;
 	while (num_pages) {
@@ -331,10 +324,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 		old_size -= cur_pages;
 		if (!old_size) {
-			r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
-						&old_start);
-			if (r)
-				goto error;
+			old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
 			old_size = old_mm->size;
 		} else {
 			old_start += cur_pages * PAGE_SIZE;
@@ -342,11 +332,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 		new_size -= cur_pages;
 		if (!new_size) {
-			r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
-						&new_start);
-			if (r)
-				goto error;
-
+			new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
 			new_size = new_mm->size;
 		} else {
 			new_start += cur_pages * PAGE_SIZE;
@@ -1347,6 +1333,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		return -EINVAL;
 	}
 
+	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+		if (r)
+			return r;
+	}
+
 	num_pages = bo->tbo.num_pages;
 	mm_node = bo->tbo.mem.mm_node;
 	num_loops = 0;
@@ -1382,11 +1374,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
 		uint64_t dst_addr;
 
-		r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
-					&bo->tbo.mem, &dst_addr);
-		if (r)
-			return r;
-
+		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
 		while (byte_count) {
 			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);