
Commit 04cd2145 authored by Dave Airlie

Merge branch 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux into drm-next

Just clearing out my -next queue before I go on vacation.  Two UVD
improvements that depend on the ttm change you just merged.

* 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: preallocate mem for UVD create/destroy msg
  drm/radeon: allow UVD to use a second 256MB segment
parents 6adae108 feba9b0b
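
For orientation before the diffs: radeon_uvd_force_into_uvd_segment() clamps a buffer's TTM placements to the first 256MB of VRAM, because UVD addresses its buffers through 256MB segments. The first patch below preallocates a page inside the UVD VCPU buffer object for create/destroy messages, so no temporary buffer object is needed per message; the second lets a buffer whose allowed domains extend beyond VRAM fall back to a second 256MB segment. A minimal standalone sketch (not driver code) of the page-frame arithmetic the placements use, assuming 4KiB pages, i.e. PAGE_SHIFT == 12:

	/*
	 * Standalone illustration (not driver code): how a 256MB UVD segment
	 * translates into the TTM page-frame limits (fpfn/lpfn) set in the
	 * diffs below, assuming 4KiB pages (PAGE_SHIFT == 12).
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define UVD_SEGMENT	(256u * 1024 * 1024)

	int main(void)
	{
		unsigned int seg_pages = UVD_SEGMENT >> PAGE_SHIFT;	/* 0x10000 pages */

		/* first segment: page frames [0, 0x10000) == bytes [0, 256MB) */
		printf("segment 0: fpfn=0x%05x lpfn=0x%05x\n", 0u, seg_pages);

		/* second, optional segment added by this series:
		 * page frames [0x10000, 0x20000) == bytes [256MB, 512MB) */
		printf("segment 1: fpfn=0x%05x lpfn=0x%05x\n", seg_pages, 2 * seg_pages);
		return 0;
	}
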
drivers/gpu/drm/radeon/radeon.h  +2 −1

@@ -1642,7 +1642,8 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 			      uint32_t handle, struct radeon_fence **fence);
 int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence);
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+				       uint32_t allowed_domains);
 void radeon_uvd_free_handles(struct radeon_device *rdev,
 			     struct drm_file *filp);
 int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
drivers/gpu/drm/radeon/radeon_object.c  +3 −2

@@ -491,6 +491,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		bo = lobj->robj;
 		if (!bo->pin_count) {
 			u32 domain = lobj->prefered_domains;
+			u32 allowed = lobj->allowed_domains;
 			u32 current_domain =
 				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
@@ -502,7 +503,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			 * into account. We don't want to disallow buffer moves
 			 * completely.
 			 */
-			if ((lobj->allowed_domains & current_domain) != 0 &&
+			if ((allowed & current_domain) != 0 &&
 			    (domain & current_domain) == 0 && /* will be moved */
 			    bytes_moved > bytes_moved_threshold) {
 				/* don't move it */
@@ -512,7 +513,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		retry:
 			radeon_ttm_placement_from_domain(bo, domain);
 			if (ring == R600_RING_TYPE_UVD_INDEX)
-				radeon_uvd_force_into_uvd_segment(bo);
+				radeon_uvd_force_into_uvd_segment(bo, allowed);
 
 			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
 			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
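
The rewritten condition above works on radeon's GEM domain bitmasks: once the per-submission move budget is exceeded, a buffer already resident in one of its allowed domains is left where it is. A standalone sketch of that decision; the domain flag values match the radeon UAPI (include/uapi/drm/radeon_drm.h), while the scenario numbers are invented for illustration:

	/*
	 * Standalone sketch of the check in radeon_bo_list_validate: once the
	 * per-submission move budget is spent, keep a buffer where it is as
	 * long as its current domain is still allowed. Domain flag values are
	 * from the radeon UAPI; scenario values are invented for illustration.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define RADEON_GEM_DOMAIN_CPU	0x1
	#define RADEON_GEM_DOMAIN_GTT	0x2
	#define RADEON_GEM_DOMAIN_VRAM	0x4

	int main(void)
	{
		uint32_t domain  = RADEON_GEM_DOMAIN_VRAM;		/* preferred */
		uint32_t allowed = RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT;
		uint32_t current_domain = RADEON_GEM_DOMAIN_GTT;	/* where it sits */
		uint64_t bytes_moved = 9 << 20;			/* 9MB moved so far */
		uint64_t bytes_moved_threshold = 8 << 20;	/* 8MB budget */

		if ((allowed & current_domain) != 0 &&	/* GTT is acceptable */
		    (domain & current_domain) == 0 &&	/* but a move is pending */
		    bytes_moved > bytes_moved_threshold)	/* and the budget is spent */
			printf("don't move it: validate against current domain\n");
		return 0;
	}
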
drivers/gpu/drm/radeon/radeon_uvd.c  +43 −76

@@ -138,7 +138,8 @@ int radeon_uvd_init(struct radeon_device *rdev)
 	}
 
 	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
-		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
+		  RADEON_GPU_PAGE_SIZE;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
 	if (r) {
@@ -254,7 +255,8 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 	return 0;
 }
 
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+				       uint32_t allowed_domains)
 {
 	int i;
 
@@ -262,6 +264,21 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
 		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
 		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
 	}
+
+	/* If it must be in VRAM it must be in the first segment as well */
+	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
+		return;
+
+	/* abort if we already have more than one placement */
+	if (rbo->placement.num_placement > 1)
+		return;
+
+	/* add another 256MB segment */
+	rbo->placements[1] = rbo->placements[0];
+	rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+	rbo->placement.num_placement++;
+	rbo->placement.num_busy_placement++;
 }
 
 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
@@ -631,38 +648,16 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 }
 
 static int radeon_uvd_send_msg(struct radeon_device *rdev,
-			       int ring, struct radeon_bo *bo,
+			       int ring, uint64_t addr,
 			       struct radeon_fence **fence)
 {
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
 	struct radeon_ib ib;
-	uint64_t addr;
 	int i, r;
 
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
-
-	r = ttm_eu_reserve_buffers(&ticket, &head);
-	if (r)
-		return r;
-
-	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
-	radeon_uvd_force_into_uvd_segment(bo);
-
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	if (r)
-		goto err;
-
 	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
 	if (r)
-		goto err;
+		return r;
 
-	addr = radeon_bo_gpu_offset(bo);
 	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
 	ib.ptr[1] = addr;
 	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
@@ -674,19 +669,11 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 	ib.length_dw = 16;
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
-	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
 
 	if (fence)
 		*fence = radeon_fence_ref(ib.fence);
 
 	radeon_ib_free(rdev, &ib);
-	radeon_bo_unref(&bo);
-	return 0;
-
-err:
-	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
 }
 
@@ -696,27 +683,18 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 			      uint32_t handle, struct radeon_fence **fence)
 {
-	struct radeon_bo *bo;
-	uint32_t *msg;
-	int r, i;
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
 
-	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
-	if (r)
-		return r;
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
 
-	r = radeon_bo_reserve(bo, false);
-	if (r) {
-		radeon_bo_unref(&bo);
-		return r;
-	}
+	int r, i;
 
-	r = radeon_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		radeon_bo_unreserve(bo);
-		radeon_bo_unref(&bo);
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+	if (r)
 		return r;
-	}
 
 	/* stitch together an UVD create msg */
 	msg[0] = cpu_to_le32(0x00000de4);
@@ -733,36 +711,26 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 	for (i = 11; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	radeon_bo_kunmap(bo);
-	radeon_bo_unreserve(bo);
-
-	return radeon_uvd_send_msg(rdev, ring, bo, fence);
+	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	return r;
 }
 
 int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence)
 {
-	struct radeon_bo *bo;
-	uint32_t *msg;
-	int r, i;
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
 
-	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
-	if (r)
-		return r;
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
 
-	r = radeon_bo_reserve(bo, false);
-	if (r) {
-		radeon_bo_unref(&bo);
-		return r;
-	}
+	int r, i;
 
-	r = radeon_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		radeon_bo_unreserve(bo);
-		radeon_bo_unref(&bo);
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+	if (r)
 		return r;
-	}
 
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
@@ -772,10 +740,9 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	radeon_bo_kunmap(bo);
-	radeon_bo_unreserve(bo);
-
-	return radeon_uvd_send_msg(rdev, ring, bo, fence);
+	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	return r;
 }
 
 /**
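
Reading the radeon_uvd.c hunks together, the segment helper ends up roughly as follows. This is reassembled from the hunks above for readability; the for-loop header is not visible in the diff context, so treat it as a best-effort reconstruction:

	void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
					       uint32_t allowed_domains)
	{
		int i;

		/* clamp every placement to the first 256MB segment */
		for (i = 0; i < rbo->placement.num_placement; ++i) {
			rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
			rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
		}

		/* If it must be in VRAM it must be in the first segment as well */
		if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
			return;

		/* abort if we already have more than one placement */
		if (rbo->placement.num_placement > 1)
			return;

		/* add another 256MB segment */
		rbo->placements[1] = rbo->placements[0];
		rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
		rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
		rbo->placement.num_placement++;
		rbo->placement.num_busy_placement++;
	}

The message preallocation works because the driver keeps rdev->uvd.vcpu_bo pinned in VRAM and kernel-mapped for the UVD firmware: growing it by one RADEON_GPU_PAGE_SIZE gives the create/destroy paths a permanent message page at rdev->uvd.cpu_addr/gpu_addr plus offs, so radeon_uvd_send_msg() no longer has to reserve, validate, and fence a throwaway buffer object per message.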