
Commit 10d123b2 authored by Dave Airlie

Merge branch 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux into drm-next

Add support for concurrent buffer reads.

* 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: allow concurrent buffer reads
  drm/radeon: add the infrastructure for concurrent buffer access
  drm/ttm: allow fence to be added as shared
parents e351943b 298593b6
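
For context: the TTM patch in this series adds a per-entry shared flag to struct ttm_validate_buffer, so a driver can choose whether the fence it attaches to a buffer excludes all other users (a write) or may coexist with other shared fences (a read). A minimal sketch of how a driver might fill such an entry follows; the helper name and read_only parameter are illustrative, not from this commit:

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical helper: queue a BO on a validation list using the new
 * shared flag.  Readers mark their fence shared so other readers are not
 * serialized against it; writers keep the fence exclusive, which is what
 * the qxl hunk below does unconditionally (shared = false). */
static void example_list_add(struct list_head *validate_list,
			     struct ttm_validate_buffer *tv,
			     struct ttm_buffer_object *bo,
			     bool read_only)
{
	tv->bo = bo;
	tv->shared = read_only;
	list_add_tail(&tv->head, validate_list);
}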
drivers/gpu/drm/qxl/qxl_release.c +1 −0
@@ -226,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
+	entry->tv.shared = false;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}
drivers/gpu/drm/radeon/cik.c +13 −12
@@ -3959,18 +3959,19 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
 *
 * Copy GPU paging using the CP DMA engine (CIK+).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
-int cik_copy_cpdma(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence)
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+				    uint64_t src_offset, uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv)
{
	struct radeon_semaphore *sem = NULL;
+	struct radeon_fence *fence;
	int ring_index = rdev->asic->copy.blit_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes, control;
@@ -3980,7 +3981,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
+		return ERR_PTR(r);
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -3989,10 +3990,10 @@ int cik_copy_cpdma(struct radeon_device *rdev,
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
	}

-	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_resv(sem, resv, false);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
@@ -4014,17 +4015,17 @@ int cik_copy_cpdma(struct radeon_device *rdev,
		dst_offset += cur_size_in_bytes;
	}

-	r = radeon_fence_emit(rdev, fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
-	radeon_semaphore_free(rdev, &sem, *fence);
+	radeon_semaphore_free(rdev, &sem, fence);

-	return r;
+	return fence;
}
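
A note on the new calling convention (a sketch, not part of the diff): the copy routines now hand the fence back directly and report failure through the kernel's ERR_PTR encoding, so callers test IS_ERR() instead of an int return. A hypothetical caller could look like this; example_copy_and_wait is illustrative:

#include <linux/err.h>

static int example_copy_and_wait(struct radeon_device *rdev,
				 uint64_t src, uint64_t dst,
				 unsigned num_gpu_pages,
				 struct reservation_object *resv)
{
	struct radeon_fence *fence;
	int r;

	/* The fence is now the return value; errors come back ERR_PTR-encoded. */
	fence = cik_copy_cpdma(rdev, src, dst, num_gpu_pages, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = radeon_fence_wait(fence, false);	/* block, non-interruptible */
	radeon_fence_unref(&fence);
	return r;
}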

/*
drivers/gpu/drm/radeon/cik_sdma.c +13 −12
@@ -537,18 +537,19 @@ void cik_sdma_fini(struct radeon_device *rdev)
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
 *
 * Copy GPU paging using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
-int cik_copy_dma(struct radeon_device *rdev,
-		 uint64_t src_offset, uint64_t dst_offset,
-		 unsigned num_gpu_pages,
-		 struct radeon_fence **fence)
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+				  uint64_t src_offset, uint64_t dst_offset,
+				  unsigned num_gpu_pages,
+				  struct reservation_object *resv)
{
	struct radeon_semaphore *sem = NULL;
+	struct radeon_fence *fence;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
@@ -558,7 +559,7 @@ int cik_copy_dma(struct radeon_device *rdev,
	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
+		return ERR_PTR(r);
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -567,10 +568,10 @@ int cik_copy_dma(struct radeon_device *rdev,
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
	}

-	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_resv(sem, resv, false);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
@@ -589,17 +590,17 @@ int cik_copy_dma(struct radeon_device *rdev,
		dst_offset += cur_size_in_bytes;
	}

-	r = radeon_fence_emit(rdev, fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
-	radeon_semaphore_free(rdev, &sem, *fence);
+	radeon_semaphore_free(rdev, &sem, fence);

-	return r;
+	return fence;
}
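
The same conversion ripples into the asic copy callback table these functions are registered in (the diff already reads rdev->asic->copy.dma_ring_index). A sketch of the before/after function-pointer shape; the struct name here is illustrative, not the verbatim radeon.h declaration:

struct example_copy_funcs {
	/* old shape:
	 * int (*dma)(struct radeon_device *rdev,
	 *	      uint64_t src_offset, uint64_t dst_offset,
	 *	      unsigned num_gpu_pages,
	 *	      struct radeon_fence **fence);
	 */
	struct radeon_fence *(*dma)(struct radeon_device *rdev,
				    uint64_t src_offset, uint64_t dst_offset,
				    unsigned num_gpu_pages,
				    struct reservation_object *resv);
	u32 dma_ring_index;
};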

/**
drivers/gpu/drm/radeon/evergreen_dma.c +13 −11
@@ -104,12 +104,14 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
-int evergreen_copy_dma(struct radeon_device *rdev,
-		       uint64_t src_offset, uint64_t dst_offset,
-		       unsigned num_gpu_pages,
-		       struct radeon_fence **fence)
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+					uint64_t src_offset,
+					uint64_t dst_offset,
+					unsigned num_gpu_pages,
+					struct reservation_object *resv)
{
	struct radeon_semaphore *sem = NULL;
+	struct radeon_fence *fence;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
@@ -119,7 +121,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
+		return ERR_PTR(r);
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -128,10 +130,10 @@ int evergreen_copy_dma(struct radeon_device *rdev,
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
	}

-	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_resv(sem, resv, false);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
@@ -148,17 +150,17 @@ int evergreen_copy_dma(struct radeon_device *rdev,
		dst_offset += cur_size_in_dw * 4;
	}

-	r = radeon_fence_emit(rdev, fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
-	radeon_semaphore_free(rdev, &sem, *fence);
+	radeon_semaphore_free(rdev, &sem, fence);

-	return r;
+	return fence;
}

/**
drivers/gpu/drm/radeon/r100.c +12 −9
@@ -855,13 +855,14 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
	return false;
}

-int r100_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset,
-		   uint64_t dst_offset,
-		   unsigned num_gpu_pages,
-		   struct radeon_fence **fence)
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+				    uint64_t src_offset,
+				    uint64_t dst_offset,
+				    unsigned num_gpu_pages,
+				    struct reservation_object *resv)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_fence *fence;
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
@@ -882,7 +883,7 @@ int r100_copy_blit(struct radeon_device *rdev,
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
@@ -922,11 +923,13 @@ int r100_copy_blit(struct radeon_device *rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
-	if (fence) {
-		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return ERR_PTR(r);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
-	return r;
+	return fence;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
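
The r100 hunk is also a small behavioral fix: previously a failed fence emit still committed the ring; now the error path unwinds it first. The discipline, sketched as a standalone helper (illustrative, assuming the caller already holds the ring via radeon_ring_lock()):

static struct radeon_fence *example_finish(struct radeon_device *rdev,
					   struct radeon_ring *ring)
{
	struct radeon_fence *fence;
	int r;

	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		/* Discard everything queued since radeon_ring_lock(). */
		radeon_ring_unlock_undo(rdev, ring);
		return ERR_PTR(r);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return fence;
}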