Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 41d9eb2c authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: add a fence after the VM flush



This way we can track when the flush is done.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 832a902f
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -880,6 +880,7 @@ struct amdgpu_vm_id {
	struct list_head	list;
	struct fence		*first;
	struct amdgpu_sync	active;
	struct fence		*last_flush;
	atomic_long_t		owner;

	uint64_t		pd_gpu_addr;
@@ -926,7 +927,7 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
int amdgpu_vm_flush(struct amdgpu_ring *ring,
		    unsigned vm_id, uint64_t pd_addr,
		    uint32_t gds_base, uint32_t gds_size,
		    uint32_t gws_base, uint32_t gws_size,
+8 −4
Original line number Diff line number Diff line
@@ -155,10 +155,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,

	if (vm) {
		/* do context switch */
		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
		r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
				    ib->gds_base, ib->gds_size,
				    ib->gws_base, ib->gws_size,
				    ib->oa_base, ib->oa_size);
		if (r) {
			amdgpu_ring_undo(ring);
			return r;
		}

		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
+21 −5
Original line number Diff line number Diff line
@@ -236,6 +236,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
	fence_put(id->first);
	id->first = fence_get(fence);

	fence_put(id->last_flush);
	id->last_flush = NULL;

	fence_put(id->flushed_updates);
	id->flushed_updates = fence_get(updates);

@@ -263,7 +266,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 *
 * Emit a VM flush when it is necessary.
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
int amdgpu_vm_flush(struct amdgpu_ring *ring,
		    unsigned vm_id, uint64_t pd_addr,
		    uint32_t gds_base, uint32_t gds_size,
		    uint32_t gws_base, uint32_t gws_size,
@@ -278,14 +281,25 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
		id->gws_size != gws_size ||
		id->oa_base != oa_base ||
		id->oa_size != oa_size);
	int r;

	if (ring->funcs->emit_pipeline_sync && (
	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
		amdgpu_ring_emit_pipeline_sync(ring);

	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
		struct fence *fence;

		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
		r = amdgpu_fence_emit(ring, &fence);
		if (r)
			return r;

		mutex_lock(&adev->vm_manager.lock);
		fence_put(id->last_flush);
		id->last_flush = fence;
		mutex_unlock(&adev->vm_manager.lock);
	}

	if (gds_switch_needed) {
@@ -300,6 +314,8 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
					    gws_base, gws_size,
					    oa_base, oa_size);
	}

	return 0;
}

/**