
Commit b9bf33d5 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: make pipeline sync be in same place v2



v2: return directly for the 'if' case.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent df83d1eb
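
Before this change, a pipeline sync could be emitted from two places: amdgpu_ib_schedule() (when a scheduler fence still had to be waited on) and amdgpu_vm_flush() (guarded by the now-removed job->need_pipeline_sync flag). The patch moves the whole decision into amdgpu_ib_schedule() behind a new predicate, amdgpu_vm_need_pipeline_sync(), so the sync is emitted in exactly one place. An annotated sketch of the resulting flow (condensed from the hunks below with comments added; not a line-for-line copy):

	struct dma_fence *tmp = NULL;

	/* Single emission point for the pipeline sync: either the scheduler
	 * kept back a fence we must still wait on, or the VM code reports
	 * that the upcoming flush/GDS switch needs one.
	 */
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		amdgpu_ring_emit_pipeline_sync(ring);
		dma_fence_put(tmp);	/* a NULL tmp is handled gracefully */
	}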
drivers/gpu/drm/amd/amdgpu/amdgpu.h +0 −1
@@ -1132,7 +1132,6 @@ struct amdgpu_job {
 	void			*owner;
 	uint64_t		fence_ctx; /* the fence_context this job uses */
 	bool                    vm_needs_flush;
-	bool			need_pipeline_sync;
 	unsigned		vm_id;
 	uint64_t		vm_pd_addr;
 	uint32_t		gds_base, gds_size;
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +3 −3
@@ -121,7 +121,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
-	struct dma_fence *tmp;
+	struct dma_fence *tmp = NULL;
 	bool skip_preamble, need_ctx_switch;
 	unsigned patch_offset = ~0;
 	struct amdgpu_vm *vm;
@@ -163,8 +163,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	}
 
 	if (ring->funcs->emit_pipeline_sync && job &&
-	    (tmp = amdgpu_sync_get_fence(&job->sched_sync))) {
-		job->need_pipeline_sync = true;
+	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		amdgpu_ring_emit_pipeline_sync(ring);
 		dma_fence_put(tmp);
 	}
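
Note the interplay with the tmp = NULL initialization in the first hunk: when amdgpu_sync_get_fence() returns NULL, the || short-circuits into amdgpu_vm_need_pipeline_sync(), and if that returns true the body runs with tmp == NULL, which is fine because dma_fence_put() treats NULL as a no-op. A self-contained illustration of the same pattern in plain C (get_fence(), put_fence() and need_sync_anyway() are hypothetical stand-ins, not amdgpu API):

	#include <stdio.h>
	#include <stdlib.h>

	struct fence { int refcount; };

	/* Stand-in for amdgpu_sync_get_fence(): may legitimately return NULL. */
	static struct fence *get_fence(int available)
	{
		struct fence *f;

		if (!available)
			return NULL;
		f = malloc(sizeof(*f));
		f->refcount = 1;
		return f;
	}

	/* Stand-in for dma_fence_put(): a NULL argument is a no-op, like free(). */
	static void put_fence(struct fence *f)
	{
		free(f);
	}

	/* Stand-in for amdgpu_vm_need_pipeline_sync(). */
	static int need_sync_anyway(void)
	{
		return 1;
	}

	int main(void)
	{
		struct fence *tmp = NULL;

		/* get_fence(0) returns NULL, so the || falls through to
		 * need_sync_anyway(); the body still runs, with tmp == NULL. */
		if ((tmp = get_fence(0)) || need_sync_anyway())
			printf("emit pipeline sync\n");

		put_fence(tmp);	/* safe: tmp is NULL here */
		return 0;
	}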
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +0 −1
@@ -57,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
-	(*job)->need_pipeline_sync = false;
 
 	amdgpu_sync_create(&(*job)->sync);
 	amdgpu_sync_create(&(*job)->sched_sync);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +29 −3
@@ -694,6 +694,35 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
 	return addr;
 }
 
+bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+	struct amdgpu_vm_id *id;
+	bool gds_switch_needed;
+	bool vm_flush_needed = job->vm_needs_flush ||
+		amdgpu_vm_ring_has_compute_vm_bug(ring);
+
+	if (job->vm_id == 0)
+		return false;
+	id = &id_mgr->ids[job->vm_id];
+	gds_switch_needed = ring->funcs->emit_gds_switch && (
+		id->gds_base != job->gds_base ||
+		id->gds_size != job->gds_size ||
+		id->gws_base != job->gws_base ||
+		id->gws_size != job->gws_size ||
+		id->oa_base != job->oa_base ||
+		id->oa_size != job->oa_size);
+
+	if (amdgpu_vm_had_gpu_reset(adev, id))
+		return true;
+	if (!vm_flush_needed && !gds_switch_needed)
+		return false;
+	return true;
+}
+
 /**
  * amdgpu_vm_flush - hardware flush the vm
  *
@@ -732,9 +761,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 	if (ring->funcs->init_cond_exec)
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
-	if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
-		amdgpu_ring_emit_pipeline_sync(ring);
-
 	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
 		u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
 		struct dma_fence *fence;
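
The new predicate in the first hunk answers one question: will amdgpu_vm_flush() emit work (a VM flush or a GDS switch) that must not overlap the previous job on this ring? Reading past the early returns, its last three statements collapse into a single boolean expression; a condensed restatement for readability (using the same locals as the function above, not a proposed change):

	/* Equivalent to the tail of amdgpu_vm_need_pipeline_sync(): */
	if (job->vm_id == 0)
		return false;	/* no VM id: nothing to flush or switch */
	return amdgpu_vm_had_gpu_reset(adev, id) ||
	       vm_flush_needed || gds_switch_needed;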
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +2 −0
@@ -245,5 +245,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+				  struct amdgpu_job *job);
 
 #endif