
Commit 94dd0a4a authored by Christian König, committed by Alex Deucher

drm/amdgpu: merge vm_grab_id and vm_fence v2

No need for an extra function any more.

v2: comment cleanups

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8d0a7cea
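
The change in one sentence: callers used to take adev->vm_manager.lock, call amdgpu_vm_grab_id(), and on success call amdgpu_vm_fence() to record the fence that protects the ID from reuse; with this patch amdgpu_vm_grab_id() takes that fence as a parameter and does the locking and the bookkeeping itself. A condensed before/after of the caller side, distilled from the amdgpu_sched.c hunk below:

	/* before: two calls under a caller-held lock */
	mutex_lock(&adev->vm_manager.lock);
	r = amdgpu_vm_grab_id(vm, ring, sync);
	if (!r)
		amdgpu_vm_fence(ring->adev, vm, &job->base.s_fence->base);
	mutex_unlock(&adev->vm_manager.lock);

	/* after: one call; lock and fence bookkeeping live inside */
	r = amdgpu_vm_grab_id(vm, ring, sync, &job->base.s_fence->base);
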
drivers/gpu/drm/amd/amdgpu/amdgpu.h  +1 −4
@@ -956,13 +956,10 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
+		      struct amdgpu_sync *sync, struct fence *fence);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
 		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence);
 uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c  +4 −9
@@ -38,19 +38,14 @@ static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 
 	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
 		struct amdgpu_ring *ring = job->ibs->ring;
-		struct amdgpu_device *adev = ring->adev;
 		int r;
 
-		mutex_lock(&adev->vm_manager.lock);
-		r = amdgpu_vm_grab_id(vm, ring, sync);
-		if (r) {
+		r = amdgpu_vm_grab_id(vm, ring, sync,
+				      &job->base.s_fence->base);
+		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		} else {
-			fence = &job->base.s_fence->base;
-			amdgpu_vm_fence(ring->adev, vm, fence);
+		else
 			job->ibs->grabbed_vmid = true;
-		}
-		mutex_unlock(&adev->vm_manager.lock);
 
 		fence = amdgpu_sync_get_fence(sync);
 	}
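
For reference, the VMID branch of amdgpu_sched_dependency() as it reads after the patch, reassembled from the hunk above:

	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
		struct amdgpu_ring *ring = job->ibs->ring;
		int r;

		r = amdgpu_vm_grab_id(vm, ring, sync,
				      &job->base.s_fence->base);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
		else
			job->ibs->grabbed_vmid = true;

		fence = amdgpu_sync_get_fence(sync);
	}
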
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  +25 −32
@@ -152,13 +152,14 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  *
  * Global mutex must be locked!
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync)
+		      struct amdgpu_sync *sync, struct fence *fence)
 {
 	struct fence *best[AMDGPU_MAX_RINGS] = {};
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
@@ -167,6 +168,8 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	unsigned choices[2] = {};
 	unsigned i;
 
+	mutex_lock(&adev->vm_manager.lock);
+
 	/* check if the id is still valid */
 	if (vm_id->id) {
 		unsigned id = vm_id->id;
@@ -175,6 +178,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
 		if (owner == (long)vm) {
 			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+			fence_put(adev->vm_manager.ids[id].active);
+			adev->vm_manager.ids[id].active = fence_get(fence);
+			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 	}
@@ -191,6 +197,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			/* found a free one */
 			vm_id->id = i;
 			trace_amdgpu_vm_grab_id(vm, i, ring->idx);
+			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 
@@ -203,19 +210,29 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 	for (i = 0; i < 2; ++i) {
-		if (choices[i]) {
-			struct fence *fence;
+		struct fence *active;
+		int r;
 
-			fence  = adev->vm_manager.ids[choices[i]].active;
-			vm_id->id = choices[i];
+		if (!choices[i])
+			continue;
 
-			trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
-			return amdgpu_sync_fence(ring->adev, sync, fence);
-		}
+		vm_id->id = choices[i];
+		active  = adev->vm_manager.ids[vm_id->id].active;
+		r = amdgpu_sync_fence(ring->adev, sync, active);
+
+		trace_amdgpu_vm_grab_id(vm, choices[i], ring->idx);
+		atomic_long_set(&adev->vm_manager.ids[vm_id->id].owner, (long)vm);
+
+		fence_put(adev->vm_manager.ids[vm_id->id].active);
+		adev->vm_manager.ids[vm_id->id].active = fence_get(fence);
+
+		mutex_unlock(&adev->vm_manager.lock);
+		return r;
 	}
 
 	/* should never happen */
 	BUG();
+	mutex_unlock(&adev->vm_manager.lock);
 	return -EINVAL;
 }

@@ -257,30 +274,6 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 	}
 }
 
-/**
- * amdgpu_vm_fence - remember fence for vm
- *
- * @adev: amdgpu_device pointer
- * @vm: vm we want to fence
- * @fence: fence to remember
- *
- * Fence the vm (cayman+).
- * Set the fence used to protect page table and id.
- *
- * Global and local mutex must be locked!
- */
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence)
-{
-	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
-	unsigned vm_id = vm->ids[ring->idx].id;
-
-	fence_put(adev->vm_manager.ids[vm_id].active);
-	adev->vm_manager.ids[vm_id].active = fence_get(fence);
-	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
-}
-
 /**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
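
Taken together, the amdgpu_vm.c hunks move the vm_manager.lock inside amdgpu_vm_grab_id() and fold the old amdgpu_vm_fence() bookkeeping into every path that hands out an ID. Two idioms recur here and are worth spelling out: a function that takes its lock internally must release it on every return path, and replacing the remembered fence follows a put-old/get-new reference dance. A minimal standalone sketch of both idioms, in plain C with pthreads and a toy refcount rather than the kernel's fence API:

	#include <pthread.h>
	#include <stdlib.h>

	struct fence { int refcount; };

	static struct fence *fence_get(struct fence *f)
	{
		if (f)
			f->refcount++;
		return f;
	}

	static void fence_put(struct fence *f)
	{
		if (f && --f->refcount == 0)
			free(f);
	}

	static pthread_mutex_t manager_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct fence *active;	/* fence protecting the ID from reuse */

	/* Hand out the ID, remembering @fence as its new protector.  The
	 * lock is taken and released here, so every return path unlocks. */
	static int grab_id(struct fence *fence, int id_is_free)
	{
		pthread_mutex_lock(&manager_lock);

		if (!id_is_free) {		/* error path unlocks too */
			pthread_mutex_unlock(&manager_lock);
			return -1;
		}

		/* put-old/get-new, as the patch does for ids[id].active */
		fence_put(active);
		active = fence_get(fence);

		pthread_mutex_unlock(&manager_lock);
		return 0;
	}

	int main(void)
	{
		struct fence *f = calloc(1, sizeof(*f));

		f->refcount = 1;		/* caller's own reference */
		return grab_id(f, 1);
	}

The put-before-get order is safe here, as in the patch, only because the caller still holds its own reference to the incoming fence while the swap happens.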