
Commit 161ab658 authored by Christian König, committed by Alex Deucher

drm/radeon: stop using addr to check for BO move



It is theoretically possible that a swapped-out BO gets the same GTT address
but different backing pages when it is swapped back in, so comparing the
cached address cannot reliably detect that the BO has moved.

Instead, just use another VA state to note updated areas.
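
For illustration only, here is a toy sketch (not driver code, and not part of the patch) of why the address compare can report a false "nothing changed" and what the list-based state check does instead. Only struct radeon_bo_va, its vm_status list and radeon_vm_bo_invalidate() are real driver names; every other name below is made up.

#include <stdbool.h>
#include <stdint.h>

/* Toy model of a mapping; the real driver uses struct radeon_bo_va. */
struct toy_mapping {
	uint64_t cached_addr;   /* GTT address recorded at the last PT update */
	bool     on_state_list; /* stands in for !list_empty(&bo_va->vm_status) */
};

/* Old scheme: compare addresses.  A BO that is swapped out and later
 * swapped back in at the same GTT address, but with different backing
 * pages, is wrongly reported as unchanged, so the page tables are never
 * rewritten. */
static bool needs_pt_update_old(const struct toy_mapping *m, uint64_t cur_addr)
{
	return m->cached_addr != cur_addr;
}

/* New scheme: track state explicitly.  radeon_vm_bo_invalidate() puts
 * the mapping on a per-VM state list whenever the BO moves, so the page
 * tables need a rewrite exactly as long as the mapping sits on a list. */
static bool needs_pt_update_new(const struct toy_mapping *m)
{
	return m->on_state_list;
}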

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 01062193
drivers/gpu/drm/radeon/radeon.h +3 −1
@@ -467,7 +467,6 @@ struct radeon_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	uint32_t			flags;
-	uint64_t			addr;
 	struct radeon_fence		*last_pt_update;
 	unsigned			ref_count;
 
@@ -941,6 +940,9 @@ struct radeon_vm {
 	/* BOs freed, but not yet updated in the PT */
 	struct list_head	freed;
 
+	/* BOs cleared in the PT */
+	struct list_head	cleared;
+
 	/* contains the page directory */
 	struct radeon_bo	*page_directory;
 	unsigned		max_pde_used;
drivers/gpu/drm/radeon/radeon_vm.c +29 −24
@@ -331,7 +331,6 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
 	bo_va->it.start = 0;
 	bo_va->it.last = 0;
 	bo_va->flags = 0;
-	bo_va->addr = 0;
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
 	INIT_LIST_HEAD(&bo_va->vm_status);
@@ -491,9 +490,11 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 	}
 
 	if (bo_va->it.start || bo_va->it.last) {
-		if (bo_va->addr) {
+		spin_lock(&vm->status_lock);
+		if (list_empty(&bo_va->vm_status)) {
 			/* add a clone of the bo_va to clear the old address */
 			struct radeon_bo_va *tmp;
+			spin_unlock(&vm->status_lock);
 			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
 			if (!tmp) {
 				mutex_unlock(&vm->mutex);
@@ -502,14 +503,11 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 			tmp->it.start = bo_va->it.start;
 			tmp->it.last = bo_va->it.last;
 			tmp->vm = vm;
-			tmp->addr = bo_va->addr;
 			tmp->bo = radeon_bo_ref(bo_va->bo);
 			spin_lock(&vm->status_lock);
 			list_add(&tmp->vm_status, &vm->freed);
-			spin_unlock(&vm->status_lock);
-
-			bo_va->addr = 0;
 		}
+		spin_unlock(&vm->status_lock);
 
 		interval_tree_remove(&bo_va->it, &vm->va);
 		bo_va->it.start = 0;
@@ -520,10 +518,12 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 		bo_va->it.start = soffset;
 		bo_va->it.last = eoffset - 1;
 		interval_tree_insert(&bo_va->it, &vm->va);
+		spin_lock(&vm->status_lock);
+		list_add(&bo_va->vm_status, &vm->cleared);
+		spin_unlock(&vm->status_lock);
 	}
 
 	bo_va->flags = flags;
-	bo_va->addr = 0;
 
 	soffset >>= radeon_vm_block_size;
 	eoffset >>= radeon_vm_block_size;
@@ -921,7 +921,16 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 	}
 
 	spin_lock(&vm->status_lock);
-	list_del_init(&bo_va->vm_status);
+	if (mem) {
+		if (list_empty(&bo_va->vm_status)) {
+			spin_unlock(&vm->status_lock);
+			return 0;
+		}
+		list_del_init(&bo_va->vm_status);
+	} else {
+		list_del(&bo_va->vm_status);
+		list_add(&bo_va->vm_status, &vm->cleared);
+	}
 	spin_unlock(&vm->status_lock);
 
 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
@@ -947,10 +956,6 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 		addr = 0;
 	}
 
-	if (addr == bo_va->addr)
-		return 0;
-	bo_va->addr = addr;
-
 	trace_radeon_vm_bo_update(bo_va);
 
 	nptes = bo_va->it.last - bo_va->it.start + 1;
@@ -1038,7 +1043,7 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
 			  struct radeon_vm *vm)
 {
 	struct radeon_bo_va *bo_va;
-	int r;
+	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->freed)) {
@@ -1049,14 +1054,15 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
 		radeon_bo_unref(&bo_va->bo);
 		radeon_fence_unref(&bo_va->last_pt_update);
+		spin_lock(&vm->status_lock);
+		list_del(&bo_va->vm_status);
 		kfree(bo_va);
 		if (r)
-			return r;
+			break;
 
-		spin_lock(&vm->status_lock);
 	}
 	spin_unlock(&vm->status_lock);
-	return 0;
+	return r;
 
 }

@@ -1114,14 +1120,14 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
 	mutex_lock(&vm->mutex);
 	if (bo_va->it.start || bo_va->it.last)
 		interval_tree_remove(&bo_va->it, &vm->va);
-	spin_lock(&vm->status_lock);
-	list_del(&bo_va->vm_status);
 
-	if (bo_va->addr) {
+	spin_lock(&vm->status_lock);
+	if (list_empty(&bo_va->vm_status)) {
 		bo_va->bo = radeon_bo_ref(bo_va->bo);
 		list_add(&bo_va->vm_status, &vm->freed);
 	} else {
 		radeon_fence_unref(&bo_va->last_pt_update);
+		list_del(&bo_va->vm_status);
 		kfree(bo_va);
 	}
 	spin_unlock(&vm->status_lock);
@@ -1144,14 +1150,12 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 	struct radeon_bo_va *bo_va;
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->addr) {
-			spin_lock(&bo_va->vm->status_lock);
-			list_del(&bo_va->vm_status);
+		spin_lock(&bo_va->vm->status_lock);
+		if (list_empty(&bo_va->vm_status))
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-			spin_unlock(&bo_va->vm->status_lock);
-		}
+		spin_unlock(&bo_va->vm->status_lock);
 	}
 }
 
 /**
  * radeon_vm_init - initialize a vm instance
@@ -1179,6 +1183,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->freed);
+	INIT_LIST_HEAD(&vm->cleared);
 
 	pd_size = radeon_vm_directory_size(rdev);
 	pd_entries = radeon_vm_num_pdes(rdev);
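
Reading the diff above as a whole, a bo_va is now tracked purely by which per-VM list its vm_status entry sits on. The following sketch is my own summary under that assumption, using hypothetical names; it is not part of the commit.

#include <stdbool.h>

/* Toy states (not driver code): each corresponds to one of the per-VM
 * lists, and TOY_VALID stands for list_empty(&bo_va->vm_status). */
enum toy_va_state {
	TOY_VALID,        /* off all lists: page tables are up to date    */
	TOY_CLEARED,      /* on vm->cleared: range known to be cleared    */
	TOY_INVALIDATED,  /* on vm->invalidated: BO moved, PTEs are stale */
	TOY_FREED,        /* on vm->freed: mapping removed, PTEs still set */
};

/* Rough transitions as the patch implements them:
 *   radeon_vm_bo_set_addr()        new range inserted  -> TOY_CLEARED
 *   radeon_vm_bo_invalidate()      BO moved            -> TOY_INVALIDATED (only from TOY_VALID)
 *   radeon_vm_bo_update(.., mem)   PTEs written        -> TOY_VALID
 *   radeon_vm_bo_update(.., NULL)  PTEs cleared        -> TOY_CLEARED
 *   radeon_vm_bo_rmv()             mapping removed     -> TOY_FREED (only from TOY_VALID,
 *                                                        otherwise the bo_va is freed at once)
 */
static bool toy_pt_write_needed(enum toy_va_state s)
{
	/* roughly replaces the old "addr == bo_va->addr" shortcut
	 * in radeon_vm_bo_update() */
	return s != TOY_VALID;
}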