
Commit 8f19cd78 authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove last_entry_used from the VM code



Not needed any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e3a1b32a
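The field is redundant because the number of entries in a page directory at a given level is already a fixed function of the device configuration and the level, which amdgpu_vm_num_entries(adev, level) returns; the walkers can therefore visit every possible slot and simply skip entries whose BO was never allocated. The standalone sketch below only illustrates that idea; the types, sizes and helper names are simplified stand-ins, not the driver's real code.

/* Hypothetical, simplified model: when per-level entry counts are a pure
 * function of the level, a per-node "last_entry_used" counter is unnecessary. */
#include <stdlib.h>

struct pt_node {
	struct pt_node *entries;	/* sub-directory array, NULL if never allocated */
};

/* Stand-in for amdgpu_vm_num_entries(): depends only on the level. */
static unsigned num_entries(unsigned level)
{
	return level == 0 ? 4 : 8;	/* made-up sizes for illustration */
}

/* Free a directory and all sub levels, mirroring the shape of the reworked
 * amdgpu_vm_free_levels(): iterate the full per-level entry count and skip
 * slots that were never populated. */
static void free_levels(struct pt_node *parent, unsigned level)
{
	unsigned i, n = num_entries(level);

	if (parent->entries)
		for (i = 0; i < n; i++)
			free_levels(&parent->entries[i], level + 1);

	free(parent->entries);
}

int main(void)
{
	struct pt_node root = {
		.entries = calloc(num_entries(0), sizeof(struct pt_node)),
	};

	free_levels(&root, 0);
	return 0;
}

The visible trade-off in the diff is that amdgpu_vm_invalidate_level() and amdgpu_vm_free_levels() now take adev and the current level so they can compute that bound, and they iterate over all possible entries instead of stopping at the highest index ever used.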
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +29 −23
@@ -329,9 +329,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 	    to >= amdgpu_vm_num_entries(adev, level))
 		return -EINVAL;

-	if (to > parent->last_entry_used)
-		parent->last_entry_used = to;
-
 	++level;
 	saddr = saddr & ((1 << shift) - 1);
 	eaddr = eaddr & ((1 << shift) - 1);
@@ -1184,16 +1181,19 @@ static int amdgpu_vm_update_pde(struct amdgpu_device *adev,
  *
  * Mark all PD level as invalid after an error.
  */
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
-				       struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+				       struct amdgpu_vm *vm,
+				       struct amdgpu_vm_pt *parent,
+				       unsigned level)
 {
-	unsigned pt_idx;
+	unsigned pt_idx, num_entries;

 	/*
 	 * Recurse into the subdirectories. This recursion is harmless because
 	 * we only have a maximum of 5 layers.
 	 */
-	for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+	num_entries = amdgpu_vm_num_entries(adev, level);
+	for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
 		struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];

 		if (!entry->base.bo)
@@ -1204,7 +1204,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
 		if (list_empty(&entry->base.vm_status))
 			list_add(&entry->base.vm_status, &vm->relocated);
 		spin_unlock(&vm->status_lock);
-		amdgpu_vm_invalidate_level(vm, entry);
+		amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
 	}
 }

@@ -1246,7 +1246,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,

 			r = amdgpu_vm_update_pde(adev, vm, pt, entry);
 			if (r) {
-				amdgpu_vm_invalidate_level(vm, &vm->root);
+				amdgpu_vm_invalidate_level(adev, vm,
+							   &vm->root, 0);
 				return r;
 			}
 			spin_lock(&vm->status_lock);
@@ -1649,7 +1650,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,

 error_free:
 	amdgpu_job_free(job);
-	amdgpu_vm_invalidate_level(vm, &vm->root);
+	amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
 	return r;
 }

@@ -2713,26 +2714,31 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 /**
  * amdgpu_vm_free_levels - free PD/PT levels
  *
- * @level: PD/PT starting level to free
+ * @adev: amdgpu device structure
+ * @parent: PD/PT starting level to free
+ * @level: level of parent structure
  *
  * Free the page directory or page table level and all sub levels.
  */
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+				  struct amdgpu_vm_pt *parent,
+				  unsigned level)
 {
-	unsigned i;
+	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

-	if (level->base.bo) {
-		list_del(&level->base.bo_list);
-		list_del(&level->base.vm_status);
-		amdgpu_bo_unref(&level->base.bo->shadow);
-		amdgpu_bo_unref(&level->base.bo);
+	if (parent->base.bo) {
+		list_del(&parent->base.bo_list);
+		list_del(&parent->base.vm_status);
+		amdgpu_bo_unref(&parent->base.bo->shadow);
+		amdgpu_bo_unref(&parent->base.bo);
 	}

-	if (level->entries)
-		for (i = 0; i <= level->last_entry_used; i++)
-			amdgpu_vm_free_levels(&level->entries[i]);
+	if (parent->entries)
+		for (i = 0; i < num_entries; i++)
+			amdgpu_vm_free_levels(adev, &parent->entries[i],
+					      level + 1);

-	kvfree(level->entries);
+	kvfree(parent->entries);
 }

 /**
@@ -2790,7 +2796,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	if (r) {
 		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
 	} else {
-		amdgpu_vm_free_levels(&vm->root);
+		amdgpu_vm_free_levels(adev, &vm->root, 0);
 		amdgpu_bo_unreserve(root);
 	}
 	amdgpu_bo_unref(&root);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +0 −1
@@ -142,7 +142,6 @@ struct amdgpu_vm_pt {

 	/* array of page tables, one for each directory entry */
 	struct amdgpu_vm_pt		*entries;
-	unsigned			last_entry_used;
 };

 #define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))