
Commit c5bc1c93 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull nouveau and radeon fixes from Dave Airlie:
 "Just some nouveau and radeon/amdgpu fixes.

  The nouveau fixes look large as the firmware context files are
  regenerated, but the actual change is quite small"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: make some dpm errors debug only
  drm/nouveau/volt/pwm/gk104: fix an off-by-one resulting in the voltage not being set
  drm/nouveau/nvif: allow userspace access to its own client object
  drm/nouveau/gr/gf100-: fix oops when calling zbc methods
  drm/nouveau/gr/gf117-: assume no PPC if NV_PGRAPH_GPC_GPM_PD_PES_TPC_ID_MASK is zero
  drm/nouveau/gr/gf117-: read NV_PGRAPH_GPC_GPM_PD_PES_TPC_ID_MASK from correct GPC
  drm/nouveau/gr/gf100-: split out per-gpc address calculation macro
  drm/nouveau/bios: return actual size of the buffer retrieved via _ROM
  drm/nouveau/instmem: protect instobj list with a spinlock
  drm/nouveau/pci: enable c800 magic for some unknown Samsung laptop
  drm/nouveau/pci: enable c800 magic for Clevo P157SM
  drm/radeon: make rv770_set_sw_state failures non-fatal
  drm/amdgpu: move dependency handling out of atomic section v2
  drm/amdgpu: optimize scheduler fence handling
  drm/amdgpu: remove vm->mutex
  drm/amdgpu: add mutex for ba_va->valids/invalids
  drm/amdgpu: adapt vce session create interface changes
  drm/amdgpu: vce use multiple cache surface starting from stoney
  drm/amdgpu: reset vce trap interrupt flag
parents 818aba30 8c14f72b
drivers/gpu/drm/amd/amdgpu/amdgpu.h  +1 −2
@@ -496,6 +496,7 @@ struct amdgpu_bo_va_mapping {

 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
+	struct mutex		        mutex;
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	struct fence		        *last_pt_update;
@@ -928,8 +929,6 @@ struct amdgpu_vm_id {
 };

 struct amdgpu_vm {
-	struct mutex		mutex;
-
 	struct rb_root		va;

 	/* protecting invalidated */
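
The header change carries the theme of the whole amdgpu series: the mutex moves out of the per-process amdgpu_vm and into each amdgpu_bo_va, so tasks touching different mappings in the same VM stop serializing on a single lock. A minimal userspace sketch of the before/after shape, with pthread mutexes standing in for kernel struct mutex (the struct and field names here are illustrative, not the driver's):

#include <pthread.h>

/* Before: one mutex per VM serialized every bo_va operation
 * in that address space. */
struct vm_coarse {
	pthread_mutex_t mutex;          /* guarded everything below */
	/* ... rb_root of mappings, invalidated list ... */
};

/* After: each bo_va carries its own mutex guarding only its own
 * valids/invalids lists, so unrelated mappings no longer contend. */
struct bo_va_fine {
	pthread_mutex_t mutex;          /* guards this mapping only */
	/* ... valids/invalids lists, last_pt_update fence ... */
};

int main(void)
{
	struct bo_va_fine bo_va;

	pthread_mutex_init(&bo_va.mutex, NULL);  /* cf. mutex_init() in amdgpu_vm_bo_add() */
	pthread_mutex_lock(&bo_va.mutex);
	/* touch this mapping's state */
	pthread_mutex_unlock(&bo_va.mutex);
	pthread_mutex_destroy(&bo_va.mutex);     /* cf. mutex_destroy() in amdgpu_vm_bo_rmv() */
	return 0;
}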
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  +0 −4
@@ -784,8 +784,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
 	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
 	int i, r;
@@ -803,7 +801,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
-	mutex_lock(&vm->mutex);
 	r = amdgpu_cs_parser_relocs(&parser);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
@@ -888,7 +885,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)

 out:
 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-	mutex_unlock(&vm->mutex);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  +2 −12
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, false);
-	if (r) {
-		mutex_unlock(&vm->mutex);
+	if (r)
 		return r;
-	}

 	bo_va = amdgpu_vm_bo_find(vm, rbo);
 	if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 		++bo_va->ref_count;
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 	return 0;
 }

@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
-		mutex_unlock(&vm->mutex);
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 	amdgpu_bo_unreserve(rbo);
-	mutex_unlock(&vm->mutex);
 }

 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -553,7 +546,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
@@ -568,7 +560,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
-		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
@@ -577,7 +568,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	if (!bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		drm_gem_object_unreference_unlocked(gobj);
-		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}

@@ -602,7 +592,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-	mutex_unlock(&fpriv->vm.mutex);
+
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
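
Note how dropping the lock also simplifies the error paths: the three-line "if (r) { mutex_unlock(...); return r; }" collapses to a bare "if (r) return r;", and every early return has one less resource to unwind. A generic sketch (not driver code) of the same before/after, using the kernel's usual goto-unlock idiom for the locked version:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Holding a lock across the whole function: every failure must
 * funnel through one unlock site (the usual kernel goto idiom). */
static int op_locked(int fail)
{
	int r = 0;

	pthread_mutex_lock(&lock);
	if (fail) {
		r = -1;
		goto out;       /* can't just return: the lock is held */
	}
	/* ... do the real work ... */
out:
	pthread_mutex_unlock(&lock);
	return r;
}

/* Without the lock, early returns are safe as written -- the shape
 * the gem functions take after this patch. */
static int op_lockless(int fail)
{
	if (fail)
		return -1;      /* nothing to unwind */
	/* ... do the real work ... */
	return 0;
}

int main(void)
{
	printf("%d %d\n", op_locked(1), op_lockless(0));
	return 0;
}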
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  +10 −1
@@ -392,6 +392,9 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
 	ib->ptr[ib->length_dw++] = handle;

-	ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+	if ((ring->adev->vce.fw_version >> 24) >= 52)
+		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
+	else
+		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
 	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
 	ib->ptr[ib->length_dw++] = 0x00000000;
@@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	ib->ptr[ib->length_dw++] = 0x00000100;
 	ib->ptr[ib->length_dw++] = 0x0000000c;
 	ib->ptr[ib->length_dw++] = 0x00000000;
+	if ((ring->adev->vce.fw_version >> 24) >= 52) {
+		ib->ptr[ib->length_dw++] = 0x00000000;
+		ib->ptr[ib->length_dw++] = 0x00000000;
+		ib->ptr[ib->length_dw++] = 0x00000000;
+		ib->ptr[ib->length_dw++] = 0x00000000;
+	}

 	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
 	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
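
Both hunks key off the same check: the VCE firmware's major version sits in the top byte of fw_version, and firmware 52 and newer expects a 0x40-byte session-create message (hence the four extra zero words) instead of the old 0x30-byte one. A standalone sketch of just that version gate; the helper name is hypothetical, the constants come from the hunks above:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the check in the hunks above:
 * the firmware major version is the top byte of fw_version. */
static uint32_t vce_create_msg_len(uint32_t fw_version)
{
	return (fw_version >> 24) >= 52 ? 0x00000040 : 0x00000030;
}

int main(void)
{
	uint32_t fw_new = (52u << 24) | 4;  /* e.g. a 52.x blob */
	uint32_t fw_old = (50u << 24) | 1;  /* e.g. a 50.x blob */

	printf("fw 52.x -> len 0x%02x\n", vce_create_msg_len(fw_new));
	printf("fw 50.x -> len 0x%02x\n", vce_create_msg_len(fw_old));
	return 0;
}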
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  +11 −8
@@ -922,8 +922,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 		bo_va = list_first_entry(&vm->invalidated,
 			struct amdgpu_bo_va, vm_status);
 		spin_unlock(&vm->status_lock);
-
+		mutex_lock(&bo_va->mutex);
 		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+		mutex_unlock(&bo_va->mutex);
 		if (r)
 			return r;

@@ -967,7 +968,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 	INIT_LIST_HEAD(&bo_va->vm_status);
-
+	mutex_init(&bo_va->mutex);
 	list_add_tail(&bo_va->bo_list, &bo->va);

 	return bo_va;
@@ -1045,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	mapping->offset = offset;
 	mapping->flags = flags;

+	mutex_lock(&bo_va->mutex);
 	list_add(&mapping->list, &bo_va->invalids);
+	mutex_unlock(&bo_va->mutex);
 	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
 	spin_unlock(&vm->it_lock);
@@ -1121,7 +1124,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	bool valid = true;

 	saddr /= AMDGPU_GPU_PAGE_SIZE;
-
+	mutex_lock(&bo_va->mutex);
 	list_for_each_entry(mapping, &bo_va->valids, list) {
 		if (mapping->it.start == saddr)
 			break;
@@ -1135,10 +1138,12 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 				break;
 		}

-		if (&mapping->list == &bo_va->invalids)
+		if (&mapping->list == &bo_va->invalids) {
+			mutex_unlock(&bo_va->mutex);
 			return -ENOENT;
+		}
 	}
-
+	mutex_unlock(&bo_va->mutex);
 	list_del(&mapping->list);
 	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
@@ -1190,8 +1195,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
-
 	fence_put(bo_va->last_pt_update);
+	mutex_destroy(&bo_va->mutex);
 	kfree(bo_va);
 }

@@ -1236,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->ids[i].id = 0;
 		vm->ids[i].flushed_updates = NULL;
 	}
-	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
@@ -1320,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		fence_put(vm->ids[i].flushed_updates);
 	}

-	mutex_destroy(&vm->mutex);
 }

 /**
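
The trickiest hunk is the one in amdgpu_vm_bo_unmap(): once the mapping search runs under bo_va->mutex, the not-found exit has to drop the lock before returning -ENOENT, or the function returns with the mutex still held. A userspace sketch of that search-under-lock pattern, with pthreads standing in for kernel mutexes and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

struct mapping {
	unsigned long start;
	struct mapping *next;
};

struct bo_va_sketch {
	pthread_mutex_t mutex;
	struct mapping *valids;         /* singly linked stand-in for the list_head */
};

static int unmap(struct bo_va_sketch *bo_va, unsigned long saddr)
{
	struct mapping *m;

	pthread_mutex_lock(&bo_va->mutex);
	for (m = bo_va->valids; m; m = m->next)
		if (m->start == saddr)
			break;

	if (!m) {
		pthread_mutex_unlock(&bo_va->mutex);  /* the unlock the patch adds */
		return -1;                            /* stands in for -ENOENT */
	}
	pthread_mutex_unlock(&bo_va->mutex);

	/* list_del() / interval tree removal would follow here */
	return 0;
}

int main(void)
{
	struct mapping m = { 0x1000, NULL };
	struct bo_va_sketch bo_va = { PTHREAD_MUTEX_INITIALIZER, &m };

	printf("hit: %d, miss: %d\n", unmap(&bo_va, 0x1000), unmap(&bo_va, 0x2000));
	return 0;
}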