Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1717c0e2 authored by Dave Airlie
Browse files

Revert "drm/ttm: add a way to bo_wait for either the last read or last write"



This reverts commit dfadbbdb.

Further upstream discussion between Marek and Thomas decided this wasn't
fully baked and needed further work, so revert it before it hits mainline.

Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 83f30d0e
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -1104,8 +1104,7 @@ nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false,
				    TTM_USAGE_READWRITE);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}
+2 −3
Original line number Diff line number Diff line
@@ -589,8 +589,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false,
				  TTM_USAGE_READWRITE);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
@@ -826,7 +825,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait, TTM_USAGE_READWRITE);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
+0 −1
Original line number Diff line number Diff line
@@ -80,7 +80,6 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].lobj.tv.usage = TTM_USAGE_READWRITE;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
+1 −1
Original line number Diff line number Diff line
@@ -527,7 +527,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait, false);
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
+26 −71
Original line number Diff line number Diff line
@@ -499,7 +499,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
	int ret;

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true, TTM_USAGE_READWRITE);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!bo->sync_obj) {

		spin_lock(&glob->lru_lock);
@@ -567,8 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,

retry:
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
			  TTM_USAGE_READWRITE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0))
@@ -727,8 +726,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
			  TTM_USAGE_READWRITE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
@@ -1075,8 +1073,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
			  TTM_USAGE_READWRITE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
@@ -1697,83 +1694,34 @@ static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
	return ret;
}

/*
 * Drop the buffer object's reference(s) to @sync_obj while holding
 * bdev->fence_lock, then release the fence references outside the lock.
 *
 * @bo:             buffer object whose sync object slots are cleared
 * @sync_obj:       the fence being retired; matched against bo->sync_obj,
 *                  bo->sync_obj_read and bo->sync_obj_write
 * @extra_sync_obj: optional caller-held fence reference to unref in the
 *                  same unlocked window (may be NULL)
 *
 * Called with bdev->fence_lock held; the lock is dropped around the
 * driver sync_obj_unref() calls (which may sleep or take other locks)
 * and re-acquired before returning, so callers must not rely on state
 * observed under the lock surviving this call.
 */
static void ttm_bo_unref_sync_obj_locked(struct ttm_buffer_object *bo,
					 void *sync_obj,
					 void **extra_sync_obj)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;

	/* We must unref the sync obj wherever it's ref'd.
	 * Note that if we unref bo->sync_obj, we can unref both the read
	 * and write sync objs too, because they can't be newer than
	 * bo->sync_obj, so they are no longer relevant. */
	if (sync_obj == bo->sync_obj ||
	    sync_obj == bo->sync_obj_read) {
		tmp_obj_read = bo->sync_obj_read;
		bo->sync_obj_read = NULL;
	}
	if (sync_obj == bo->sync_obj ||
	    sync_obj == bo->sync_obj_write) {
		tmp_obj_write = bo->sync_obj_write;
		bo->sync_obj_write = NULL;
	}
	if (sync_obj == bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}

	/* Slots are cleared under the lock; the actual unrefs happen below
	 * with fence_lock dropped, since sync_obj_unref() may block. */
	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
	spin_unlock(&bdev->fence_lock);
	if (tmp_obj)
		driver->sync_obj_unref(&tmp_obj);
	if (tmp_obj_read)
		driver->sync_obj_unref(&tmp_obj_read);
	if (tmp_obj_write)
		driver->sync_obj_unref(&tmp_obj_write);
	if (extra_sync_obj)
		driver->sync_obj_unref(extra_sync_obj);
	spin_lock(&bdev->fence_lock);
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait,
		enum ttm_buffer_usage usage)
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;
	void **bo_sync_obj;

	switch (usage) {
	case TTM_USAGE_READ:
		bo_sync_obj = &bo->sync_obj_read;
		break;
	case TTM_USAGE_WRITE:
		bo_sync_obj = &bo->sync_obj_write;
		break;
	case TTM_USAGE_READWRITE:
	default:
		bo_sync_obj = &bo->sync_obj;
	}

	if (likely(*bo_sync_obj == NULL))
	if (likely(bo->sync_obj == NULL))
		return 0;

	while (*bo_sync_obj) {
	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(*bo_sync_obj, bo->sync_obj_arg)) {
			ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, NULL);
		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(*bo_sync_obj);
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
@@ -1784,9 +1732,16 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
			return ret;
		}
		spin_lock(&bdev->fence_lock);
		if (likely(*bo_sync_obj == sync_obj &&
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, &sync_obj);
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
@@ -1810,7 +1765,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait, TTM_USAGE_READWRITE);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
@@ -1884,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
Loading