
Commit 7a186308 authored by Maarten Lankhorst

drm/ttm: cleanup ttm_eu_reserve_buffers handling



With the lru lock no longer required for protecting reservations, we
can simply retry ttm_bo_reserve_nolru on -EBUSY and handle all
errors in a single path.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
parent 63d0a419
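
For context, ttm_eu_reserve_buffers() is the execbuf helper that drivers call to reserve an entire validation list up front. A minimal caller sketch, assuming the ttm_execbuf_util API of this era (the bo and sync_obj variables are illustrative placeholders, not part of this patch):

	struct ttm_validate_buffer val_buf;
	LIST_HEAD(list);
	int ret;

	val_buf.bo = bo;		/* buffer object to reserve */
	list_add(&val_buf.head, &list);

	/* Reserves every BO on the list, or backs off and returns an
	 * error with nothing left reserved. */
	ret = ttm_eu_reserve_buffers(&list);
	if (ret)
		return ret;

	/* ... validate placements, build and submit the command stream ... */

	/* Attach the fence and unreserve everything in one go. */
	ttm_eu_fence_buffer_objects(&list, sync_obj);

On error inside the reservation loop, the helper must unwind its own partial reservations; that unwinding is exactly the cleanup this commit consolidates.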
+21 −32
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -152,19 +136,23 @@ retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
 		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
-				spin_unlock(&glob->lru_lock);
-				ttm_eu_list_ref_sub(list);
-				return ret;
-			}
-			goto retry_this_bo;
+			ttm_eu_del_from_lru_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			spin_lock(&glob->lru_lock);
+			if (!ret)
+				break;
+
+			if (unlikely(ret != -EAGAIN))
+				goto err;
+
+			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
@@ -174,18 +162,13 @@ retry_this_bo:
 				return ret;
 			goto retry;
 		default:
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return ret;
+			goto err;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto err;
 		}
 	}
 
@@ -194,6 +177,12 @@ retry_this_bo:
 	ttm_eu_list_ref_sub(list);
 
 	return 0;
+
+err:
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
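
Read as a whole, the patch leaves the reservation loop with one blocking slow path and one shared error exit. A condensed view of the resulting control flow, assembled from the hunks above (unrelated context elided):

	ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		/* Drop the lru lock, then block on the reservation. */
		ttm_eu_del_from_lru_locked(list);
		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(bo, true, false, true, val_seq);
		spin_lock(&glob->lru_lock);
		if (!ret)
			break;
		if (unlikely(ret != -EAGAIN))
			goto err;
		/* fallthrough */
	case -EAGAIN:
		/* back off, wait for val_seq, retry the whole list */
		...
	default:
		goto err;
	}
	...
err:
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return ret;

The second ttm_bo_reserve_nolru call passes no_wait = false (third argument), so it blocks on the reservation itself rather than going through the removed ttm_eu_wait_unreserved_locked() helper, and every failure now funnels through the single err: label.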