Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d17fb44 authored by Thomas Hellstrom
Browse files

drm/ttm: Allow execbuf util reserves without ticket



If no reservation ticket is given to the execbuf reservation utilities,
try reservation with non-blocking semantics.
This is intended for eviction paths that use the execbuf reservation
utilities for convenience rather than for deadlock avoidance.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
parent a3483353
Loading
Loading
Loading
Loading
+19 −13
Original line number Diff line number Diff line
@@ -32,8 +32,7 @@
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_locked(struct list_head *list,
					      struct ww_acquire_ctx *ticket)
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

@@ -93,7 +92,8 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
@@ -130,6 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
@@ -139,16 +140,17 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
		if (entry->reserved)
			continue;


		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
		ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
					   ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			BUG_ON(ticket == NULL);
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list, ticket);
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,6 +177,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
		}
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
@@ -184,12 +187,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	if (ticket) {
		ww_acquire_done(ticket);
		ww_acquire_fini(ticket);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,6 +229,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
+2 −1
Original line number Diff line number Diff line
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
/**
 * function ttm_eu_reserve_buffers
 *
 * @ticket:  [out] ww_acquire_ctx returned by call.
 * @ticket:  [out] ww_acquire_ctx filled in by call, or NULL if only
 *           non-blocking reserves should be tried.
 * @list:    thread private list of ttm_validate_buffer structs.
 *
 * Tries to reserve bos pointed to by the list entries for validation.