
Commit 63d0a419 authored by Maarten Lankhorst

drm/ttm: remove lru_lock around ttm_bo_reserve



There should no longer be assumptions that reserve will always succeed
with the lru lock held, so we can safely break the whole atomic
reserve/lru thing. As a bonus this fixes most lockdep annotations for
reservations.
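
Illustratively, the ttm_bo_reserve() fast path now becomes (condensed from
the ttm_bo.c hunk below; error handling and the sequence logic elided):

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait,
				   use_sequence, sequence);
	if (likely(ret == 0)) {
		/* lru_lock is only taken for the LRU bookkeeping itself */
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}

ttm_bo_reserve_nolru() may now sleep in ttm_bo_wait_unreserved() without any
spinlock held, so callers that do hold lru_lock must pass no_wait == true
(or drop the lock first, as ttm_bo_delayed_delete does below for the
remove_all case).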

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
parent 979ee290
+35 −19
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_read(&bo->reserved) != 0)) {
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
 	}
 
-	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
+		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
 		if (unlikely((bo->val_seq - sequence < (1 << 31))
 			     || !bo->seq_valid))
-			wake_up_all(&bo->event_queue);
+			wake_up = true;
 
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order. However since we call wake_up_all
+		 * in that case, this will hopefully not pose a problem,
+		 * and the worst case would only cause someone to accidentally
+		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
+		 * val_seq. However this would only happen if seq_valid was
+		 * written before val_seq was, and just means some slightly
+		 * increased cpu usage
+		 */
 		bo->val_seq = sequence;
 		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
 	} else {
 		bo->seq_valid = false;
 	}
@@ -289,14 +298,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-				    sequence);
-	if (likely(ret == 0))
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-
-	ttm_bo_list_ref_sub(bo, put_count, true);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 
 	return ret;
 }
@@ -510,7 +519,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -603,7 +612,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 			return ret;
 
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 		/*
 		 * We raced, and lost, someone else holds the reservation now,
@@ -667,7 +676,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
 		if (!ret)
 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
 							     !remove_all);
@@ -815,7 +831,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1796,7 +1812,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
+1 −1
@@ -153,7 +153,7 @@ retry:
 		struct ttm_buffer_object *bo = entry->bo;
 
 retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
+4 −15
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
  * and call this function with @use_sequence == 1 and @sequence == the unique
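
(Aside, not part of the patch: a rough sketch of that execbuf-style usage,
loosely modelled on ttm_eu_reserve_buffers(). lru_lock handling and LRU
bookkeeping are omitted; next_val_seq() and backoff_reservations() are
hypothetical stand-ins for "get a unique sequence number" and "unreserve
everything reserved so far".)

	uint32_t val_seq = next_val_seq();	/* unique per submission (illustrative) */
	int ret;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = ttm_bo_reserve_nolru(bo, true, false, true, val_seq);
		switch (ret) {
		case 0:
			break;			/* reserved, move on to the next bo */
		case -EAGAIN:
			/* Deadlock avoidance kicked in: release all reservations,
			 * wait for the contended bo, then restart the whole loop. */
			backoff_reservations(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (ret)
				return ret;
			goto retry;
		default:
			backoff_reservations(list);
			return ret;
		}
	}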
@@ -832,7 +823,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +831,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * @use_sequence: If @bo is already reserved, Only sleep waiting for
  * it to become unreserved if @sequence < (@bo)->sequence.
  *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
  * Otherwise identical to ttm_bo_reserve.
  *
  * Returns:
@@ -855,7 +844,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * -EDEADLK: Bo already reserved using @sequence. This error code will only
  * be returned if @use_sequence is set to true.
  */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 				 bool interruptible,
 				 bool no_wait, bool use_sequence,
 				 uint32_t sequence);
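
(Aside, not part of the patch: with no_wait == true the function never
sleeps, which is what lets the LRU scanners above keep holding lru_lock
while they probe each buffer. Condensed from the ttm_mem_evict_first() /
ttm_bo_swapout() hunks:)

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		/* trylock-style: returns -EBUSY instead of sleeping */
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		if (!ret)
			break;		/* bo is reserved; it can now be evicted */
	}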