
Commit 5e338405 authored by Maarten Lankhorst, committed by Dave Airlie

drm/ttm: convert to the reservation api



Now that the code is compatible in semantics, flip the switch.
Use ww_mutex instead of the homegrown implementation.

ww_mutex uses -EDEADLK to signal that the caller has to back off,
and -EALREADY to indicate this buffer is already held by the caller.

ttm used -EAGAIN and -EDEADLK for those, respectively. So some changes
were needed to handle this correctly.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent b580c9e2
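
For orientation, the error contract described above as a caller of the new
API sees it. This is an illustrative sketch only, not part of this commit;
struct my_obj and my_obj_reserve() are hypothetical names.

#include <linux/ww_mutex.h>

struct my_obj {
	struct ww_mutex lock;
};

/* Sketch: how the codes named above are meant to be handled. */
static int my_obj_reserve(struct my_obj *obj, struct ww_acquire_ctx *ctx)
{
	int ret = ww_mutex_lock_interruptible(&obj->lock, ctx);

	if (ret == -EALREADY)	/* this ctx already holds obj->lock */
		return 0;
	if (ret == -EDEADLK)	/* caller must back off, then take the
				 * contended lock via the slowpath */
		return ret;
	if (ret == -EINTR)	/* signal; TTM maps this to -ERESTARTSYS */
		return -ERESTARTSYS;
	return ret;		/* 0 on success */
}
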
drivers/gpu/drm/nouveau/nouveau_gem.c  +1 −1
@@ -368,7 +368,7 @@ retry:
 		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
 		if (ret) {
 			validate_fini_no_ticket(op, NULL);
-			if (unlikely(ret == -EAGAIN)) {
+			if (unlikely(ret == -EDEADLK)) {
 				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
 							      &op->ticket);
 				if (!ret)
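
The backoff/retry dance this hunk adjusts, sketched in isolation. This is
illustrative only: struct validate_op, the list layout and
validate_fini_no_ticket() are nouveau's, reduced here to the locking
skeleton, with validate_fini_no_ticket() assumed to drop every reservation
the op still holds.

static int validate_reserve_list(struct validate_op *op)
{
	struct nouveau_bo *nvbo, *res_bo = NULL;
	int ret;

retry:
	list_for_each_entry(nvbo, &op->list, entry) {
		if (nvbo == res_bo) {
			/* reserved by the slowpath on the previous pass */
			res_bo = NULL;
			continue;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			/* back off: drop everything reserved so far */
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				/* sleep until the contended bo is free,
				 * reserve it, then walk the list again */
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret) {
					res_bo = nvbo;
					goto retry;
				}
			}
			return ret;
		}
	}
	return 0;
}
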
drivers/gpu/drm/qxl/qxl_object.h  +0 −5
@@ -57,11 +57,6 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 	return bo->tbo.num_pages << PAGE_SHIFT;
 }
 
-static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
-{
-	return !!atomic_read(&bo->tbo.reserved);
-}
-
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
 	return bo->tbo.addr_space_offset;
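
If a driver still wanted such a helper after this removal, the reservation
state now lives in the ww_mutex embedded in the reservation object, so a
sketch of the equivalent check (not part of this patch) would be:

static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
{
	return ww_mutex_is_locked(&bo->tbo.resv->lock);
}
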
drivers/gpu/drm/ttm/ttm_bo.c  +50 −140
@@ -150,6 +150,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	if (bo->ttm)
 		ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
+	if (bo->resv == &bo->ttm_resv)
+		reservation_object_fini(&bo->ttm_resv);
+
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
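
The counterpart of this fini shows up in the ttm_bo_init() hunk further
down: every bo starts out with the embedded reservation object, and it is
only torn down here when the driver has not pointed bo->resv elsewhere.
Schematically (a sketch, not taken from the patch):

#include <linux/reservation.h>

static void bo_resv_setup(struct ttm_buffer_object *bo)
{
	bo->resv = &bo->ttm_resv;	/* default: the embedded object */
	reservation_object_init(bo->resv);
}

static void bo_resv_teardown(struct ttm_buffer_object *bo)
{
	if (bo->resv == &bo->ttm_resv)	/* only destroy what we created */
		reservation_object_fini(&bo->ttm_resv);
}
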
@@ -158,18 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible)
-{
-	if (interruptible) {
-		return wait_event_interruptible(bo->event_queue,
-					       !ttm_bo_is_reserved(bo));
-	} else {
-		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
-		return 0;
-	}
-}
-
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -218,65 +209,27 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool no_wait, bool use_ticket,
 			  struct ww_acquire_ctx *ticket)
 {
-	int ret;
+	int ret = 0;
 
-	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		/**
-		 * Deadlock avoidance for multi-bo reserving.
-		 */
-		if (use_ticket && bo->seq_valid) {
-			/**
-			 * We've already reserved this one.
-			 */
-			if (unlikely(ticket->stamp == bo->val_seq))
-				return -EDEADLK;
-			/**
-			 * Already reserved by a thread that will not back
-			 * off for us. We need to back off.
-			 */
-			if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
-				return -EAGAIN;
-		}
+	if (no_wait) {
+		bool success;
 
-		if (no_wait)
+		/* not valid any more, fix your locking! */
+		if (WARN_ON(ticket))
 			return -EBUSY;
 
-		ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		if (unlikely(ret))
-			return ret;
+		success = ww_mutex_trylock(&bo->resv->lock);
+		return success ? 0 : -EBUSY;
 	}
 
-	if (use_ticket) {
-		bool wake_up = false;
-
-		/**
-		 * Wake up waiters that may need to recheck for deadlock,
-		 * if we decreased the sequence number.
-		 */
-		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
-			     || !bo->seq_valid))
-			wake_up = true;
-
-		/*
-		 * In the worst case with memory ordering these values can be
-		 * seen in the wrong order. However since we call wake_up_all
-		 * in that case, this will hopefully not pose a problem,
-		 * and the worst case would only cause someone to accidentally
-		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
-		 * val_seq. However this would only happen if seq_valid was
-		 * written before val_seq was, and just means some slightly
-		 * increased cpu usage
-		 */
-		bo->val_seq = ticket->stamp;
-		bo->seq_valid = true;
-		if (wake_up)
-			wake_up_all(&bo->event_queue);
-	} else {
-		bo->seq_valid = false;
-	}
-
-	return 0;
+	if (interruptible)
+		ret = ww_mutex_lock_interruptible(&bo->resv->lock,
+						  ticket);
+	else
+		ret = ww_mutex_lock(&bo->resv->lock, ticket);
+	if (ret == -EINTR)
+		return -ERESTARTSYS;
+	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_reserve);
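
One detail in the rewrite above: the interruptible ww_mutex calls report a
pending signal as -EINTR, while TTM's ioctl-facing callers expect
-ERESTARTSYS so the syscall is transparently restarted. Hence the explicit
translation; the same pattern in isolation (sketch):

static int reserve_interruptible(struct ww_mutex *lock,
				 struct ww_acquire_ctx *ctx)
{
	int ret = ww_mutex_lock_interruptible(lock, ctx);

	return ret == -EINTR ? -ERESTARTSYS : ret;
}
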

@@ -313,50 +266,27 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	return ret;
 }
 
-int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-				  bool interruptible,
-				  struct ww_acquire_ctx *ticket)
-{
-	bool wake_up = false;
-	int ret;
-
-	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
-
-		ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-		if (unlikely(ret))
-			return ret;
-	}
-
-	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
-		wake_up = true;
-
-	/**
-	 * Wake up waiters that may need to recheck for deadlock,
-	 * if we decreased the sequence number.
-	 */
-	bo->val_seq = ticket->stamp;
-	bo->seq_valid = true;
-	if (wake_up)
-		wake_up_all(&bo->event_queue);
-
-	return 0;
-}
-
 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 			    bool interruptible, struct ww_acquire_ctx *ticket)
 {
 	struct ttm_bo_global *glob = bo->glob;
-	int put_count, ret;
+	int put_count = 0;
+	int ret = 0;
 
-	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
-	if (likely(!ret)) {
+	if (interruptible)
+		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+						       ticket);
+	else
+		ww_mutex_lock_slow(&bo->resv->lock, ticket);
+
+	if (likely(ret == 0)) {
 		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_list_ref_sub(bo, put_count, true);
-	}
+	} else if (ret == -EINTR)
+		ret = -ERESTARTSYS;
 
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
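
The asymmetry above is part of the ww_mutex API: ww_mutex_lock_slow()
returns void because, called after a full backoff with no other lock held
under the ticket, it cannot fail, while the interruptible variant can
still be interrupted by a signal. The contract as a sketch (not from the
patch):

/* Call only after backing off, i.e. with no other ww_mutex held
 * under 'ctx'. */
static int reserve_slowpath(struct ww_mutex *lock,
			    struct ww_acquire_ctx *ctx, bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = ww_mutex_lock_slow_interruptible(lock, ctx);
	else
		ww_mutex_lock_slow(lock, ctx);	/* void: always succeeds */

	return ret == -EINTR ? -ERESTARTSYS : ret;
}
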
@@ -364,8 +294,7 @@ EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
 {
 	ttm_bo_add_to_lru(bo);
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ww_mutex_unlock(&bo->resv->lock);
 }
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
@@ -558,17 +487,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	}
 	ttm_bo_mem_put(bo, &bo->mem);
 
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
-
-	/*
-	 * Since the final reference to this bo may not be dropped by
-	 * the current task we have to put a memory barrier here to make
-	 * sure the changes done in this function are always visible.
-	 *
-	 * This function only needs protection against the final kref_put.
-	 */
-	smp_mb__before_atomic_dec();
+	ww_mutex_unlock (&bo->resv->lock);
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
@@ -600,10 +519,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret) {
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
-	}
+	if (!ret)
+		ww_mutex_unlock(&bo->resv->lock);
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -653,8 +570,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 
 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -692,8 +608,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		spin_unlock(&bdev->fence_lock);
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
@@ -1253,6 +1168,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	int ret = 0;
 	unsigned long num_pages;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	bool locked;
 
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 	if (ret) {
@@ -1279,8 +1195,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
-	atomic_set(&bo->reserved, 1);
-	init_waitqueue_head(&bo->event_queue);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
@@ -1298,36 +1212,33 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.bus.io_reserved_count = 0;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
-	bo->seq_valid = false;
 	bo->persistent_swap_storage = persistent_swap_storage;
 	bo->acc_size = acc_size;
 	bo->sg = sg;
+	bo->resv = &bo->ttm_resv;
+	reservation_object_init(bo->resv);
 	atomic_inc(&bo->glob->bo_count);
 
 	ret = ttm_bo_check_placement(bo, placement);
-	if (unlikely(ret != 0))
-		goto out_err;
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-	if (bo->type == ttm_bo_type_device ||
-	    bo->type == ttm_bo_type_sg) {
+	if (likely(!ret) &&
+	    (bo->type == ttm_bo_type_device ||
+	     bo->type == ttm_bo_type_sg))
 		ret = ttm_bo_setup_vm(bo);
-		if (ret)
-			goto out_err;
-	}
 
+	locked = ww_mutex_trylock(&bo->resv->lock);
+	WARN_ON(!locked);
+
+	if (likely(!ret))
 		ret = ttm_bo_validate(bo, placement, interruptible, false);
-	if (ret)
-		goto out_err;
 
 	ttm_bo_unreserve(bo);
-	return 0;
 
-out_err:
-	ttm_bo_unreserve(bo);
+	if (unlikely(ret))
 		ttm_bo_unref(&bo);
 
 	return ret;
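
Under the old code a bo was born reserved via atomic_set(&bo->reserved, 1);
with a ww_mutex the object is initialized unlocked and then trylocked. A
freshly constructed bo is not yet visible to anyone else, so the trylock
must succeed, which the WARN_ON documents. The idiom in isolation (sketch):

static void lock_new_bo(struct ttm_buffer_object *bo)
{
	bool locked = ww_mutex_trylock(&bo->resv->lock);

	WARN_ON(!locked);	/* nobody else can see this bo yet */
}
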
@@ -1941,8 +1852,7 @@ out:
 	 * already swapped buffer.
 	 */
 
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ww_mutex_unlock(&bo->resv->lock);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
drivers/gpu/drm/ttm/ttm_bo_util.c  +5 −1
@@ -433,6 +433,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *fbo;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_driver *driver = bdev->driver;
+	int ret;
 
 	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
 	if (!fbo)
@@ -445,7 +446,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
@@ -463,6 +463,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
 	fbo->acc_size = 0;
+	fbo->resv = &fbo->ttm_resv;
+	reservation_object_init(fbo->resv);
+	ret = ww_mutex_trylock(&fbo->resv->lock);
+	WARN_ON(!ret);
 
 	*new_obj = fbo;
 	return 0;
drivers/gpu/drm/ttm/ttm_execbuf_util.c  +17 −26
@@ -48,8 +48,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list,
 			entry->removed = false;
 
 		} else {
-			atomic_set(&bo->reserved, 0);
-			wake_up_all(&bo->event_queue);
+			ww_mutex_unlock(&bo->resv->lock);
 		}
 	}
 }
@@ -134,8 +133,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	glob = entry->bo->glob;
 
 	ww_acquire_init(ticket, &reservation_ww_class);
-	spin_lock(&glob->lru_lock);
 
 retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
@@ -144,42 +141,34 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
-		switch (ret) {
-		case 0:
-			break;
-		case -EBUSY:
-			ttm_eu_del_from_lru_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ret = ttm_bo_reserve_nolru(bo, true, false,
-						   true, ticket);
-			spin_lock(&glob->lru_lock);
-
-			if (!ret)
-				break;
-
-			if (unlikely(ret != -EAGAIN))
-				goto err;
+		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
 
-			/* fallthrough */
-		case -EAGAIN:
+		if (ret == -EDEADLK) {
 			/* uh oh, we lost out, drop every reservation and try
 			 * to only reserve this buffer, then start over if
 			 * this succeeds.
 			 */
+			spin_lock(&glob->lru_lock);
 			ttm_eu_backoff_reservation_locked(list, ticket);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
-			if (unlikely(ret != 0))
+			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+							       ticket);
+			if (unlikely(ret != 0)) {
+				if (ret == -EINTR)
+					ret = -ERESTARTSYS;
 				goto err_fini;
+			}
 
-			spin_lock(&glob->lru_lock);
 			entry->reserved = true;
 			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
 				ret = -EBUSY;
 				goto err;
 			}
 			goto retry;
-		default:
+		} else if (ret)
 			goto err;
-		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -189,12 +178,14 @@ retry:
 	}
 
 	ww_acquire_done(ticket);
+	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
 	return 0;
 
 err:
+	spin_lock(&glob->lru_lock);
 	ttm_eu_backoff_reservation_locked(list, ticket);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
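
Stepping back, the rewritten ttm_eu_reserve_buffers() follows the standard
ww_acquire ticket lifecycle. A self-contained skeleton of that dance,
adapted from the ww_mutex design notes rather than taken from this patch
('locks' is a hypothetical array of ww_mutex pointers):

#include <linux/ww_mutex.h>

static int reserve_all(struct ww_mutex **locks, int n,
		       struct ww_acquire_ctx *ticket,
		       struct ww_class *class)
{
	int i, j, contended = -1, ret;

	ww_acquire_init(ticket, class);
retry:
	for (i = 0; i < n; i++) {
		if (i == contended)	/* already taken by the slowpath */
			continue;
		ret = ww_mutex_lock_interruptible(locks[i], ticket);
		if (ret)
			goto err;
	}
	ww_acquire_done(ticket);	/* no further locks will be taken */
	return 0;	/* caller unlocks all, then ww_acquire_fini() */

err:
	for (j = 0; j < i; j++)		/* back off: drop every held lock */
		ww_mutex_unlock(locks[j]);
	if (contended > i)		/* slowpath lock past the error point */
		ww_mutex_unlock(locks[contended]);
	contended = -1;

	if (ret == -EDEADLK) {
		contended = i;
		/* sleeps until the lock we lost out on becomes free */
		ret = ww_mutex_lock_slow_interruptible(locks[i], ticket);
		if (!ret)
			goto retry;
	}
	ww_acquire_fini(ticket);
	return ret == -EINTR ? -ERESTARTSYS : ret;
}
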