Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dda9cd9f authored by Linus Torvalds
Browse files
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: don't drop handle reference on unload
  drm/ttm: Fix two race conditions + fix busy codepaths
parents afe14746 dab8dcfa
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -238,8 +238,8 @@ int intel_fbdev_destroy(struct drm_device *dev,

	drm_framebuffer_cleanup(&ifb->base);
	if (ifb->obj) {
		drm_gem_object_handle_unreference(ifb->obj);
		drm_gem_object_unreference(ifb->obj);
		ifb->obj = NULL;
	}

	return 0;
+0 −1
Original line number Diff line number Diff line
@@ -352,7 +352,6 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)

	if (nouveau_fb->nvbo) {
		nouveau_bo_unmap(nouveau_fb->nvbo);
		drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
		nouveau_fb->nvbo = NULL;
	}
+0 −1
Original line number Diff line number Diff line
@@ -79,7 +79,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
	mutex_lock(&dev->struct_mutex);
	nouveau_bo_unpin(chan->notifier_bo);
	mutex_unlock(&dev->struct_mutex);
	drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
	drm_mm_takedown(&chan->notifier_heap);
}
+0 −1
Original line number Diff line number Diff line
@@ -97,7 +97,6 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}
	drm_gem_object_handle_unreference(gobj);
	drm_gem_object_unreference_unlocked(gobj);
}

+71 −12
Original line number Diff line number Diff line
@@ -441,6 +441,43 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
	return ret;
}

/**
 * ttm_bo_cleanup_memtype_use - release a buffer object's memory-type
 * resources: its TTM backing store and its memory-manager node.
 *
 * Must be called with bo::reserved held and with the global lru lock
 * held.  This is the place to put in driver specific hooks.
 *
 * On exit this function releases BOTH bo::reserved (clearing the atomic
 * flag and waking waiters) and the lru lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	if (bo->ttm) {

		/*
		 * Temporarily release the lru_lock, since we don't want
		 * to impose an atomic-context requirement on
		 * ttm_tt_unbind()/ttm_tt_destroy().  The lock is
		 * re-taken below so the mm_node teardown stays under it.
		 */

		spin_unlock(&glob->lru_lock);
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
		spin_lock(&glob->lru_lock);
	}

	/* Return the managed memory range, if one is attached. */
	if (bo->mem.mm_node) {
		drm_mm_put_block(bo->mem.mm_node);
		bo->mem.mm_node = NULL;
	}

	/*
	 * Drop the reservation: clear the flag, wake anyone waiting on
	 * the event queue for it, then release the lru lock as promised
	 * in the contract above.
	 */
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}


/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
	int ret;

	spin_lock(&bo->lock);
retry:
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);

		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);
		/**
		 * Someone else has the object reserved. Bail and retry.
		 */

		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			spin_lock(&bo->lock);
			goto requeue;
		}

		/**
		 * We can re-check for sync object without taking
		 * the bo::lock since setting the sync object requires
		 * also bo::reserved. A busy object at this point may
		 * be caused by another thread starting an accelerated
		 * eviction.
		 */

		if (unlikely(bo->sync_obj)) {
			atomic_set(&bo->reserved, 0);
			wake_up_all(&bo->event_queue);
			spin_unlock(&glob->lru_lock);
			spin_lock(&bo->lock);
			if (remove_all)
				goto retry;
			else
				goto requeue;
		}

		put_count = ttm_bo_del_from_lru(bo);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			++put_count;
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);
		ttm_bo_cleanup_memtype_use(bo);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_ref_bug);

		return 0;
	}

requeue:
	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
Loading