Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dfaae392 authored by Chris Wilson
Browse files

drm/i915: Clear the gpu_write_list on resetting write_domain upon hang



Otherwise we will hit a list handling assertion when moving the object
to the inactive list.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 9e0ae534
Loading
Loading
Loading
Loading
+1 −15
Original line number Diff line number Diff line
@@ -395,21 +395,7 @@ int i915_reset(struct drm_device *dev, u8 flags)

	mutex_lock(&dev->struct_mutex);

	/*
	 * Clear request list
	 */
	i915_gem_retire_requests(dev);

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	i915_gem_reset_flushing_list(dev);

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	i915_gem_reset_inactive_gpu_domains(dev);
	i915_gem_reset_lists(dev);

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+1 −2
Original line number Diff line number Diff line
@@ -1005,8 +1005,7 @@ int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
				  bool interruptible);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset_flushing_list(struct drm_device *dev);
void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev);
void i915_gem_reset_lists(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_gem_object *obj);
void i915_gem_flush_ring(struct drm_device *dev,
			 struct drm_file *file_priv,
+40 −11
Original line number Diff line number Diff line
@@ -1682,27 +1682,60 @@ i915_get_gem_seqno(struct drm_device *dev,
	return ring->get_gem_seqno(dev, ring);
}

void i915_gem_reset_flushing_list(struct drm_device *dev)
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		list_del(&request->client_list);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
		obj_priv = list_first_entry(&ring->active_list,
					    struct drm_i915_gem_object,
					    list);

		obj_priv->base.write_domain = 0;
		list_del_init(&obj_priv->gpu_write_list);
		i915_gem_object_move_to_inactive(&obj_priv->base);
	}
}

void i915_gem_reset_inactive_gpu_domains(struct drm_device *dev)
void i915_gem_reset_lists(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;

	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	while (!list_empty(&dev_priv->mm.flushing_list)) {
		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);

		obj_priv->base.write_domain = 0;
		list_del_init(&obj_priv->gpu_write_list);
		i915_gem_object_move_to_inactive(&obj_priv->base);
	}

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	list_for_each_entry(obj_priv,
			    &dev_priv->mm.inactive_list,
			    list)
@@ -1720,15 +1753,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;
	bool wedged;

	if (!ring->status_page.page_addr ||
	    list_empty(&ring->request_list))
		return;

	seqno = i915_get_gem_seqno(dev, ring);
	wedged = atomic_read(&dev_priv->mm.wedged);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

@@ -1736,7 +1766,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
					   struct drm_i915_gem_request,
					   list);

		if (!wedged && !i915_seqno_passed(seqno, request->seqno))
		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(dev, request->seqno);
@@ -1757,8 +1787,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
					    struct drm_i915_gem_object,
					    list);

		if (!wedged &&
		    !i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
			break;

		obj = &obj_priv->base;