
Commit 9af90d19 authored by Chris Wilson

drm/i915: cache the last object lookup during pin_and_relocate()



The most frequent relocations within a batchbuffer are contiguous runs of
vertex buffer relocations, for which we can virtually eliminate the
drm_gem_object_lookup() overhead by caching the last handle-to-object
translation.
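
The idea in miniature: remember the previous handle and the object it
resolved to across loop iterations, and only pay for a fresh lookup when
the handle changes. A self-contained userspace sketch of the pattern
(illustrative only; expensive_lookup() is a hypothetical stand-in for
drm_gem_object_lookup() and its idr walk under the lock):

#include <stdio.h>

/* Stand-in for drm_gem_object_lookup(); counts how often we pay for it. */
static int lookup_count;
static const char *expensive_lookup(unsigned int handle)
{
	static const char *table[] = { NULL, "vbo", "batch", "texture" };
	lookup_count++;
	return table[handle];
}

int main(void)
{
	/* A relocation stream with long runs of the same target handle,
	 * as a batchbuffer's vertex buffer relocations typically are. */
	unsigned int relocs[] = { 1, 1, 1, 1, 2, 2, 3, 3, 3 };
	const char *cached = NULL;
	unsigned int last_handle = 0;	/* 0 is never a valid GEM handle */
	unsigned int i;

	for (i = 0; i < sizeof(relocs) / sizeof(relocs[0]); i++) {
		if (relocs[i] != last_handle) {
			cached = expensive_lookup(relocs[i]);
			last_handle = relocs[i];
		}
		printf("reloc %u -> %s\n", i, cached);
	}

	/* 3 lookups instead of 9 */
	printf("lookups: %d\n", lookup_count);
	return 0;
}

Note that in the driver the cached object carries a reference:
drm_gem_object_lookup() returns a new one, so the loop below drops the
stale reference before looking up a new handle, and drops the final one
after the loop.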

In doing so, we refactor the pin-and-relocate retry loop out of
do_execbuffer into its own helper function, improving the error paths
along the way.
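
The helper's shape: one bounded retry loop that attempts to pin every
buffer, and on GTT exhaustion (-ENOSPC) evicts everything and tries
exactly once more, so all unwinding happens in one place. A minimal
sketch of that control flow (pin_all() and evict_everything() are
hypothetical stand-ins for the driver calls; the real helper is the new
i915_gem_execbuffer_pin() below):

#include <errno.h>
#include <stdio.h>

static int aperture_full = 1;	/* pretend the GTT starts out full */

static int pin_all(int count)
{
	(void)count;
	return aperture_full ? -ENOSPC : 0;
}

static int evict_everything(void)
{
	aperture_full = 0;	/* eviction frees the whole aperture */
	return 0;
}

static int execbuffer_pin(int count)
{
	int ret, retry;

	for (retry = 0; retry < 2; retry++) {
		ret = pin_all(count);
		if (ret == 0)
			break;

		/* hard error, or still no space after evicting: give up */
		if (ret != -ENOSPC || retry)
			return ret;

		ret = evict_everything();
		if (ret)
			return ret;
	}

	return 0;
}

int main(void)
{
	printf("execbuffer_pin: %d\n", execbuffer_pin(4));	/* prints 0 */
	return 0;
}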

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 1d7cfea1
drivers/gpu/drm/i915/i915_gem.c +144 −171
@@ -2152,6 +2152,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	drm_mm_put_block(obj_priv->gtt_space);
 	obj_priv->gtt_space = NULL;
+	obj_priv->gtt_offset = 0;
 
 	if (i915_gem_object_is_purgeable(obj_priv))
 		i915_gem_object_truncate(obj);
@@ -2645,12 +2646,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 search_free:
 	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
 					obj->size, alignment, 0);
-	if (free_space != NULL) {
+	if (free_space != NULL)
 		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
 						       alignment);
-		if (obj_priv->gtt_space != NULL)
-			obj_priv->gtt_offset = obj_priv->gtt_space->start;
-	}
 	if (obj_priv->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
@@ -2693,7 +2691,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	obj_priv->agp_mem = drm_agp_bind_pages(dev,
 					       obj_priv->pages,
 					       obj->size >> PAGE_SHIFT,
-					       obj_priv->gtt_offset,
+					       obj_priv->gtt_space->start,
 					       obj_priv->agp_type);
 	if (obj_priv->agp_mem == NULL) {
 		i915_gem_object_put_pages(obj);
@@ -2718,6 +2716,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+	obj_priv->gtt_offset = obj_priv->gtt_space->start;
 	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
 
 	return 0;
@@ -3240,74 +3239,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
-				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
+			     struct drm_file *file_priv,
+			     struct drm_i915_gem_exec_object2 *entry)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	int i, ret;
-	bool need_fence;
-
-	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-	             obj_priv->tiling_mode != I915_TILING_NONE;
-
-	/* Check fence reg constraints and rebind if necessary */
-	if (need_fence &&
-	    !i915_gem_object_fence_offset_ok(obj,
-					     obj_priv->tiling_mode)) {
-		ret = i915_gem_object_unbind(obj);
-		if (ret)
-			return ret;
-	}
-
-	/* Choose the GTT offset for our buffer and put it there. */
-	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
-	if (ret)
-		return ret;
-
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle blits to/from tiled surfaces.
-	 */
-	if (need_fence) {
-		ret = i915_gem_object_get_fence_reg(obj, true);
-		if (ret != 0) {
-			i915_gem_object_unpin(obj);
-			return ret;
-		}
-
-		dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
-	}
-
-	entry->offset = obj_priv->gtt_offset;
+	struct drm_gem_object *target_obj = NULL;
+	uint32_t target_handle = 0;
+	int i, ret = 0;
 
-	/* Apply the relocations, using the GTT aperture to avoid cache
-	 * flushing requirements.
-	 */
 	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
 	for (i = 0; i < entry->relocation_count; i++) {
 		struct drm_i915_gem_relocation_entry reloc;
-		struct drm_gem_object *target_obj;
-		struct drm_i915_gem_object *target_obj_priv;
+		uint32_t target_offset;
 
-		ret = __copy_from_user_inatomic(&reloc,
-						user_relocs+i,
-						sizeof(reloc));
-		if (ret) {
-			i915_gem_object_unpin(obj);
-			return -EFAULT;
+		if (__copy_from_user_inatomic(&reloc,
					      user_relocs+i,
					      sizeof(reloc))) {
+			ret = -EFAULT;
+			break;
 		}
 
-		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
-						   reloc.target_handle);
-		if (target_obj == NULL) {
-			i915_gem_object_unpin(obj);
-			return -ENOENT;
-		}
-		target_obj_priv = to_intel_bo(target_obj);
+		if (reloc.target_handle != target_handle) {
+			drm_gem_object_unreference(target_obj);
+
+			target_obj = drm_gem_object_lookup(dev, file_priv,
+							   reloc.target_handle);
+			if (target_obj == NULL) {
+				ret = -ENOENT;
+				break;
+			}
+
+			target_handle = reloc.target_handle;
+		}
+		target_offset = to_intel_bo(target_obj)->gtt_offset;
 
 #if WATCH_RELOC
 		DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3319,7 +3286,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			 (int) reloc.target_handle,
 			 (int) reloc.read_domains,
 			 (int) reloc.write_domain,
-			 (int) target_obj_priv->gtt_offset,
+			 (int) target_offset,
 			 (int) reloc.presumed_offset,
 			 reloc.delta);
 #endif
@@ -3327,12 +3294,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		/* The target buffer should have appeared before us in the
 		 * exec_object list, so it should have a GTT space bound by now.
 		 */
-		if (target_obj_priv->gtt_space == NULL) {
+		if (target_offset == 0) {
 			DRM_ERROR("No GTT space found for object %d\n",
 				  reloc.target_handle);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
 		/* Validate that the target is in a valid r/w GPU domain */
@@ -3344,9 +3310,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.read_domains,
 				  reloc.write_domain);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
 		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
@@ -3357,9 +3322,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.read_domains,
 				  reloc.write_domain);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 		if (reloc.write_domain && target_obj->pending_write_domain &&
 		    reloc.write_domain != target_obj->pending_write_domain) {
@@ -3370,40 +3334,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.write_domain,
 				  target_obj->pending_write_domain);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
 		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
+		target_obj->pending_write_domain = reloc.write_domain;
 
 		/* If the relocation already has the right value in it, no
 		 * more work needs to be done.
 		 */
-		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
-			drm_gem_object_unreference(target_obj);
+		if (target_offset == reloc.presumed_offset)
 			continue;
-		}
 
 		/* Check that the relocation address is valid... */
-		if (reloc.offset > obj->size - 4) {
+		if (reloc.offset > obj->base.size - 4) {
 			DRM_ERROR("Relocation beyond object bounds: "
 				  "obj %p target %d offset %d size %d.\n",
 				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->size);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+				  (int) reloc.offset, (int) obj->base.size);
+			ret = -EINVAL;
+			break;
 		}
 		if (reloc.offset & 3) {
 			DRM_ERROR("Relocation not 4-byte aligned: "
 				  "obj %p target %d offset %d.\n",
 				  obj, reloc.target_handle,
 				  (int) reloc.offset);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
 		/* and points to somewhere within the target object. */
@@ -3412,33 +3371,28 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  "obj %p target %d delta %d size %d.\n",
 				  obj, reloc.target_handle,
 				  (int) reloc.delta, (int) target_obj->size);
-			drm_gem_object_unreference(target_obj);
-			i915_gem_object_unpin(obj);
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
 
-		reloc.delta += target_obj_priv->gtt_offset;
-		if (obj->write_domain == I915_GEM_DOMAIN_CPU) {
+		reloc.delta += target_offset;
+		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
 			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
 			char *vaddr;
 
-			vaddr = kmap_atomic(obj_priv->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
+			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
 			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
 			kunmap_atomic(vaddr, KM_USER0);
 		} else {
 			uint32_t __iomem *reloc_entry;
 			void __iomem *reloc_page;
-			int ret;
 
-			ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-			if (ret) {
-				drm_gem_object_unreference(target_obj);
-				i915_gem_object_unpin(obj);
-				return ret;
-			}
+			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+			if (ret)
+				break;
 
 			/* Map the page containing the relocation we're going to perform.  */
-			reloc.offset += obj_priv->gtt_offset;
+			reloc.offset += obj->gtt_offset;
 			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 							      reloc.offset & PAGE_MASK,
 							      KM_USER0);
@@ -3447,8 +3401,74 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			iowrite32(reloc.delta, reloc_entry);
 			io_mapping_unmap_atomic(reloc_page, KM_USER0);
 		}
-
-		drm_gem_object_unreference(target_obj);
+	}
+
+	drm_gem_object_unreference(target_obj);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_pin(struct drm_device *dev,
+			struct drm_file *file,
+			struct drm_gem_object **object_list,
+			struct drm_i915_gem_exec_object2 *exec_list,
+			int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i, retry;
+
+	/* attempt to pin all of the buffers into the GTT */
+	for (retry = 0; retry < 2; retry++) {
+		ret = 0;
+		for (i = 0; i < count; i++) {
+			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
+			struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
+			bool need_fence =
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+
+			/* Check fence reg constraints and rebind if necessary */
+			if (need_fence &&
+			    !i915_gem_object_fence_offset_ok(&obj->base,
+							     obj->tiling_mode)) {
+				ret = i915_gem_object_unbind(&obj->base);
+				if (ret)
+					break;
+			}
+
+			ret = i915_gem_object_pin(&obj->base, entry->alignment);
+			if (ret)
+				break;
+
+			/*
+			 * Pre-965 chips need a fence register set up in order
+			 * to properly handle blits to/from tiled surfaces.
+			 */
+			if (need_fence) {
+				ret = i915_gem_object_get_fence_reg(&obj->base, true);
+				if (ret) {
+					i915_gem_object_unpin(&obj->base);
+					break;
+				}
+
+				dev_priv->fence_regs[obj->fence_reg].gpu = true;
+			}
+
+			entry->offset = obj->gtt_offset;
+		}
+
+		while (i--)
+			i915_gem_object_unpin(object_list[i]);
+
+		if (ret == 0)
+			break;
+
+		if (ret != -ENOSPC || retry)
+			return ret;
+
+		ret = i915_gem_evict_everything(dev);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -3551,7 +3571,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv,
+		       struct drm_file *file,
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec_list)
 {
@@ -3561,9 +3581,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_gem_request *request = NULL;
-	int ret, i, pinned = 0;
+	int ret, i, flips;
 	uint64_t exec_offset;
-	int pin_tries, flips;
 
 	struct intel_ring_buffer *ring = NULL;
 
@@ -3639,7 +3658,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Look up object handles */
 	for (i = 0; i < args->buffer_count; i++) {
-		object_list[i] = drm_gem_object_lookup(dev, file_priv,
+		object_list[i] = drm_gem_object_lookup(dev, file,
 						       exec_list[i].handle);
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
@@ -3662,63 +3681,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		obj_priv->in_execbuffer = true;
 	}
 
-	/* Pin and relocate */
-	for (pin_tries = 0; ; pin_tries++) {
-		ret = 0;
-
-		for (i = 0; i < args->buffer_count; i++) {
-			object_list[i]->pending_read_domains = 0;
-			object_list[i]->pending_write_domain = 0;
-			ret = i915_gem_object_pin_and_relocate(object_list[i],
-							       file_priv,
-							       &exec_list[i]);
-			if (ret)
-				break;
-			pinned = i + 1;
-		}
-		/* success */
-		if (ret == 0)
-			break;
-
-		/* error other than GTT full, or we've already tried again */
-		if (ret != -ENOSPC || pin_tries >= 1) {
-			if (ret != -ERESTARTSYS) {
-				unsigned long long total_size = 0;
-				int num_fences = 0;
-				for (i = 0; i < args->buffer_count; i++) {
-					obj_priv = to_intel_bo(object_list[i]);
-
-					total_size += object_list[i]->size;
-					num_fences +=
-						exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
-						obj_priv->tiling_mode != I915_TILING_NONE;
-				}
-				DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
-					  pinned+1, args->buffer_count,
-					  total_size, num_fences,
-					  ret);
-				DRM_ERROR("%u objects [%u pinned, %u GTT], "
-					  "%zu object bytes [%zu pinned], "
-					  "%zu /%zu gtt bytes\n",
-					  dev_priv->mm.object_count,
-					  dev_priv->mm.pin_count,
-					  dev_priv->mm.gtt_count,
-					  dev_priv->mm.object_memory,
-					  dev_priv->mm.pin_memory,
-					  dev_priv->mm.gtt_memory,
-					  dev_priv->mm.gtt_total);
-			}
-			goto err;
-		}
-
-		/* unpin all of our buffers */
-		for (i = 0; i < pinned; i++)
-			i915_gem_object_unpin(object_list[i]);
-		pinned = 0;
-
-		/* evict everyone we can from the aperture */
-		ret = i915_gem_evict_everything(dev);
-		if (ret && ret != -ENOSPC)
-			goto err;
-	}
+	/* Move the objects en-masse into the GTT, evicting if necessary. */
+	ret = i915_gem_execbuffer_pin(dev, file,
+				      object_list, exec_list,
+				      args->buffer_count);
+	if (ret)
+		goto err;
+
+	/* The objects are in their final locations, apply the relocations. */
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+		if (ret)
+			goto err;
+	}
 
@@ -3731,8 +3707,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
-	/* Sanity check the batch buffer, prior to moving objects */
-	exec_offset = exec_list[args->buffer_count - 1].offset;
+	/* Sanity check the batch buffer */
+	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
 	ret = i915_gem_check_execbuffer(args, exec_offset);
 	if (ret != 0) {
 		DRM_ERROR("execbuf with invalid offset/length\n");
@@ -3761,7 +3737,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			 dev->invalidate_domains,
 			 dev->flush_domains);
 #endif
-		i915_gem_flush(dev, file_priv,
+		i915_gem_flush(dev, file,
 			       dev->invalidate_domains,
 			       dev->flush_domains,
 			       dev_priv->mm.flush_rings);
@@ -3846,13 +3822,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_object_move_to_active(obj, ring);
 	}
 
-	i915_add_request(dev, file_priv, request, ring);
+	i915_add_request(dev, file, request, ring);
 	request = NULL;
 
 err:
-	for (i = 0; i < pinned; i++)
-		i915_gem_object_unpin(object_list[i]);
-
 	for (i = 0; i < args->buffer_count; i++) {
 		if (object_list[i]) {
 			obj_priv = to_intel_bo(object_list[i]);