
Commit 058d88c4 authored by Chris Wilson

drm/i915: Track pinned VMA



Treat the VMA as the primary struct responsible for tracking bindings
into the GPU's VM. That is, we want to treat the VMA returned after we
pin an object into the VM as the cookie we hold and eventually release
when unpinning. Doing so eliminates the ambiguity of pinning the object
and then searching for the relevant pin later.
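
To make the new contract concrete, here is a minimal before/after sketch
distilled from the hunks below; the wrapper function is hypothetical and
error handling is abbreviated:

static int example_pin_into_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* Before this patch: the pin returned an errno, and the binding
	 * then had to be searched for again to locate its VMA:
	 *
	 *	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	 *	if (ret)
	 *		return ret;
	 *	vma = i915_gem_obj_to_ggtt(obj);
	 *	...
	 *	i915_gem_object_ggtt_unpin(obj);
	 */

	/* After this patch: the pin hands back the VMA as the cookie. */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... use vma->node.start as the GGTT offset ... */

	i915_vma_unpin(vma);	/* release exactly the binding we pinned */
	return 0;
}

The same cookie pattern applies to the display-plane helpers:
i915_gem_object_pin_to_display_plane() now returns the VMA and
i915_gem_object_unpin_from_display_plane() takes it back.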

v2: Joonas' stylistic nitpicks, a fun rebase.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1471254551-25805-27-git-send-email-chris@chris-wilson.co.uk
parent 19880c4a
drivers/gpu/drm/i915/i915_debugfs.c +1 −1
@@ -105,7 +105,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
 
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+	return i915_gem_object_to_ggtt(obj, NULL) ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
drivers/gpu/drm/i915/i915_drv.h +15 −45
@@ -3075,7 +3075,7 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data(
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
 			 u64 size,
@@ -3279,12 +3279,11 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
+struct i915_vma * __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     const struct i915_ggtt_view *view);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					      const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -3304,63 +3303,34 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view);
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm);
-static inline u64
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
-{
-	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view);
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm);
-
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+		     struct i915_address_space *vm,
 		     const struct i915_ggtt_view *view);
 
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-				  struct i915_address_space *vm);
-struct i915_vma *
-i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+				  struct i915_address_space *vm,
 				  const struct i915_ggtt_view *view);
 
-static inline struct i915_vma *
-i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
-	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
-}
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
-
 /* Some GGTT VM helpers */
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
-static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+static inline struct i915_vma *
+i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
+			const struct i915_ggtt_view *view)
 {
-	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
+	return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-				     const struct i915_ggtt_view *view);
-static inline void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+static inline unsigned long
+i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
+			    const struct i915_ggtt_view *view)
 {
-	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+	return i915_gem_object_to_ggtt(o, view)->node.start;
 }
 
 /* i915_gem_fence.c */
drivers/gpu/drm/i915/i915_gem.c +62 −171
@@ -746,14 +746,15 @@ i915_gem_gtt_pread(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_vma *vma;
 	struct drm_mm_node node;
 	char __user *user_data;
 	uint64_t remain;
 	uint64_t offset;
 	int ret;
 
-	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
-	if (ret) {
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
 		if (ret)
 			goto out;
@@ -766,7 +767,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 
 		i915_gem_object_pin_pages(obj);
 	} else {
-		node.start = i915_gem_obj_ggtt_offset(obj);
+		node.start = vma->node.start;
 		node.allocated = false;
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
@@ -847,7 +848,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		i915_gem_object_unpin_pages(obj);
 		remove_mappable_node(&node);
 	} else {
-		i915_gem_object_ggtt_unpin(obj);
+		i915_vma_unpin(vma);
 	}
 out:
 	return ret;
@@ -1045,6 +1046,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct drm_device *dev = obj->base.dev;
+	struct i915_vma *vma;
 	struct drm_mm_node node;
 	uint64_t remain, offset;
 	char __user *user_data;
@@ -1054,9 +1056,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	if (i915_gem_object_is_tiled(obj))
 		return -EFAULT;
 
-	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE | PIN_NONBLOCK);
-	if (ret) {
+	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
 		if (ret)
 			goto out;
@@ -1069,7 +1071,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 
 		i915_gem_object_pin_pages(obj);
 	} else {
-		node.start = i915_gem_obj_ggtt_offset(obj);
+		node.start = vma->node.start;
 		node.allocated = false;
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
@@ -1157,7 +1159,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		i915_gem_object_unpin_pages(obj);
 		remove_mappable_node(&node);
 	} else {
-		i915_gem_object_ggtt_unpin(obj);
+		i915_vma_unpin(vma);
 	}
 out:
 	return ret;
@@ -1625,7 +1627,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 
 /**
  * i915_gem_fault - fault a page into the GTT
- * @vma: VMA in question
+ * @area: CPU VMA in question
  * @vmf: fault info
  *
  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -1639,20 +1641,21 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  * suffer if the GTT working set is large or there are few fence registers
  * left.
  */
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 {
-	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_ggtt_view view = i915_ggtt_view_normal;
 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
+	struct i915_vma *vma;
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
 		PAGE_SHIFT;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -1689,14 +1692,16 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		view.params.partial.size =
 			min_t(unsigned int,
 			      chunk_size,
-			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+			      (area->vm_end - area->vm_start) / PAGE_SIZE -
 			      view.params.partial.offset);
 	}
 
 	/* Now pin it into the GTT if needed */
-	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
-	if (ret)
+	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto err_unlock;
+	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, write);
 	if (ret)
@@ -1707,8 +1712,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto err_unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	pfn = ggtt->mappable_base +
-		i915_gem_obj_ggtt_offset_view(obj, &view);
+	pfn = ggtt->mappable_base + vma->node.start;
 	pfn >>= PAGE_SHIFT;
 
 	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
@@ -1717,12 +1721,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		 * is due to userspace losing part of the mapping or never
 		 * having accessed it before (at this partials' range).
 		 */
-		unsigned long base = vma->vm_start +
+		unsigned long base = area->vm_start +
 				     (view.params.partial.offset << PAGE_SHIFT);
 		unsigned int i;
 
 		for (i = 0; i < view.params.partial.size; i++) {
-			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
+			ret = vm_insert_pfn(area,
+					    base + i * PAGE_SIZE,
+					    pfn + i);
 			if (ret)
 				break;
 		}
@@ -1730,14 +1736,16 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		obj->fault_mappable = true;
 	} else {
 		if (!obj->fault_mappable) {
-			unsigned long size = min_t(unsigned long,
-						   vma->vm_end - vma->vm_start,
-						   obj->base.size);
+			unsigned long size =
+				min_t(unsigned long,
+				      area->vm_end - area->vm_start,
+				      obj->base.size) >> PAGE_SHIFT;
+			unsigned long base = area->vm_start;
 			int i;
 
-			for (i = 0; i < size >> PAGE_SHIFT; i++) {
-				ret = vm_insert_pfn(vma,
-						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
+			for (i = 0; i < size; i++) {
+				ret = vm_insert_pfn(area,
+						    base + i * PAGE_SIZE,
 						    pfn + i);
 				if (ret)
 					break;
@@ -1745,12 +1753,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 			obj->fault_mappable = true;
 		} else
-			ret = vm_insert_pfn(vma,
+			ret = vm_insert_pfn(area,
 					    (unsigned long)vmf->virtual_address,
 					    pfn + page_offset);
 	}
 err_unpin:
-	i915_gem_object_ggtt_unpin_view(obj, &view);
+	__i915_vma_unpin(vma);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
@@ -3235,7 +3243,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 					    old_write_domain);
 
 	/* And bump the LRU for this access */
-	vma = i915_gem_obj_to_ggtt(obj);
+	vma = i915_gem_object_to_ggtt(obj, NULL);
 	if (vma &&
 	    drm_mm_node_allocated(&vma->node) &&
 	    !i915_vma_is_active(vma))
@@ -3459,11 +3467,12 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
  */
-int
+struct i915_vma *
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     const struct i915_ggtt_view *view)
 {
+	struct i915_vma *vma;
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
@@ -3483,19 +3492,23 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
 					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
-	if (ret)
+	if (ret) {
+		vma = ERR_PTR(ret);
 		goto err_unpin_display;
+	}
 
 	/* As the user may map the buffer once pinned in the display plane
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+	vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
 				       view->type == I915_GGTT_VIEW_NORMAL ?
 				       PIN_MAPPABLE : 0);
-	if (ret)
+	if (IS_ERR(vma))
 		goto err_unpin_display;
 
+	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3511,23 +3524,23 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 					    old_read_domains,
 					    old_write_domain);
 
-	return 0;
+	return vma;
 
 err_unpin_display:
 	obj->pin_display--;
-	return ret;
+	return vma;
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
-					 const struct i915_ggtt_view *view)
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-	if (WARN_ON(obj->pin_display == 0))
+	if (WARN_ON(vma->obj->pin_display == 0))
 		return;
 
-	i915_gem_object_ggtt_unpin_view(obj, view);
+	vma->obj->pin_display--;
 
-	obj->pin_display--;
+	i915_vma_unpin(vma);
+	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
 }
 
 /**
@@ -3724,27 +3737,25 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	return ret;
 }
 
-int
+struct i915_vma *
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
 			 u64 size,
 			 u64 alignment,
 			 u64 flags)
 {
+	struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
 	struct i915_vma *vma;
 	int ret;
 
-	if (!view)
-		view = &i915_ggtt_view_normal;
-
-	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return vma;
 
 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
 		if (flags & PIN_NONBLOCK &&
 		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
-			return -ENOSPC;
+			return ERR_PTR(-ENOSPC);
 
 		WARN(i915_vma_is_pinned(vma),
 		     "bo is already pinned in ggtt with incorrect alignment:"
@@ -3757,17 +3768,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		     obj->map_and_fenceable);
 		ret = i915_vma_unbind(vma);
 		if (ret)
-			return ret;
+			return ERR_PTR(ret);
 	}
 
-	return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
-}
+	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+	if (ret)
+		return ERR_PTR(ret);
 
-void
-i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
-				const struct i915_ggtt_view *view)
-{
-	i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
+	return vma;
 }
 
 static __always_inline unsigned int __busy_read_flag(unsigned int id)
@@ -4153,32 +4161,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
-		    vma->vm == vm)
-			return vma;
-	}
-	return NULL;
-}
-
-struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
-					   const struct i915_ggtt_view *view)
-{
-	struct i915_vma *vma;
-
-	GEM_BUG_ON(!view);
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (i915_vma_is_ggtt(vma) &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma;
-	return NULL;
-}
-
 int i915_gem_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4646,97 +4628,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 	}
 }
 
-/* All the new VM stuff */
-u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-	struct i915_vma *vma;
-
-	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (i915_vma_is_ggtt(vma) &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm)
-			return vma->node.start;
-	}
-
-	WARN(1, "%s vma for this object not found.\n",
-	     i915_is_ggtt(vm) ? "global" : "ppgtt");
-	return -1;
-}
-
-u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (i915_vma_is_ggtt(vma) &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view))
-			return vma->node.start;
-
-	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
-	return -1;
-}
-
-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (i915_vma_is_ggtt(vma) &&
-		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-			continue;
-		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
-			return true;
-	}
-
-	return false;
-}
-
-bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
-				  const struct i915_ggtt_view *view)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (i915_vma_is_ggtt(vma) &&
-		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
-		    drm_mm_node_allocated(&vma->node))
-			return true;
-
-	return false;
-}
-
-unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
-{
-	struct i915_vma *vma;
-
-	GEM_BUG_ON(list_empty(&o->vma_list));
-
-	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (i915_vma_is_ggtt(vma) &&
-		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-			return vma->node.size;
-	}
-
-	return 0;
-}
-
-bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (i915_vma_is_pinned(vma))
-			return true;
-
-	return false;
-}
-
 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
 struct page *
 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
drivers/gpu/drm/i915/i915_gem_execbuffer.c +35 −30
@@ -180,8 +180,8 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		 * from the (obj, vm) we don't run the risk of creating
 		 * duplicated vmas for the same vm.
 		 */
-		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
-		if (IS_ERR(vma)) {
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+		if (unlikely(IS_ERR(vma))) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
 			goto err;
@@ -349,30 +349,33 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc,
 		   uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_vma *vma;
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
 	int ret;
 
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
-		return ret;
+		goto unpin;
 
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
-		return ret;
+		goto unpin;
 
 	/* Map the page containing the relocation we're going to perform.  */
-	offset = i915_gem_obj_ggtt_offset(obj);
-	offset += reloc->offset;
+	offset = vma->node.start + reloc->offset;
 	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_GEN(dev_priv) >= 8) {
 		offset += sizeof(uint32_t);
 
 		if (offset_in_page(offset) == 0) {
@@ -388,7 +391,9 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 
 	io_mapping_unmap_atomic(reloc_page);
 
-	return 0;
+unpin:
+	i915_vma_unpin(vma);
+	return ret;
 }
 
 static void
@@ -1305,31 +1310,28 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 				      batch_start_offset,
 				      batch_len,
 				      is_master);
-	if (ret)
-		goto err;
-
-	ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
-	if (ret)
-		goto err;
+	if (ret) {
+		if (ret == -EACCES) /* unhandled chained batch */
+			vma = NULL;
+		else
+			vma = ERR_PTR(ret);
+		goto out;
+	}
 
-	i915_gem_object_unpin_pages(shadow_batch_obj);
+	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+	if (IS_ERR(vma))
+		goto out;
 
 	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
-	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
 	vma->exec_entry = shadow_exec_entry;
 	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
 	i915_gem_object_get(shadow_batch_obj);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
-	return vma;
-
-err:
+out:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
-	if (ret == -EACCES) /* unhandled chained batch */
-		return NULL;
-	else
-		return ERR_PTR(ret);
+	return vma;
 }
 
 static int
@@ -1677,6 +1679,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (dispatch_flags & I915_DISPATCH_SECURE) {
 		struct drm_i915_gem_object *obj = params->batch->obj;
+		struct i915_vma *vma;
 
 		/*
 		 * So on first glance it looks freaky that we pin the batch here
@@ -1688,11 +1691,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
-		if (ret)
+		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
 			goto err;
+		}
 
-		params->batch = i915_gem_obj_to_ggtt(obj);
+		params->batch = vma;
 	}
 
 	/* Allocate a request for this batch buffer nice and early. */
@@ -1708,7 +1713,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
-	params->request->batch_obj = params->batch->obj;
+	params->request->batch = params->batch;
 
 	ret = i915_gem_request_add_to_client(params->request, file);
 	if (ret)
drivers/gpu/drm/i915/i915_gem_fence.c +4 −10
@@ -85,7 +85,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	POSTING_READ(fence_reg_lo);
 
 	if (obj) {
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
 		unsigned int tiling = i915_gem_object_get_tiling(obj);
 		unsigned int stride = i915_gem_object_get_stride(obj);
 		u32 size = vma->node.size;
@@ -120,7 +120,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 	u32 val;
 
 	if (obj) {
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
 		unsigned int tiling = i915_gem_object_get_tiling(obj);
 		unsigned int stride = i915_gem_object_get_stride(obj);
 		int pitch_val;
@@ -161,7 +161,7 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
 	u32 val;
 
 	if (obj) {
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+		struct i915_vma *vma = i915_gem_object_to_ggtt(obj, NULL);
 		unsigned int tiling = i915_gem_object_get_tiling(obj);
 		unsigned int stride = i915_gem_object_get_stride(obj);
 		u32 pitch_val;
@@ -432,13 +432,7 @@ bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
-
-		WARN_ON(!ggtt_vma ||
-			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			i915_vma_pin_count(ggtt_vma));
-		dev_priv->fence_regs[obj->fence_reg].pin_count++;
+		to_i915(obj->base.dev)->fence_regs[obj->fence_reg].pin_count++;
 		return true;
 	} else
 		return false;