Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3bd40735 authored by Chris Wilson
Browse files

drm/i915: Consolidate get_fence with pin_fence



Following the pattern now used for obj->mm.pages, use just pin_fence and
unpin_fence to control access to the fence registers. I.e. instead of
calling get_fence(); pin_fence(), we now just need to call pin_fence().
This will make it easier to reduce the locking requirements around
fence registers.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-2-chris@chris-wilson.co.uk


Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent b4563f59
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -3759,8 +3759,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
}

/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
+2 −1
Original line number Diff line number Diff line
@@ -1910,7 +1910,7 @@ int i915_gem_fault(struct vm_fault *vmf)
	if (ret)
		goto err_unpin;

	ret = i915_vma_get_fence(vma);
	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

@@ -1926,6 +1926,7 @@ int i915_gem_fault(struct vm_fault *vmf)
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);

	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
+5 −5
Original line number Diff line number Diff line
@@ -367,12 +367,12 @@ eb_pin_vma(struct i915_execbuffer *eb,
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_get_fence(vma))) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (i915_vma_pin_fence(vma))
		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

@@ -385,7 +385,7 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		i915_vma_unpin_fence(vma);
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}
@@ -563,13 +563,13 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_get_fence(vma);
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (i915_vma_pin_fence(vma))
		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

+28 −5
Original line number Diff line number Diff line
@@ -280,8 +280,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 *
 * 0 on success, negative error code on failure.
 */
int
i915_vma_put_fence(struct i915_vma *vma)
int i915_vma_put_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence = vma->fence;

@@ -299,6 +298,8 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
	struct drm_i915_fence_reg *fence;

	list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence->pin_count)
			continue;

@@ -313,7 +314,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}

/**
 * i915_vma_get_fence - set up fencing for a vma
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
@@ -331,10 +332,11 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
 * 0 on success, negative error code on failure.
 */
int
i915_vma_get_fence(struct i915_vma *vma)
i915_vma_pin_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
	int err;

	/* Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
@@ -344,6 +346,8 @@ i915_vma_get_fence(struct i915_vma *vma)
	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		fence->pin_count++;
		if (!fence->dirty) {
			list_move_tail(&fence->link,
				       &fence->i915->mm.fence_list);
@@ -353,10 +357,25 @@ i915_vma_get_fence(struct i915_vma *vma)
		fence = fence_find(vma->vm->i915);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		GEM_BUG_ON(fence->pin_count);
		fence->pin_count++;
	} else
		return 0;

	return fence_update(fence, set);
	err = fence_update(fence, set);
	if (err)
		goto out_unpin;

	GEM_BUG_ON(fence->vma != set);
	GEM_BUG_ON(vma->fence != (set ? fence : NULL));

	if (set)
		return 0;

out_unpin:
	fence->pin_count--;
	return err;
}

/**
@@ -429,6 +448,8 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence->vma)
			i915_gem_release_mmap(fence->vma->obj);
	}
@@ -450,6 +471,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct i915_vma *vma = reg->vma;

		GEM_BUG_ON(vma && vma->fence != reg);

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
+1 −3
Original line number Diff line number Diff line
@@ -309,12 +309,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)

	__i915_vma_pin(vma);

	err = i915_vma_get_fence(vma);
	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_pin_fence(vma);

	return ptr;

err_unpin:
Loading