Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b5d27b9 authored by Daniel Vetter
Browse files

drm/i915: Remove intel_flip_work infrastructure



This gets rid of all the interactions between the legacy flip code and
the modeset code. Yay!

This highlights an omission in the atomic paths, where we fail to
apply a boost to the pending rendering when we miss the target vblank.
But the existing code is still dead and can be removed.

v2: Note that the boosting doesn't work in atomic (Chris).

Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170720175754.30751-7-daniel.vetter@ffwll.ch
parent fa05887a
Loading
Loading
Loading
Loading
+0 −70
Original line number Diff line number Diff line
@@ -543,75 +543,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
	return 0;
}

/*
 * Debugfs dump of per-CRTC legacy pageflip state ("i915_gem_pageflip").
 *
 * For each CRTC this reports whether a flip is queued, which engine and
 * request it is associated with, the vblank counters at queue/ready time,
 * and the current vs. pending scanout addresses.
 *
 * Returns 0 on success, or a negative errno if the interruptible wait for
 * struct_mutex was aborted by a signal.
 */
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_flip_work *work;

		/* crtc->flip_work is protected by dev->event_lock. */
		spin_lock_irq(&dev->event_lock);
		work = crtc->flip_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 pending;
			u32 addr;

			pending = atomic_read(&work->pending);
			if (pending) {
				seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *engine = work->flip_queued_req->engine;

				seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
					   engine->name,
					   work->flip_queued_req->global_seqno,
					   intel_engine_last_submit(engine),
					   intel_engine_get_seqno(engine),
					   i915_gem_request_completed(work->flip_queued_req));
			} else
				seq_printf(m, "Flip not associated with any ring\n");
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   intel_crtc_get_vblank_counter(crtc));
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			/* Gen4+ keeps the surface base in DSPSURF; older parts
			 * use DSPADDR. */
			if (INTEL_GEN(dev_priv) >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4854,7 +4785,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
+0 −1
Original line number Diff line number Diff line
@@ -876,7 +876,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
	spin_lock_init(&dev_priv->uncore.lock);

	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);
+0 −4
Original line number Diff line number Diff line
@@ -2143,9 +2143,6 @@ struct drm_i915_private {
	/* protects the irq masks */
	spinlock_t irq_lock;

	/* protects the mmio flip data */
	spinlock_t mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -2250,7 +2247,6 @@ struct drm_i915_private {

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+0 −2
Original line number Diff line number Diff line
@@ -4935,8 +4935,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);
+1 −116
Original line number Diff line number Diff line
@@ -49,11 +49,6 @@
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
@@ -3551,35 +3546,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
	mutex_unlock(&dev->mode_config.mutex);
}

static bool abort_flip_on_reset(struct intel_crtc *crtc)
{
	struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;

	if (i915_reset_backoff(error))
		return true;

	if (crtc->reset_count != i915_reset_count(error))
		return true;

	return false;
}

static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (abort_flip_on_reset(intel_crtc))
		return false;

	spin_lock_irq(&dev->event_lock);
	pending = to_intel_crtc(crtc)->flip_work != NULL;
	spin_unlock_irq(&dev->event_lock);

	return pending;
}

static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
@@ -4157,57 +4123,6 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
	return false;
}

/*
 * Retire the CRTC's current flip_work: send the completion event (if the
 * userspace requested one), drop the vblank reference taken when the flip
 * was queued, wake anyone in intel_crtc_wait_for_pending_flips(), and punt
 * the unpin work to the driver workqueue.
 *
 * Caller must hold dev->event_lock (flip_work is cleared under it).
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	/* Clear first so a new flip can be queued against this crtc. */
	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);

	/* Unpinning can sleep, so defer it to the wq. */
	queue_work(dev_priv->wq, &work->unpin_work);
}

/*
 * Block (interruptibly, with a 60s timeout) until @crtc has no pending
 * page flip.  If the wait times out with a non-MMIO flip still pending,
 * the flip is declared stuck and force-completed via page_flip_completed().
 *
 * Returns 0 on success (including the stuck-flip recovery path) or a
 * negative errno if interrupted by a signal.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	/* ret == 0 means the 60s timeout elapsed. */
	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		/* Re-check under the lock; the flip may have completed
		 * between the timeout and here. */
		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;
@@ -5814,8 +5729,6 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
		return;

	if (crtc->primary->state->visible) {
		WARN_ON(intel_crtc->flip_work);

		intel_pre_disable_primary_noatomic(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
@@ -10088,35 +10001,11 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
/*
 * Destroy an intel_crtc.  Any outstanding flip_work is detached under
 * dev->event_lock, then its work items are cancelled synchronously
 * outside the lock (cancel_work_sync() may sleep) before the work and
 * the crtc itself are freed.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_flip_work *work;

	/* Steal the flip_work pointer under the lock ... */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	/* ... then cancel/free it where sleeping is allowed. */
	if (work) {
		cancel_work_sync(&work->mmio_work);
		cancel_work_sync(&work->unpin_work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/*
 * Mark @work as active: record the vblank counter at queue time and set
 * work->pending.  The barrier orders the flip_queued_vblank store before
 * the pending flag becomes visible to other CPUs.
 */
static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ... */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}

/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
@@ -11935,10 +11824,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}
@@ -12712,7 +12597,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc, NULL);
	intel_pipe_update_end(intel_crtc);
}

/**
Loading