Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d1b851fc authored by Zou Nan hai, committed by Eric Anholt
Browse files

drm/i915: implement BSD ring buffer V2



The BSD (bit stream decoder) ring is used for accessing the BSD engine
which decodes video bitstream for H.264 and VC1 on G45+.  It is
asynchronous with the render ring and has access to separate parts of
the GPU from it, though the render cache is coherent between the two.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
parent 852835f3
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -130,6 +130,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
		drm_irq_uninstall(dev);

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
+2 −0
Original line number Diff line number Diff line
@@ -235,6 +235,7 @@ typedef struct drm_i915_private {

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer render_ring;
	struct intel_ring_buffer bsd_ring;

	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
@@ -1121,6 +1122,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
			 (dev)->pci_device == 0x2A42 ||		\
			 (dev)->pci_device == 0x2E42)

#define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+94 −13
Original line number Diff line number Diff line
@@ -1792,8 +1792,13 @@ i915_gem_retire_work_handler(struct work_struct *work)
	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev, &dev_priv->render_ring);

	if (HAS_BSD(dev))
		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

	if (!dev_priv->mm.suspended &&
			(!list_empty(&dev_priv->render_ring.request_list)))
		(!list_empty(&dev_priv->render_ring.request_list) ||
			(HAS_BSD(dev) &&
			 !list_empty(&dev_priv->bsd_ring.request_list))))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
@@ -1883,6 +1888,11 @@ i915_gem_flush(struct drm_device *dev,
	dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
			invalidate_domains,
			flush_domains);

	if (HAS_BSD(dev))
		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
				invalidate_domains,
				flush_domains);
}

static void
@@ -2039,12 +2049,14 @@ i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	uint32_t seqno;
	uint32_t seqno1, seqno2;
	int ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev) ||
			list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
@@ -2052,11 +2064,23 @@ i915_gpu_idle(struct drm_device *dev)

	/* Flush everything onto the inactive list. */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
			&dev_priv->render_ring);
	if (seqno == 0)
	if (seqno1 == 0)
		return -ENOMEM;
	ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);

	if (HAS_BSD(dev)) {
		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
				&dev_priv->bsd_ring);
		if (seqno2 == 0)
			return -ENOMEM;

		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
		if (ret)
			return ret;
	}


	return ret;
}
@@ -2071,7 +2095,9 @@ i915_gem_evict_everything(struct drm_device *dev)
	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list));
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
@@ -2091,7 +2117,9 @@ i915_gem_evict_everything(struct drm_device *dev)
	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list));
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

@@ -2106,9 +2134,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
	int ret;

	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
	for (;;) {
		i915_gem_retire_requests(dev, render_ring);

		if (HAS_BSD(dev))
			i915_gem_retire_requests(dev, bsd_ring);

		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
@@ -2146,6 +2178,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
			continue;
		}

		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&bsd_ring->request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev,
					request->seqno, request->ring);
			if (ret)
				return ret;

			continue;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
@@ -3641,6 +3688,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
	if (args->flags & I915_EXEC_BSD) {
		if (!HAS_BSD(dev)) {
			DRM_ERROR("execbuf with wrong flag\n");
			return -EINVAL;
		}
		ring = &dev_priv->bsd_ring;
	} else {
		ring = &dev_priv->render_ring;
	}


	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
@@ -3694,8 +3751,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		goto pre_mutex_err;
	}

	ring = &dev_priv->render_ring;

	/* Look up object handles */
	flips = 0;
	for (i = 0; i < args->buffer_count; i++) {
@@ -3834,6 +3889,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
					dev->flush_domains,
					&dev_priv->render_ring);

			if (HAS_BSD(dev))
				(void)i915_add_request(dev, file_priv,
						dev->flush_domains,
						&dev_priv->bsd_ring);
		}
	}

@@ -4267,6 +4326,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
	 */
	i915_gem_retire_requests(dev, &dev_priv->render_ring);

	if (HAS_BSD(dev))
		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

	obj_priv = to_intel_bo(obj);
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
@@ -4433,7 +4495,9 @@ i915_gem_idle(struct drm_device *dev)
	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended ||
			dev_priv->render_ring.gem_object == NULL) {
			(dev_priv->render_ring.gem_object == NULL) ||
			(HAS_BSD(dev) &&
			 dev_priv->bsd_ring.gem_object == NULL)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}
@@ -4550,6 +4614,10 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
			return ret;
	}
	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
	if (!ret && HAS_BSD(dev)) {
		dev_priv->bsd_ring = bsd_ring;
		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
	}
	return ret;
}

@@ -4559,6 +4627,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	if (HAS_BSD(dev))
		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}
@@ -4589,11 +4659,13 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);
@@ -4638,6 +4710,10 @@ i915_gem_load(struct drm_device *dev)
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
	if (HAS_BSD(dev)) {
		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
	}
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -4874,6 +4950,8 @@ i915_gpu_is_active(struct drm_device *dev)
	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->render_ring.active_list);
	if (HAS_BSD(dev))
		lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);

	return !lists_empty;
@@ -4920,6 +4998,9 @@ rescan:
		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev, &dev_priv->render_ring);

		if (HAS_BSD(dev))
			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
+11 −2
Original line number Diff line number Diff line
@@ -53,7 +53,7 @@
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -362,6 +362,9 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
	}
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);


	if (de_iir & DE_GSE)
		ironlake_opregion_gse_intr(dev);
@@ -944,6 +947,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
		}

		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

@@ -1297,7 +1303,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_PIPE_NOTIFY;
	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;

@@ -1376,6 +1382,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)

	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);

	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
+14 −0
Original line number Diff line number Diff line
@@ -334,6 +334,7 @@
#define   I915_DEBUG_INTERRUPT				(1<<2)
#define   I915_USER_INTERRUPT				(1<<1)
#define   I915_ASLE_INTERRUPT				(1<<0)
#define   I915_BSD_USER_INTERRUPT                      (1<<25)
#define EIR		0x020b0
#define EMR		0x020b4
#define ESR		0x020b8
@@ -368,6 +369,17 @@
#define BB_ADDR		0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL	0x02170 /* 915+ only */

/*
 * BSD (bit stream decoder) instruction and interrupt control register defines
 * (G4X and Ironlake only)
 */

#define BSD_RING_TAIL          0x04030
#define BSD_RING_HEAD          0x04034
#define BSD_RING_START         0x04038
#define BSD_RING_CTL           0x0403c
#define BSD_RING_ACTHD         0x04074
#define BSD_HWS_PGA            0x04080

/*
 * Framebuffer compression (915+ only)
@@ -2355,6 +2367,8 @@
#define GT_PIPE_NOTIFY		(1 << 4)
#define GT_SYNC_STATUS          (1 << 2)
#define GT_USER_INTERRUPT       (1 << 0)
#define GT_BSD_USER_INTERRUPT   (1 << 5)


#define GTISR   0x44010
#define GTIMR   0x44014
Loading