
Commit 1ec2c7fc authored by Dave Airlie


Merge tag 'drm-intel-fixes-2013-12-02' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes

Just flushing out my pile of bugfixes, most of them for regressions/cc:
stable. Nothing really serious going on.

For outstanding issues we still have the S4 fun due to the hsw S4
duct-tape pending (seems like I need to switch into angry maintainer mode
on that one). And there's the mode merging revert to make my g33 work
again still pending for drm core. For that one I don't have any more clue
(and it looks like no one else has a good idea either). And apparently the
locking WARN fix in here also needs to be replicated for boot, still
confirming that one though.

* tag 'drm-intel-fixes-2013-12-02' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: Pin pages whilst allocating for dma-buf vmap()
  drm/i915: MI_PREDICATE_RESULT_2 is HSW only
  drm/i915: Make the DERRMR SRM target global GTT
  drm/i915: use the correct force_wake function at the PC8 code
  drm/i915: Fix pipe CSC post offset calculation
  drm/i915: Simplify DP vs. eDP detection
  drm/i915: Check VBT for eDP ports on VLV
  drm/i915: use crtc_htotal in watermark calculations to match fastboot v2
  drm/i915: Pin relocations for the duration of constructing the execbuffer
  drm/i915: take mode config lock around crtc disable at suspend
  drm/i915: Prefer setting PTE cache age to 3
  drm/i915/ddi: set sink to power down mode on dp disable
parents 7bc494a9 993fc6eb
+2 −0
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
 		 * Disable CRTCs directly since we want to preserve sw state
 		 * for _thaw.
 		 */
+		mutex_lock(&dev->mode_config.mutex);
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 			dev_priv->display.crtc_disable(crtc);
+		mutex_unlock(&dev->mode_config.mutex);
 
 		intel_modeset_suspend_hw(dev);
 	}
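The hunk above ("drm/i915: take mode config lock around crtc disable at suspend") simply brackets the CRTC-disable walk with dev->mode_config.mutex so the freeze path cannot race with a concurrent modeset touching the same list and CRTC state. As a userspace analogue only (not kernel code; all names below are illustrative), the pattern is just "hold the writers' lock while walking the list and calling per-entry teardown":

/*
 * Userspace sketch of the locking pattern in the hunk above. NOT i915 code:
 * the struct names and the pthread mutex merely stand in for
 * dev->mode_config.mutex protecting the CRTC list during suspend.
 */
#include <pthread.h>
#include <stdio.h>

struct crtc {
	struct crtc *next;
	int id;
};

static struct crtc c1 = { NULL, 1 }, c0 = { &c1, 0 };

static struct {
	pthread_mutex_t mutex;     /* plays the role of mode_config.mutex */
	struct crtc *crtc_list;
} mode_config = { PTHREAD_MUTEX_INITIALIZER, &c0 };

static void crtc_disable(struct crtc *crtc)
{
	printf("disabling crtc %d\n", crtc->id);
}

static void freeze(void)
{
	struct crtc *crtc;

	pthread_mutex_lock(&mode_config.mutex);    /* the lock the patch adds */
	for (crtc = mode_config.crtc_list; crtc; crtc = crtc->next)
		crtc_disable(crtc);
	pthread_mutex_unlock(&mode_config.mutex);  /* released after the walk */
}

int main(void)
{
	freeze();
	return 0;
}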
+3 −4
@@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
-	if (IS_HSW_GT3(dev))
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
-	else
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+	if (IS_HASWELL(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
+8 −5
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		goto error;
+		goto err;
+
+	i915_gem_object_pin_pages(obj);
 
 	ret = -ENOMEM;
 
 	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
-		goto error;
+		goto err_unpin;
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	drm_free_large(pages);
 
 	if (!obj->dma_buf_vmapping)
-		goto error;
+		goto err_unpin;
 
 	obj->vmapping_count = 1;
-	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
 
-error:
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err:
 	mutex_unlock(&dev->struct_mutex);
 	return ERR_PTR(ret);
 }
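The two hunks above ("drm/i915: Pin pages whilst allocating for dma-buf vmap()") pin the backing pages before any allocation can fail and split the single error: label into staged err_unpin/err labels, so each failure point undoes exactly what has been acquired so far. A generic sketch of that staged goto-unwind pattern follows; acquire_a()/release_a()/acquire_b() are hypothetical stand-ins, not i915 calls:

/*
 * Generic staged goto-unwind sketch, mirroring the err_unpin/err split above.
 * Each label releases only the resources acquired before the failing step.
 */
#include <errno.h>
#include <stdio.h>

static int acquire_a(void)  { return 0; }           /* e.g. the get/pin-pages step */
static void release_a(void) { puts("release a"); }  /* e.g. unpin_pages()          */
static int acquire_b(void)  { return -ENOMEM; }     /* e.g. the vmap allocation    */

static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto err;            /* nothing acquired yet, nothing to undo */

	ret = acquire_b();
	if (ret)
		goto err_release_a;  /* undo only the first stage */

	return 0;

err_release_a:
	release_a();
err:
	return ret;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}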
+32 −28
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
 
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 				       struct i915_vma,
 				       exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 	return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 				goto err;
 		}
 
-err:		/* Decrement pin count for bound objects */
-		list_for_each_entry(vma, vmas, exec_list)
-			i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
+		/* Decrement pin count for bound objects */
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
+
 		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}

+4 −2
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
 #define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
 #define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
 #define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
 
 #define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	case I915_CACHE_NONE:
 		break;
 	case I915_CACHE_WT:
-		pte |= HSW_WT_ELLC_LLC_AGE0;
+		pte |= HSW_WT_ELLC_LLC_AGE3;
 		break;
 	default:
-		pte |= HSW_WB_ELLC_LLC_AGE0;
+		pte |= HSW_WB_ELLC_LLC_AGE3;
 		break;
 	}
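These last two hunks ("drm/i915: Prefer setting PTE cache age to 3") add AGE3 variants of the eLLC/LLC cacheability-control defines and switch the Haswell/Iris PTE encoding to use them instead of the AGE0 entries; per the patch title, age 3 is the preferred LRU age for these cachelines. A minimal sketch of the selection logic follows. Only the control-table indices (0x8, 0x7, 0xb, 0x6) come from the defines in the hunk above; the enum, function, and raw-index representation are local to this sketch and not the i915 API:

/*
 * Sketch of the cache-level -> cacheability-control selection changed above.
 * The values are the indices passed to HSW_CACHEABILITY_CONTROL() in the diff;
 * everything else here is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_WT, CACHE_WB };

/* Control-table indices taken from the defines in the hunk above. */
#define WB_ELLC_LLC_AGE0	0xb
#define WB_ELLC_LLC_AGE3	0x8
#define WT_ELLC_LLC_AGE0	0x6
#define WT_ELLC_LLC_AGE3	0x7

static uint32_t pick_cacheability(enum cache_level level)
{
	switch (level) {
	case CACHE_NONE:
		return 0;                 /* uncached: no cacheability bits set */
	case CACHE_WT:
		return WT_ELLC_LLC_AGE3;  /* was the AGE0 entry before the patch */
	default:
		return WB_ELLC_LLC_AGE3;  /* was the AGE0 entry before the patch */
	}
}

int main(void)
{
	printf("WT -> 0x%x, WB -> 0x%x\n",
	       pick_cacheability(CACHE_WT), pick_cacheability(CACHE_WB));
	return 0;
}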
