
Commit 0a693ab6 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "You'll be terribly disappointed in this, I'm not trying to sneak any
  features in or anything, it's mostly radeon and intel fixes, a couple
  of ARM driver fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (34 commits)
  drm/radeon/dpm: add debugfs support for RS780/RS880 (v3)
  drm/radeon/dpm/atom: fix broken gcc harder
  drm/radeon/dpm/atom: restructure logic to work around a compiler bug
  drm/radeon/dpm: fix atom vram table parsing
  drm/radeon: fix an endian bug in atom table parsing
  drm/radeon: add a module parameter to disable aspm
  drm/rcar-du: Use the GEM PRIME helpers
  drm/shmobile: Use the GEM PRIME helpers
  uvesafb: Really allow mtrr being 0, as documented and warn()ed
  radeon kms: do not flush uninitialized hotplug work
  drm/radeon/dpm/sumo: handle boost states properly when forcing a perf level
  drm/radeon: align VM PTBs (Page Table Blocks) to 32K
  drm/radeon: allow selection of alignment in the sub-allocator
  drm/radeon: never unpin UVD bo v3
  drm/radeon: fix UVD fence emit
  drm/radeon: add fault decode function for CIK
  drm/radeon: add fault decode function for SI (v2)
  drm/radeon: add fault decode function for cayman/TN (v2)
  drm/radeon: use radeon device for request firmware
  drm/radeon: add missing ttm_eu_backoff_reservation to radeon_bo_list_validate
  ...
parents 7a62711a 3668f0df
drivers/gpu/drm/i915/i915_drv.c  +2 −2
@@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
 		"Enable preliminary hardware support. (default: false)");
 
-int i915_disable_power_well __read_mostly = 0;
+int i915_disable_power_well __read_mostly = 1;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
 MODULE_PARM_DESC(disable_power_well,
-		 "Disable the power well when possible (default: false)");
+		 "Disable the power well when possible (default: true)");
 
 int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
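
The hunk above only flips a default: the power-well handling is now enabled unless the user overrides it at load time (for example, modprobe i915 disable_power_well=0). For reference, a minimal self-contained sketch of this module-parameter pattern, using illustrative demo_* names rather than the driver's own globals, looks like:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* Illustrative sketch of a load-time tunable with a built-in
	 * default, mirroring the disable_power_well parameter above. */
	static int demo_disable_power_well = 1;	/* default: enabled */
	module_param_named(disable_power_well, demo_disable_power_well, int, 0600);
	MODULE_PARM_DESC(disable_power_well,
			 "Disable the power well when possible (default: true)");

	MODULE_LICENSE("GPL");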
drivers/gpu/drm/i915/i915_gem.c  +33 −50
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
+	if (obj->ring != ring && obj->last_write_seqno) {
+		/* Keep the seqno relative to the current ring */
+		obj->last_write_seqno = seqno;
+	}
 	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
@@ -2653,7 +2657,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int fence_reg;
 	int fence_pitch_shift;
-	uint64_t val;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2666,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
 	}
 
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
 	if (obj) {
 		u32 size = obj->gtt_space->size;
+		uint64_t val;
 
 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
 				 0xfffff000) << 32;
@@ -2673,12 +2691,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
-	} else
-		val = 0;
 
-	fence_reg += reg * 8;
-	I915_WRITE64(fence_reg, val);
-	POSTING_READ(fence_reg);
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2796,56 +2818,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
 	return fence - dev_priv->fence_regs;
 }
 
-struct write_fence {
-	struct drm_device *dev;
-	struct drm_i915_gem_object *obj;
-	int fence;
-};
-
-static void i915_gem_write_fence__ipi(void *data)
-{
-	struct write_fence *args = data;
-
-	/* Required for SNB+ with LLC */
-	wbinvd();
-
-	/* Required for VLV */
-	i915_gem_write_fence(args->dev, args->fence, args->obj);
-}
-
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct write_fence args = {
-		.dev = obj->base.dev,
-		.fence = fence_number(dev_priv, fence),
-		.obj = enable ? obj : NULL,
-	};
+	int reg = fence_number(dev_priv, fence);
 
-	/* In order to fully serialize access to the fenced region and
-	 * the update to the fence register we need to take extreme
-	 * measures on SNB+. In theory, the write to the fence register
-	 * flushes all memory transactions before, and coupled with the
-	 * mb() placed around the register write we serialise all memory
-	 * operations with respect to the changes in the tiler. Yet, on
-	 * SNB+ we need to take a step further and emit an explicit wbinvd()
-	 * on each processor in order to manually flush all memory
-	 * transactions before updating the fence register.
-	 *
-	 * However, Valleyview complicates matter. There the wbinvd is
-	 * insufficient and unlike SNB/IVB requires the serialising
-	 * register write. (Note that that register write by itself is
-	 * conversely not sufficient for SNB+.) To compromise, we do both.
-	 */
-	if (INTEL_INFO(args.dev)->gen >= 6)
-		on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
-	else
-		i915_gem_write_fence(args.dev, args.fence, args.obj);
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
 	if (enable) {
-		obj->fence_reg = args.fence;
+		obj->fence_reg = reg;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 	} else {
@@ -4611,7 +4594,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
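The i965_write_fence_reg() rework above drops the single I915_WRITE64() in favour of an off/high/low sequence. The ordering argument is easiest to see in isolation: because the fence's VALID bit sits in the low dword, the register holds either zero or a complete value at every instant. Below is a sketch under that assumption, where mmio_write32()/mmio_posting_read() are hypothetical stand-ins for I915_WRITE/POSTING_READ:

	#include <stdint.h>

	void mmio_write32(uint32_t reg, uint32_t val);	/* assumed helper */
	uint32_t mmio_posting_read(uint32_t reg);	/* assumed helper */

	static void write_fence_split(uint32_t fence_reg, uint64_t val)
	{
		/* 1. Disable the fence so no partial value can be
		 *    evaluated between the two 32-bit writes. */
		mmio_write32(fence_reg, 0);
		mmio_posting_read(fence_reg);

		/* 2. Upper dword first; the fence stays off because the
		 *    VALID bit lives in the lower dword. */
		mmio_write32(fence_reg + 4, (uint32_t)(val >> 32));
		mmio_posting_read(fence_reg + 4);

		/* 3. Lower dword last, re-arming the fence only once the
		 *    full 64-bit value is in place. */
		mmio_write32(fence_reg, (uint32_t)val);
		mmio_posting_read(fence_reg);
	}
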
drivers/gpu/drm/i915/intel_dp.c  +5 −0
@@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 	case DP_LINK_BW_1_62:
 	case DP_LINK_BW_2_7:
 		break;
+	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
+		max_link_bw = DP_LINK_BW_2_7;
+		break;
 	default:
+		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+		     max_link_bw);
 		max_link_bw = DP_LINK_BW_1_62;
 		break;
 	}
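
This intel_dp.c hunk handles DisplayPort 1.2 sinks that advertise a 5.4 Gbps link rate to hardware that tops out at 2.7 Gbps: known-higher rates are clamped, and anything unrecognised falls back to the minimum with a warning. As a standalone illustration of the pattern (the constants are the standard DPCD link-rate codes; the function name is made up for this sketch):

	#include <stdio.h>

	enum dp_link_bw { DP_BW_1_62 = 0x06, DP_BW_2_7 = 0x0a, DP_BW_5_4 = 0x14 };

	static int sanitize_link_bw(int reported)
	{
		switch (reported) {
		case DP_BW_1_62:
		case DP_BW_2_7:
			return reported;	/* directly supported */
		case DP_BW_5_4:		/* DP 1.2 sink: clamp to what we can drive */
			return DP_BW_2_7;
		default:		/* corrupt DPCD: warn, use the safe minimum */
			fprintf(stderr, "invalid max DP link bw val %x, using 1.62Gbps\n",
				reported);
			return DP_BW_1_62;
		}
	}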
drivers/gpu/drm/i915/intel_pm.c  +30 −1
@@ -5500,9 +5500,38 @@ void intel_gt_init(struct drm_device *dev)
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->gt.force_wake_get = vlv_force_wake_get;
 		dev_priv->gt.force_wake_put = vlv_force_wake_put;
-	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+	} else if (IS_IVYBRIDGE(dev)) {
+		u32 ecobus;
+
+		/* IVB configs may use multi-threaded forcewake */
+
+		/* A small trick here - if the bios hasn't configured
+		 * MT forcewake, and if the device is in RC6, then
+		 * force_wake_mt_get will not wake the device and the
+		 * ECOBUS read will return zero. Which will be
+		 * (correctly) interpreted by the test below as MT
+		 * forcewake being disabled.
+		 */
+		mutex_lock(&dev->struct_mutex);
+		__gen6_gt_force_wake_mt_get(dev_priv);
+		ecobus = I915_READ_NOTRACE(ECOBUS);
+		__gen6_gt_force_wake_mt_put(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (ecobus & FORCEWAKE_MT_ENABLE) {
+			dev_priv->gt.force_wake_get =
+						__gen6_gt_force_wake_mt_get;
+			dev_priv->gt.force_wake_put =
+						__gen6_gt_force_wake_mt_put;
+		} else {
+			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+			dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+		}
 	} else if (IS_GEN6(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
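
The restored Ivybridge branch cannot simply assume multi-threaded forcewake works; it probes for it by attempting the wake and reading back ECOBUS, which only responds if the BIOS actually configured MT forcewake. Stripped of the driver plumbing, the probe reduces to the sketch below (the helper names and the DEMO_FORCEWAKE_MT_ENABLE bit value are assumptions for illustration, not the driver's definitions):

	#include <stdbool.h>
	#include <stdint.h>

	#define DEMO_FORCEWAKE_MT_ENABLE (1u << 5)	/* assumed bit position */

	void mt_force_wake_get(void);	/* hypothetical MMIO helpers */
	void mt_force_wake_put(void);
	uint32_t read_ecobus(void);

	/* If the BIOS never configured MT forcewake and the GPU sits in
	 * RC6, the get() does not wake the device and the ECOBUS read
	 * returns zero, which is correctly read as "unavailable". */
	static bool mt_forcewake_available(void)
	{
		uint32_t ecobus;

		mt_force_wake_get();
		ecobus = read_ecobus();
		mt_force_wake_put();

		return (ecobus & DEMO_FORCEWAKE_MT_ENABLE) != 0;
	}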
drivers/gpu/drm/i915/intel_ringbuffer.c  +23 −15
@@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 	return I915_READ(acthd_reg);
 }
 
+static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
+
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	if (HAS_FORCE_WAKE(dev))
 		gen6_gt_force_wake_get(dev_priv);
 
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
+
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
@@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	struct pipe_control *pc = ring->private;
 	struct drm_i915_gem_object *obj;
 
-	if (!ring->private)
-		return;
-
 	obj = pc->obj;
 
 	kunmap(sg_page(obj->pages->sgl));
@@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	drm_gem_object_unreference(&obj->base);
 
 	kfree(pc);
-	ring->private = NULL;
 }
 
 static int init_render_ring(struct intel_ring_buffer *ring)
@@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 	if (HAS_BROKEN_CS_TLB(dev))
 		drm_gem_object_unreference(to_gem_object(ring->private));
 
-	cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5)
+		cleanup_pipe_control(ring);
+
+	ring->private = NULL;
 }
 
 static void
@@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
@@ -1237,10 +1251,9 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	return ret;
 }
 
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 addr;
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
@@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
 			return -ENOMEM;
 	}
 
-	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(ring->dev)->gen >= 4)
-		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-	I915_WRITE(HWS_PGA, addr);
-
 	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
@@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			return ret;
 	} else {
 		BUG_ON(ring->id != RCS);
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	}
 
 	if (!I915_NEED_GFX_HWS(dev)) {
-		ret = init_phys_status_page(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
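
ring_setup_phys_status_page(), added near the top of this file, packs a physical (bus) address into the 32-bit HWS_PGA register: the 4 KiB-aligned low 32 bits go in directly, and on gen4+ address bits 35:32 are folded into register bits 7:4, which is what the (busaddr >> 28) & 0xf0 expression computes. A standalone sketch of just that packing (the function name is illustrative):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t hws_pga_value(uint64_t busaddr, int gen)
	{
		uint32_t addr = (uint32_t)busaddr;	/* low 32 bits */

		assert((busaddr & 0xfff) == 0);	/* 4 KiB aligned page */
		if (gen >= 4)	/* bits 35:32 -> register bits 7:4 */
			addr |= (uint32_t)((busaddr >> 28) & 0xf0);
		return addr;
	}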