
Commit 28419261 authored by Dave Airlie

Merge tag 'drm-intel-next-2013-06-18' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Last 3.11 feature pull. I have a few odd bits and pieces and fixes in my
queue, I'll sort them out later on to see what's for 3.11-fixes and what's
for 3.12. But nothing to hold this here up imo.

Highlights:
- more hangcheck work from Mika and Chris to prepare for arb robustness
- trickle feed fixes from Ville
- first parts of the shared pch pll rework, with some basic hw state
  readout and cross-checking (this shuts up the confused pch pll refcount
  WARN that Linus just recently forwarded)
- Haswell audio power well support from Wang Xingchao (alsa bits acked by
  Takashi)
- some cleanups and asserts sprinkling around the plane/gamma enabling
  sequence from Ville
- more gtt refactoring from Ben
- clear up the adjusted_mode vs. pixel clock vs. port clock confusion
- 30bpp support, this time for real hopefully
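
A note on the shared pch pll rework mentioned above: each PLL now carries a refcount plus on/active bookkeeping, and a get_hw_state() hook reads the real hardware state back so the software state can be cross-checked against it (this is what silences the confused refcount WARN). A minimal sketch of that cross-check pattern, using the intel_shared_dpll fields from the i915_drv.h hunks below; the helper itself (check_shared_dpll_state) is illustrative, not the driver's exact code:

	/* Illustrative: compare each shared DPLL's bookkeeping with the
	 * state the hardware actually reports via its get_hw_state() hook. */
	static void check_shared_dpll_state(struct drm_i915_private *dev_priv)
	{
		int i;

		for (i = 0; i < dev_priv->num_shared_dpll; i++) {
			struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
			struct intel_dpll_hw_state hw_state;
			bool enabled = pll->get_hw_state(dev_priv, pll, &hw_state);

			WARN(pll->on != enabled,
			     "%s: on=%d does not match hardware\n", pll->name, pll->on);
			WARN(pll->active > pll->refcount,
			     "%s: more active users than references\n", pll->name);
		}
	}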

* tag 'drm-intel-next-2013-06-18' of git://people.freedesktop.org/~danvet/drm-intel: (97 commits)
  drm/i915: remove a superflous semi-colon
  drm/i915: Kill useless "Enable panel fitter" comments
  drm/i915: Remove extra "ring" from error message
  drm/i915: simplify the reduced clock handling for pch plls
  drm/i915: stop killing pfit on i9xx
  drm/i915: explicitly set up PIPECONF (and gamma table) on haswell
  drm/i915: set up PIPECONF explicitly for i9xx/vlv platforms
  drm/i915: set up PIPECONF explicitly on ilk-ivb
  drm/i915: find guilty batch buffer on ring resets
  drm/i915: store ring hangcheck action
  drm/i915: add batch bo to i915_add_request()
  drm/i915: change i915_add_request to macro
  drm/i915: add i915_gem_context_get_hang_stats()
  drm/i915: add struct i915_ctx_hang_stats
  drm/i915: Try harder to disable trickle feed on VLV
  drm/i915: fix up pch pll enabling for pixel multipliers
  drm/i915: hw state readout and cross-checking for shared dplls
  drm/i915: WARN on lack of shared dpll
  drm/i915: split up intel_modeset_check_state
  drm/i915: extract readout_hw_state from setup_hw_state
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_fb.c
	drivers/gpu/drm/i915/intel_sdvo.c
Parents: 4a009085 854c94a7

drivers/gpu/drm/i915/i915_debugfs.c (+48 −5)
@@ -196,6 +196,32 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	} \
 } while (0)

+struct file_stats {
+	int count;
+	size_t total, active, inactive, unbound;
+};
+
+static int per_file_stats(int id, void *ptr, void *data)
+{
+	struct drm_i915_gem_object *obj = ptr;
+	struct file_stats *stats = data;
+
+	stats->count++;
+	stats->total += obj->base.size;
+
+	if (obj->gtt_space) {
+		if (!list_empty(&obj->ring_list))
+			stats->active += obj->base.size;
+		else
+			stats->inactive += obj->base.size;
+	} else {
+		if (!list_empty(&obj->global_list))
+			stats->unbound += obj->base.size;
+	}
+
+	return 0;
+}
+
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -204,6 +230,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	u32 count, mappable_count, purgeable_count;
 	size_t size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
+	struct drm_file *file;
 	int ret;

 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -215,7 +242,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   dev_priv->mm.object_memory);

 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.bound_list, gtt_list);
+	count_objects(&dev_priv->mm.bound_list, global_list);
 	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);

@@ -230,7 +257,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, mappable_count, size, mappable_size);

 	size = count = purgeable_size = purgeable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
 		size += obj->base.size, ++count;
 		if (obj->madv == I915_MADV_DONTNEED)
 			purgeable_size += obj->base.size, ++purgeable_count;
@@ -238,7 +265,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

 	size = count = mappable_size = mappable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if (obj->fault_mappable) {
 			size += obj->gtt_space->size;
 			++count;
@@ -263,6 +290,21 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   dev_priv->gtt.total,
 		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);

+	seq_printf(m, "\n");
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct file_stats stats;
+
+		memset(&stats, 0, sizeof(stats));
+		idr_for_each(&file->object_idr, per_file_stats, &stats);
+		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
+			   get_pid_task(file->pid, PIDTYPE_PID)->comm,
+			   stats.count,
+			   stats.total,
+			   stats.active,
+			   stats.inactive,
+			   stats.unbound);
+	}
+
 	mutex_unlock(&dev->struct_mutex);

 	return 0;
@@ -283,7 +325,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 		return ret;

 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if (list == PINNED_LIST && obj->pin_count == 0)
 			continue;

@@ -1944,7 +1986,8 @@ i915_drop_caches_set(void *data, u64 val)
 	}

 	if (val & DROP_UNBOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+					 global_list)
 			if (obj->pages_pin_count == 0) {
 				ret = i915_gem_object_put_pages(obj);
 				if (ret)
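
For reference on the new per-client dump above: per_file_stats() is an idr_for_each() callback. idr_for_each() visits every allocated handle in the file's object_idr, passing the handle ID, the stored pointer (here the GEM object), and an opaque cursor (here the on-stack accumulator); returning 0 from the callback keeps the walk going. The aggregation therefore boils down to:

	struct file_stats stats;

	memset(&stats, 0, sizeof(stats));
	/* per_file_stats() always returns 0, so every object owned by
	 * this client is folded into 'stats'. */
	idr_for_each(&file->object_idr, per_file_stats, &stats);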

drivers/gpu/drm/i915/i915_dma.c (+8 −3)
@@ -1001,8 +1001,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	default:
-		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
-				 param->param);
+		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
 	}

@@ -1633,6 +1632,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	/* Start out suspended */
 	dev_priv->mm.suspended = 1;

+	if (HAS_POWER_WELL(dev))
+		i915_init_power_well(dev);
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		ret = i915_load_modeset_init(dev);
 		if (ret < 0) {
@@ -1684,6 +1686,9 @@ int i915_driver_unload(struct drm_device *dev)

 	intel_gpu_ips_teardown();

+	if (HAS_POWER_WELL(dev))
+		i915_remove_power_well(dev);
+
 	i915_teardown_sysfs(dev);

 	if (dev_priv->mm.inactive_shrinker.shrink)
@@ -1775,7 +1780,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 	struct drm_i915_file_private *file_priv;

 	DRM_DEBUG_DRIVER("\n");
-	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
 	if (!file_priv)
 		return -ENOMEM;

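One detail behind the kmalloc→kzalloc switch in i915_driver_open() above: struct drm_i915_file_private now embeds a struct i915_ctx_hang_stats (see the i915_drv.h hunks below) whose batch_active/batch_pending counters must start at zero, so the whole allocation is zero-initialized up front. kzalloc(size, flags) is simply kmalloc(size, flags) followed by clearing the memory; the equivalent open-coded form would be:

	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;
	memset(file_priv, 0, sizeof(*file_priv));	/* hang_stats starts zeroed */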

drivers/gpu/drm/i915/i915_drv.c (+3 −10)
@@ -457,7 +457,6 @@ void intel_detect_pch(struct drm_device *dev)
 	 */
 	if (INTEL_INFO(dev)->num_pipes == 0) {
 		dev_priv->pch_type = PCH_NOP;
-		dev_priv->num_pch_pll = 0;
 		return;
 	}

@@ -476,34 +475,28 @@ void intel_detect_pch(struct drm_device *dev)

 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_IBX;
-				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
 				WARN_ON(!IS_GEN5(dev));
 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_CPT;
-				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
-				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
 				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
-				dev_priv->num_pch_pll = 0;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
 				WARN_ON(IS_ULT(dev));
 			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
-				dev_priv->num_pch_pll = 0;
 				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
 				WARN_ON(!IS_HASWELL(dev));
 				WARN_ON(!IS_ULT(dev));
 			}
-			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
 		}
 		pci_dev_put(pch);
 	}
@@ -570,7 +563,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 	intel_opregion_fini(dev);

 	console_lock();
-	intel_fbdev_set_suspend(dev, 1);
+	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
 	console_unlock();

 	return 0;
@@ -614,7 +607,7 @@ void intel_console_resume(struct work_struct *work)
 	struct drm_device *dev = dev_priv->dev;

 	console_lock();
-	intel_fbdev_set_suspend(dev, 0);
+	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
 	console_unlock();
 }

@@ -683,7 +676,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
 	 * path of resume if possible.
 	 */
 	if (console_trylock()) {
-		intel_fbdev_set_suspend(dev, 0);
+		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
 		console_unlock();
 	} else {
 		schedule_work(&dev_priv->console_resume_work);
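
On the intel_fbdev_set_suspend() changes above: the numeric values are unchanged, since the fbdev constants being substituted are defined in include/linux/fb.h as

	#define FBINFO_STATE_RUNNING	0
	#define FBINFO_STATE_SUSPENDED	1

so this is purely a readability fix, passing the constants the fbdev layer expects instead of bare 0/1.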

drivers/gpu/drm/i915/i915_drv.h (+91 −14)
@@ -132,15 +132,38 @@ enum hpd_pin
 	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
 		if ((intel_encoder)->base.crtc == (__crtc))

-struct intel_pch_pll {
+struct drm_i915_private;
+
+enum intel_dpll_id {
+	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
+	/* real shared dpll ids must be >= 0 */
+	DPLL_ID_PCH_PLL_A,
+	DPLL_ID_PCH_PLL_B,
+};
+#define I915_NUM_PLLS 2
+
+struct intel_dpll_hw_state {
+	uint32_t dpll;
+	uint32_t fp0;
+	uint32_t fp1;
+};
+
+struct intel_shared_dpll {
 	int refcount; /* count of number of CRTCs sharing this PLL */
 	int active; /* count of number of active CRTCs (i.e. DPMS on) */
 	bool on; /* is the PLL actually active? Disabled during modeset */
-	int pll_reg;
-	int fp0_reg;
-	int fp1_reg;
+	const char *name;
+	/* should match the index in the dev_priv->shared_dplls array */
+	enum intel_dpll_id id;
+	struct intel_dpll_hw_state hw_state;
+	void (*enable)(struct drm_i915_private *dev_priv,
+		       struct intel_shared_dpll *pll);
+	void (*disable)(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll);
+	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+			     struct intel_shared_dpll *pll,
+			     struct intel_dpll_hw_state *hw_state);
 };
-#define I915_NUM_PLLS 2

 /* Used by dp and fdi links */
 struct intel_link_m_n {
@@ -195,7 +218,6 @@ struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
-struct drm_i915_private;

 struct intel_opregion {
 	struct opregion_header __iomem *header;
@@ -306,6 +328,8 @@ struct drm_i915_error_state {

 struct intel_crtc_config;
 struct intel_crtc;
+struct intel_limit;
+struct dpll;

 struct drm_i915_display_funcs {
 	bool (*fbc_enabled)(struct drm_device *dev);
@@ -313,6 +337,24 @@ struct drm_i915_display_funcs {
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
+	/**
+	 * find_dpll() - Find the best values for the PLL
+	 * @limit: limits for the PLL
+	 * @crtc: current CRTC
+	 * @target: target frequency in kHz
+	 * @refclk: reference clock frequency in kHz
+	 * @match_clock: if provided, @best_clock P divider must
+	 *               match the P divider from @match_clock
+	 *               used for LVDS downclocking
+	 * @best_clock: best PLL values found
+	 *
+	 * Returns true on success, false on failure.
+	 */
+	bool (*find_dpll)(const struct intel_limit *limit,
+			  struct drm_crtc *crtc,
+			  int target, int refclk,
+			  struct dpll *match_clock,
+			  struct dpll *best_clock);
 	void (*update_wm)(struct drm_device *dev);
 	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
 				 uint32_t sprite_width, int pixel_size,
@@ -466,6 +508,13 @@ struct i915_hw_ppgtt {
 	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };

+struct i915_ctx_hang_stats {
+	/* This context had batch pending when hang was declared */
+	unsigned batch_pending;
+
+	/* This context had batch active when hang was declared */
+	unsigned batch_active;
+};

 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_ID 0
@@ -476,6 +525,7 @@ struct i915_hw_context {
 	struct drm_i915_file_private *file_priv;
 	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_object *obj;
+	struct i915_ctx_hang_stats hang_stats;
 };

 enum no_fbc_reason {
@@ -720,6 +770,15 @@ struct intel_ilk_power_mgmt {
 	struct drm_i915_gem_object *renderctx;
 };

+/* Power well structure for haswell */
+struct i915_power_well {
+	struct drm_device *device;
+	spinlock_t lock;
+	/* power well enable/disable usage count */
+	int count;
+	int i915_request;
+};
+
 struct i915_dri1_state {
 	unsigned allow_batchbuffer : 1;
 	u32 __iomem *gfx_hws_cpu_addr;
@@ -842,7 +901,6 @@ struct i915_gpu_error {
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
 	struct timer_list hangcheck_timer;
-	int hangcheck_count;

 	/* For reset and error_state handling. */
 	spinlock_t lock;
@@ -998,7 +1056,6 @@ typedef struct drm_i915_private {
 	u32 hpd_event_bits;
 	struct timer_list hotplug_reenable_timer;

-	int num_pch_pll;
 	int num_plane;

 	unsigned long cfb_size;
@@ -1059,7 +1116,8 @@ typedef struct drm_i915_private {
 	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;

-	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+	int num_shared_dpll;
+	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
 	struct intel_ddi_plls ddi_plls;

 	/* Reclocking support */
@@ -1080,6 +1138,9 @@ typedef struct drm_i915_private {
 	 * mchdev_lock in intel_pm.c */
 	struct intel_ilk_power_mgmt ips;

+	/* Haswell power well */
+	struct i915_power_well power_well;
+
 	enum no_fbc_reason no_fbc_reason;

 	struct drm_mm_node *compressed_fb;
@@ -1154,7 +1215,7 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *gtt_space;
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
-	struct list_head gtt_list;
+	struct list_head global_list;

 	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
@@ -1301,12 +1362,18 @@ struct drm_i915_gem_request {
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;

-	/** Postion in the ringbuffer of the end of the request */
+	/** Position in the ringbuffer of the start of the request */
+	u32 head;
+
+	/** Position in the ringbuffer of the end of the request */
 	u32 tail;

 	/** Context related to this request */
 	struct i915_hw_context *ctx;

+	/** Batch buffer related to this request if any */
+	struct drm_i915_gem_object *batch_obj;
+
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;

@@ -1324,6 +1391,8 @@ struct drm_i915_file_private {
 		struct list_head request_list;
 	} mm;
 	struct idr context_idr;
+
+	struct i915_ctx_hang_stats hang_stats;
 };

 #define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1660,6 +1729,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
 		dev_priv->fence_regs[obj->fence_reg].pin_count--;
 	}
 }
@@ -1692,9 +1762,12 @@ void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_ring_buffer *ring,
 		       struct drm_file *file,
+		       struct drm_i915_gem_object *batch_obj,
 		       u32 *seqno);
+#define i915_add_request(ring, seqno) \
+	__i915_add_request(ring, NULL, NULL, seqno)
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1748,6 +1821,10 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 	kref_put(&ctx->ref, i915_gem_context_free);
 }

+struct i915_ctx_hang_stats * __must_check
+i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+				struct drm_file *file,
+				u32 id);
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
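
The i915_add_request() macro above keeps the common two-argument call sites unchanged while routing everything through __i915_add_request(), which now also records the file and the batch object for later hang attribution. For example, the calls in the i915_gem.c hunks below expand as:

	/* caller view, unchanged two-argument form: */
	ret = i915_add_request(ring, NULL);

	/* after macro expansion: file and batch_obj default to NULL */
	ret = __i915_add_request(ring, NULL, NULL, NULL);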

drivers/gpu/drm/i915/i915_gem.c (+134 −20)
@@ -176,7 +176,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,

 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
 		if (obj->pin_count)
 			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
@@ -956,7 +956,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)

 	ret = 0;
 	if (seqno == ring->outstanding_lazy_request)
-		ret = i915_add_request(ring, NULL, NULL);
+		ret = i915_add_request(ring, NULL);

 	return ret;
 }
@@ -1676,7 +1676,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
 	 * array, hence protect them from being reaped by removing them from gtt
 	 * lists early. */
-	list_del(&obj->gtt_list);
+	list_del(&obj->global_list);

 	ops->put_pages(obj);
 	obj->pages = NULL;
@@ -1696,7 +1696,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,

 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.unbound_list,
-				 gtt_list) {
+				 global_list) {
 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
@@ -1733,7 +1733,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)

 	i915_gem_evict_everything(dev_priv->dev);

-	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
+				 global_list)
 		i915_gem_object_put_pages(obj);
 }

@@ -1858,7 +1859,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (ret)
 		return ret;

-	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 	return 0;
 }

@@ -1996,17 +1997,18 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 	return 0;
 }

-int
-i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_ring_buffer *ring,
 		       struct drm_file *file,
+		       struct drm_i915_gem_object *obj,
 		       u32 *out_seqno)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
-	u32 request_ring_position;
+	u32 request_ring_position, request_start;
 	int was_empty;
 	int ret;

+	request_start = intel_ring_get_tail(ring);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2038,8 +2040,17 @@ i915_add_request(struct intel_ring_buffer *ring,

 	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
+	request->head = request_start;
 	request->tail = request_ring_position;
 	request->ctx = ring->last_context;
+	request->batch_obj = obj;
+
+	/* Whilst this request exists, batch_obj will be on the
+	 * active_list, and so will hold the active reference. Only when this
+	 * request is retired will the the batch_obj be moved onto the
+	 * inactive_list and lose its active reference. Hence we do not need
+	 * to explicitly hold another reference here.
+	 */

 	if (request->ctx)
 		i915_gem_context_reference(request->ctx);
@@ -2096,6 +2107,94 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }

+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+{
+	if (acthd >= obj->gtt_offset &&
+	    acthd < obj->gtt_offset + obj->base.size)
+		return true;
+
+	return false;
+}
+
+static bool i915_head_inside_request(const u32 acthd_unmasked,
+				     const u32 request_start,
+				     const u32 request_end)
+{
+	const u32 acthd = acthd_unmasked & HEAD_ADDR;
+
+	if (request_start < request_end) {
+		if (acthd >= request_start && acthd < request_end)
+			return true;
+	} else if (request_start > request_end) {
+		if (acthd >= request_start || acthd < request_end)
+			return true;
+	}
+
+	return false;
+}
+
+static bool i915_request_guilty(struct drm_i915_gem_request *request,
+				const u32 acthd, bool *inside)
+{
+	/* There is a possibility that unmasked head address
+	 * pointing inside the ring, matches the batch_obj address range.
+	 * However this is extremely unlikely.
+	 */
+
+	if (request->batch_obj) {
+		if (i915_head_inside_object(acthd, request->batch_obj)) {
+			*inside = true;
+			return true;
+		}
+	}
+
+	if (i915_head_inside_request(acthd, request->head, request->tail)) {
+		*inside = false;
+		return true;
+	}
+
+	return false;
+}
+
+static void i915_set_reset_status(struct intel_ring_buffer *ring,
+				  struct drm_i915_gem_request *request,
+				  u32 acthd)
+{
+	struct i915_ctx_hang_stats *hs = NULL;
+	bool inside, guilty;
+
+	/* Innocent until proven guilty */
+	guilty = false;
+
+	if (ring->hangcheck.action != wait &&
+	    i915_request_guilty(request, acthd, &inside)) {
+		DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+			  ring->name,
+			  inside ? "inside" : "flushing",
+			  request->batch_obj ?
+			  request->batch_obj->gtt_offset : 0,
+			  request->ctx ? request->ctx->id : 0,
+			  acthd);
+
+		guilty = true;
+	}
+
+	/* If contexts are disabled or this is the default context, use
+	 * file_priv->reset_state
+	 */
+	if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
+		hs = &request->ctx->hang_stats;
+	else if (request->file_priv)
+		hs = &request->file_priv->hang_stats;
+
+	if (hs) {
+		if (guilty)
+			hs->batch_active++;
+		else
+			hs->batch_pending++;
+	}
+}
+
 static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
 	list_del(&request->list);
@@ -2110,6 +2209,12 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 				      struct intel_ring_buffer *ring)
 {
+	u32 completed_seqno;
+	u32 acthd;
+
+	acthd = intel_ring_get_active_head(ring);
+	completed_seqno = ring->get_seqno(ring, false);
+
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;

@@ -2117,6 +2222,9 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);

+		if (request->seqno > completed_seqno)
+			i915_set_reset_status(ring, request, acthd);
+
 		i915_gem_free_request(request);
 	}

@@ -2276,7 +2384,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
 		if (ring->gpu_caches_dirty)
-			i915_add_request(ring, NULL, NULL);
+			i915_add_request(ring, NULL);

 		idle &= list_empty(&ring->request_list);
 	}
@@ -2508,9 +2616,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		obj->has_aliasing_ppgtt_mapping = 0;
 	}
 	i915_gem_gtt_finish_object(obj);
+	i915_gem_object_unpin_pages(obj);

 	list_del(&obj->mm_list);
-	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;

@@ -2918,7 +3027,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 	struct drm_i915_gem_object *obj;
 	int err = 0;

-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
 		if (obj->gtt_space == NULL) {
 			printk(KERN_ERR "object found on GTT list with no space reserved\n");
 			err++;
@@ -3042,7 +3151,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return ret;
 	}

-	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
+	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

 	obj->gtt_space = node;
@@ -3057,7 +3166,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,

 	obj->map_and_fenceable = mappable && fenceable;

-	i915_gem_object_unpin_pages(obj);
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	i915_gem_verify_gtt(dev);
 	return 0;
@@ -3757,7 +3865,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops)
 {
 	INIT_LIST_HEAD(&obj->mm_list);
-	INIT_LIST_HEAD(&obj->gtt_list);
+	INIT_LIST_HEAD(&obj->global_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);

@@ -3857,6 +3965,12 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		dev_priv->mm.interruptible = was_interruptible;
 	}

+	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
+	 * before progressing. */
+	if (obj->stolen)
+		i915_gem_object_unpin_pages(obj);
+
 	if (WARN_ON(obj->pages_pin_count))
 		obj->pages_pin_count = 0;
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
@@ -4498,10 +4612,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	}

 	cnt = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;

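A subtlety worth flagging in the guilty-batch detection added above: the ringbuffer is circular, so a request's [head, tail) span can wrap past the end of the ring, which is why i915_head_inside_request() splits the comparison on whether the span wraps. Restated as a standalone predicate (illustrative only, same logic):

	/* Does 'pos' fall inside the circular interval [start, end)?
	 * Mirrors the wraparound handling in i915_head_inside_request(). */
	static bool inside_circular_span(u32 pos, u32 start, u32 end)
	{
		if (start < end)		/* span does not wrap */
			return pos >= start && pos < end;
		if (start > end)		/* span wraps past the ring end */
			return pos >= start || pos < end;
		return false;			/* start == end: empty span */
	}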