
Commit fb82155d authored by Linus Torvalds
* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/radeon/kms: add workaround for dce3 ddc line vbios bug
  drm/radeon/kms: fix interlaced and doublescan handling
  drm/radeon/kms: fix typos in disabled vbios code
  Revert "drm/i915/dp: use VBT provided eDP params if available"
  drm/i915: Clear pfit registers when not used by any outputs
  drm: record monitor status in output_poll_execute
  drm: Set connector DPMS status to ON in drm_crtc_helper_set_config
  drm/i915: fix regression due to ba3d8d74
  Revert "drm/radeon/kms: fix typo in r600 cs checker"
  drm/i915/sdvo: Always add a 30ms delay to make SDVO TV detection reliable
  MAINTAINERS: INTEL DRM DRIVERS list (intel-gfx) is subscribers-only
  drm/i915/sdvo: Always fallback to querying the shared DDC line
  drm/i915: Handle pagefaults in execbuffer user relocations
  drm/i915/sdvo: Only enable HDMI encodings only if the commandset is supported
  drm/radeon/kms: fix resume regression for some r5xx laptops
  drm/radeon/kms: fix regression in rs4xx i2c setup
  drm/i915: Only save/restore cursor regs if !KMS
  drm/i915: Prevent integer overflow when validating the execbuffer
parents 22a5b566 3074adc8
MAINTAINERS  +1 −1
@@ -2080,7 +2080,7 @@ F: include/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:	Chris Wilson <chris@chris-wilson.co.uk>
-L:	intel-gfx@lists.freedesktop.org
+L:	intel-gfx@lists.freedesktop.org (subscribers-only)
 L:	dri-devel@lists.freedesktop.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
 S:	Supported
drivers/gpu/drm/drm_crtc_helper.c  +11 −3
@@ -471,6 +471,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
 	int ret = 0;
+	int i;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -666,6 +667,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		if (ret != 0)
 			goto fail;
 	}
+	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+	for (i = 0; i < set->num_connectors; i++) {
+		DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+			      drm_get_connector_name(set->connectors[i]));
+		set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
@@ -841,7 +848,7 @@ static void output_poll_execute(struct work_struct *work)
 	struct delayed_work *delayed_work = to_delayed_work(work);
 	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
 	struct drm_connector *connector;
-	enum drm_connector_status old_status, status;
+	enum drm_connector_status old_status;
 	bool repoll = false, changed = false;
 
 	if (!drm_kms_helper_poll)
@@ -866,8 +873,9 @@ static void output_poll_execute(struct work_struct *work)
 		    !(connector->polled & DRM_CONNECTOR_POLL_HPD))
 			continue;
 
-		status = connector->funcs->detect(connector, false);
-		if (old_status != status)
+		connector->status = connector->funcs->detect(connector, false);
+		DRM_DEBUG_KMS("connector status updated to %d\n", connector->status);
+		if (old_status != connector->status)
 			changed = true;
 	}
 
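The two drm core hunks above record the result of ->detect() in connector->status during the poll work and mark every connector of a successful modeset as DPMS on. A minimal userspace sketch of the status-recording flow (the struct and enum below are stand-ins for the kernel's drm_connector, shown only to make the change-detection logic explicit):

#include <stdbool.h>
#include <stdio.h>

enum connector_status { status_unknown, status_connected, status_disconnected };

struct connector {
	const char *name;
	enum connector_status status;	/* recorded result, as in the hunk above */
	enum connector_status (*detect)(struct connector *c);
};

/* Mirrors the poll-loop body: remember the old status, store the fresh one,
 * and report whether anything changed so the caller can fire a hotplug event. */
static bool poll_one(struct connector *c)
{
	enum connector_status old_status = c->status;

	c->status = c->detect(c);
	printf("connector %s status updated to %d\n", c->name, c->status);
	return old_status != c->status;
}

static enum connector_status always_connected(struct connector *c)
{
	(void)c;
	return status_connected;
}

int main(void)
{
	struct connector hdmi = { "HDMI-A-1", status_unknown, always_connected };
	bool changed = poll_one(&hdmi);

	printf("changed=%d recorded=%d\n", changed, hdmi.status);
	return 0;
}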
drivers/gpu/drm/i915/i915_gem.c  +319 −194
@@ -38,8 +38,7 @@
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-						  bool pipelined);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -2594,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 	if (reg->gpu) {
 		int ret;
 
-		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+		ret = i915_gem_object_flush_gpu_write_domain(obj);
 		if (ret)
 			return ret;
 
@@ -2742,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-				       bool pipelined)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
@@ -2762,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 					    obj->read_domains,
 					    old_write_domain);
 
-	if (pipelined)
-		return 0;
-
-	return i915_gem_object_wait_rendering(obj, true);
+	return 0;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2826,17 +2821,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
-
-	i915_gem_object_flush_cpu_write_domain(obj);
-
-	if (write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
+
+	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
@@ -2875,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
@@ -2924,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
 
 	i915_gem_object_flush_gtt_write_domain(obj);
 
@@ -2935,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
-	if (write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
@@ -3205,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	if (offset == 0 && size == obj->size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're already fully in the CPU read domain, we're done. */
@@ -3254,45 +3247,22 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	return 0;
 }
 
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
 static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct drm_file *file_priv,
-			     struct drm_i915_gem_exec_object2 *entry)
+				   struct drm_i915_gem_exec_object2 *entry,
+				   struct drm_i915_gem_relocation_entry *reloc)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_gem_object *target_obj = NULL;
-	uint32_t target_handle = 0;
-	int i, ret = 0;
-
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
+	struct drm_gem_object *target_obj;
 	uint32_t target_offset;
-
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc))) {
-			ret = -EFAULT;
-			break;
-		}
-
-		if (reloc.target_handle != target_handle) {
-			drm_gem_object_unreference(target_obj);
+	int ret = -EINVAL;
 
 	target_obj = drm_gem_object_lookup(dev, file_priv,
-							   reloc.target_handle);
-			if (target_obj == NULL) {
-				ret = -ENOENT;
-				break;
-			}
+					   reloc->target_handle);
+	if (target_obj == NULL)
+		return -ENOENT;
 
-			target_handle = reloc.target_handle;
-		}
 	target_offset = to_intel_bo(target_obj)->gtt_offset;
 
 #if WATCH_RELOC
@@ -3301,13 +3271,13 @@ i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
 		 "presumed %08x delta %08x\n",
 		 __func__,
 		 obj,
-			 (int) reloc.offset,
-			 (int) reloc.target_handle,
-			 (int) reloc.read_domains,
-			 (int) reloc.write_domain,
+		 (int) reloc->offset,
+		 (int) reloc->target_handle,
+		 (int) reloc->read_domains,
+		 (int) reloc->write_domain,
 		 (int) target_offset,
-			 (int) reloc.presumed_offset,
-			 reloc.delta);
+		 (int) reloc->presumed_offset,
+		 reloc->delta);
 #endif
 
 	/* The target buffer should have appeared before us in the
@@ -3315,127 +3285,188 @@ i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
 	 */
 	if (target_offset == 0) {
 		DRM_ERROR("No GTT space found for object %d\n",
-				  reloc.target_handle);
-			ret = -EINVAL;
-			break;
+			  reloc->target_handle);
+		goto err;
 	}
 
 	/* Validate that the target is in a valid r/w GPU domain */
-		if (reloc.write_domain & (reloc.write_domain - 1)) {
+	if (reloc->write_domain & (reloc->write_domain - 1)) {
 		DRM_ERROR("reloc with multiple write domains: "
 			  "obj %p target %d offset %d "
 			  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
 	}
-		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
+	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
 		DRM_ERROR("reloc with read/write CPU domains: "
 			  "obj %p target %d offset %d "
 			  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
 	}
-		if (reloc.write_domain && target_obj->pending_write_domain &&
-		    reloc.write_domain != target_obj->pending_write_domain) {
+	if (reloc->write_domain && target_obj->pending_write_domain &&
+	    reloc->write_domain != target_obj->pending_write_domain) {
 		DRM_ERROR("Write domain conflict: "
 			  "obj %p target %d offset %d "
 			  "new %08x old %08x\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.write_domain,
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->write_domain,
 			  target_obj->pending_write_domain);
-			ret = -EINVAL;
-			break;
+		goto err;
 	}
 
-		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
 
 	/* If the relocation already has the right value in it, no
 	 * more work needs to be done.
 	 */
-		if (target_offset == reloc.presumed_offset)
-			continue;
+	if (target_offset == reloc->presumed_offset)
+		goto out;
 
 	/* Check that the relocation address is valid... */
-		if (reloc.offset > obj->base.size - 4) {
+	if (reloc->offset > obj->base.size - 4) {
 		DRM_ERROR("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->base.size);
-			ret = -EINVAL;
-			break;
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		goto err;
 	}
-		if (reloc.offset & 3) {
+	if (reloc->offset & 3) {
 		DRM_ERROR("Relocation not 4-byte aligned: "
 			  "obj %p target %d offset %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset);
-			ret = -EINVAL;
-			break;
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		goto err;
 	}
 
 	/* and points to somewhere within the target object. */
-		if (reloc.delta >= target_obj->size) {
+	if (reloc->delta >= target_obj->size) {
 		DRM_ERROR("Relocation beyond target object bounds: "
 			  "obj %p target %d delta %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.delta, (int) target_obj->size);
-			ret = -EINVAL;
-			break;
+			  obj, reloc->target_handle,
+			  (int) reloc->delta,
+			  (int) target_obj->size);
+		goto err;
 	}
 
-		reloc.delta += target_offset;
+	reloc->delta += target_offset;
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
 		char *vaddr;
 
-			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
+		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
 		kunmap_atomic(vaddr);
 	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
 		uint32_t __iomem *reloc_entry;
 		void __iomem *reloc_page;
 
 		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
 		if (ret)
-				break;
+			goto err;
 
 		/* Map the page containing the relocation we're going to perform.  */
-			reloc.offset += obj->gtt_offset;
+		reloc->offset += obj->gtt_offset;
 		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-							      reloc.offset & PAGE_MASK);
+						      reloc->offset & PAGE_MASK);
 		reloc_entry = (uint32_t __iomem *)
-				(reloc_page + (reloc.offset & ~PAGE_MASK));
-			iowrite32(reloc.delta, reloc_entry);
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
 		io_mapping_unmap_atomic(reloc_page);
 	}
 
 	/* and update the user's relocation entry */
-		reloc.presumed_offset = target_offset;
+	reloc->presumed_offset = target_offset;
+
+out:
+	ret = 0;
+err:
+	drm_gem_object_unreference(target_obj);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct drm_file *file_priv,
+				    struct drm_i915_gem_exec_object2 *entry)
+{
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	int i, ret;
+
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc)))
+			return -EFAULT;
+
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+		if (ret)
+			return ret;
+
 		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
 					    &reloc.presumed_offset,
-					      sizeof(reloc.presumed_offset))) {
-		    ret = -EFAULT;
-		    break;
+					    sizeof(reloc.presumed_offset)))
+			return -EFAULT;
 	}
+
+	return 0;
 }
 
-	drm_gem_object_unreference(target_obj);
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct drm_file *file_priv,
+					 struct drm_i915_gem_exec_object2 *entry,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+			     struct drm_file *file,
+			     struct drm_gem_object **object_list,
+			     struct drm_i915_gem_exec_object2 *exec_list,
+			     int count)
+{
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object(obj, file,
+							  &exec_list[i]);
+		if (ret)
 			return ret;
 	}
 
+	return 0;
+}
+
 static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
+i915_gem_execbuffer_reserve(struct drm_device *dev,
 			    struct drm_file *file,
 			    struct drm_gem_object **object_list,
 			    struct drm_i915_gem_exec_object2 *exec_list,
@@ -3501,6 +3532,87 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_file *file,
+				  struct drm_gem_object **object_list,
+				  struct drm_i915_gem_exec_object2 *exec_list,
+				  int count)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	int i, total, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->in_execbuffer = false;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec_list[i].relocation_count;
+
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL) {
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec_list[i].relocation_count *
+				   sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		total += exec_list[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  count);
+	if (ret)
+		goto err;
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+							       &exec_list[i],
+							       reloc + total);
+		if (ret)
+			goto err;
+
+		total += exec_list[i].relocation_count;
+	}
+
+	/* Leave the user relocations as are, this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	return ret;
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
 				struct drm_file *file,
@@ -3630,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+		int length; /* limited by fault_in_pages_readable() */
 
+		/* First check for malicious input causing overflow */
+		if (exec[i].relocation_count >
+		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+			return -EINVAL;
+
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
 		if (!access_ok(VERIFY_READ, ptr, length))
 			return -EFAULT;
 
@@ -3774,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_pin(dev, file,
+	ret = i915_gem_execbuffer_reserve(dev, file,
 					  object_list, exec_list,
 					  args->buffer_count);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+	ret = i915_gem_execbuffer_relocate(dev, file,
+					   object_list, exec_list,
+					   args->buffer_count);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, file,
+								object_list,
+								exec_list,
+								args->buffer_count);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
 		if (ret)
 			goto err;
 	}
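The execbuffer hunks above split relocation handling into a per-entry helper plus two drivers: i915_gem_execbuffer_relocate_object() copies each relocation with __copy_from_user_inatomic() while struct_mutex is held, so a page fault simply returns -EFAULT, and i915_gem_do_execbuffer() then falls back to i915_gem_execbuffer_relocate_slow(), which drops the lock, copies the relocation lists with copy_from_user(), retakes the lock, re-reserves the buffers, and applies the relocations from the kernel copy. validate_exec_list() additionally rejects relocation counts whose byte size would overflow before access_ok() sees them. A minimal userspace sketch of that overflow guard (the struct below is a stand-in, not the real drm_i915_gem_relocation_entry layout):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in relocation entry; only its size matters for the check. */
struct reloc_entry {
	uint64_t offset;
	uint64_t presumed_offset;
	uint32_t target_handle;
	uint32_t delta;
	uint32_t read_domains;
	uint32_t write_domain;
};

/* Returns the byte length of the user's relocation array, or -1 if the
 * multiplication would overflow an int (the kernel returns -EINVAL). */
static int reloc_list_length(uint64_t relocation_count)
{
	/* compare against the quotient instead of multiplying first */
	if (relocation_count > INT_MAX / sizeof(struct reloc_entry))
		return -1;
	return (int)(relocation_count * sizeof(struct reloc_entry));
}

int main(void)
{
	printf("%d\n", reloc_list_length(4));              /* small list: valid length */
	printf("%d\n", reloc_list_length(UINT64_MAX / 8)); /* would overflow: rejected */
	return 0;
}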
drivers/gpu/drm/i915/i915_suspend.c  +20 −20
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
+	/* Cursor state */
+	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+	dev_priv->saveCURABASE = I915_READ(CURABASE);
+	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+	if (IS_GEN2(dev))
+		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
 	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
 
+	/* Cursor state */
+	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+	if (IS_GEN2(dev))
+		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
 	return;
 }


@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
	/* Don't save them in KMS mode */
	/* Don't save them in KMS mode */
	i915_save_modeset_reg(dev);
	i915_save_modeset_reg(dev);


	/* Cursor state */
	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
	dev_priv->saveCURABASE = I915_READ(CURABASE);
	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
	if (IS_GEN2(dev))
		dev_priv->saveCURSIZE = I915_READ(CURSIZE);

	/* CRT state */
	/* CRT state */
	if (HAS_PCH_SPLIT(dev)) {
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->saveADPA = I915_READ(PCH_ADPA);
		dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
 	/* Don't restore them in KMS mode */
 	i915_restore_modeset_reg(dev);
 
-	/* Cursor state */
-	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
 	/* CRT state */
 	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
drivers/gpu/drm/i915/intel_display.c  +6 −1
@@ -5336,9 +5336,14 @@ static void intel_setup_outputs(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *encoder;
 	bool dpd_is_edp = false;
+	bool has_lvds = false;
 
 	if (IS_MOBILE(dev) && !IS_I830(dev))
-		intel_lvds_init(dev);
+		has_lvds = intel_lvds_init(dev);
+	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+		/* disable the panel fitter on everything but LVDS */
+		I915_WRITE(PFIT_CONTROL, 0);
+	}
 
 	if (HAS_PCH_SPLIT(dev)) {
 		dpd_is_edp = intel_dpd_is_edp(dev);
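The final hunk has intel_lvds_init() report whether an LVDS panel was actually registered, so intel_setup_outputs() can zero the panel fitter when no LVDS is present on pre-PCH-split hardware. A minimal sketch of that decision, with the register write and the init call reduced to stand-ins (the real code does I915_WRITE(PFIT_CONTROL, 0)):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for I915_WRITE(PFIT_CONTROL, 0); just logs the intent. */
static void clear_panel_fitter(void)
{
	printf("PFIT_CONTROL <- 0 (panel fitter disabled)\n");
}

/* Stand-in for intel_lvds_init(): true only when a panel was found. */
static bool lvds_init(bool panel_present)
{
	return panel_present;
}

static void setup_outputs(bool mobile, bool pch_split, bool panel_present)
{
	bool has_lvds = false;

	if (mobile)
		has_lvds = lvds_init(panel_present);
	/* disable the panel fitter on everything but LVDS */
	if (!has_lvds && !pch_split)
		clear_panel_fitter();
}

int main(void)
{
	setup_outputs(true, false, false); /* mobile chip, no panel: fitter cleared */
	setup_outputs(true, false, true);  /* panel present: fitter left alone */
	return 0;
}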