Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5f3dbedf authored by Dave Airlie
Browse files

Merge branch 'drm-intel-next' of ../anholt-2.6 into drm-linus

parents 7a1fb5d0 1055f9dd
Loading
Loading
Loading
Loading
+1 −6
Original line number Original line Diff line number Diff line
@@ -505,7 +505,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
	struct drm_local_map *map = NULL;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	struct drm_hash_item *hash;
	unsigned long prot;
	int ret = 0;
	int ret = 0;


	mutex_lock(&dev->struct_mutex);
	mutex_lock(&dev->struct_mutex);
@@ -538,11 +537,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_private_data = map->handle;
	/* FIXME: use pgprot_writecombine when available */
	/* FIXME: use pgprot_writecombine when available */
	prot = pgprot_val(vma->vm_page_prot);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#ifdef CONFIG_X86
	prot |= _PAGE_CACHE_WC;
#endif
	vma->vm_page_prot = __pgprot(prot);


	/* Take a ref for this mapping of the object, so that the fault
	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * handler can dereference the mmap offset's pointer to the object.
+1 −0
Original line number Original line Diff line number Diff line
@@ -451,6 +451,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)


	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
}
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);


/**
/**
 * drm_sysfs_device_add - adds a class device to sysfs for a character driver
 * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+1 −8
Original line number Original line Diff line number Diff line
@@ -922,7 +922,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
	 * Some of the preallocated space is taken by the GTT
	 * Some of the preallocated space is taken by the GTT
	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
	 */
	 */
	if (IS_G4X(dev))
	if (IS_G4X(dev) || IS_IGD(dev))
		overhead = 4096;
		overhead = 4096;
	else
	else
		overhead = (*aperture_size / 1024) + 4096;
		overhead = (*aperture_size / 1024) + 4096;
@@ -1030,13 +1030,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
	if (ret)
	if (ret)
		goto destroy_ringbuffer;
		goto destroy_ringbuffer;


	/* FIXME: re-add hotplug support */
#if 0
	ret = drm_hotplug_init(dev);
	if (ret)
		goto destroy_ringbuffer;
#endif

	/* Always safe in the mode setting case. */
	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;
	dev->vblank_disable_allowed = 1;
+5 −0
Original line number Original line Diff line number Diff line
@@ -159,6 +159,9 @@ typedef struct drm_i915_private {
	u32 irq_mask_reg;
	u32 irq_mask_reg;
	u32 pipestat[2];
	u32 pipestat[2];


	u32 hotplug_supported_mask;
	struct work_struct hotplug_work;

	int tex_lru_log_granularity;
	int tex_lru_log_granularity;
	int allow_batchbuffer;
	int allow_batchbuffer;
	struct mem_block *agp_heap;
	struct mem_block *agp_heap;
@@ -297,6 +300,7 @@ typedef struct drm_i915_private {
		 *
		 *
		 * A reference is held on the buffer while on this list.
		 * A reference is held on the buffer while on this list.
		 */
		 */
		spinlock_t active_list_lock;
		struct list_head active_list;
		struct list_head active_list;


		/**
		/**
@@ -810,6 +814,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
						      IS_I915GM(dev)))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))


#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
#define PRIMARY_RINGBUFFER_SIZE         (128*1024)


+29 −9
Original line number Original line Diff line number Diff line
@@ -1072,6 +1072,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	case -EAGAIN:
	case -EAGAIN:
		return VM_FAULT_OOM;
		return VM_FAULT_OOM;
	case -EFAULT:
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
		return VM_FAULT_SIGBUS;
	default:
	default:
		return VM_FAULT_NOPAGE;
		return VM_FAULT_NOPAGE;
@@ -1324,8 +1325,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
		obj_priv->active = 1;
		obj_priv->active = 1;
	}
	}
	/* Move from whatever list we were on to the tail of execution. */
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
	obj_priv->last_rendering_seqno = seqno;
}
}


@@ -1467,6 +1470,7 @@ i915_gem_retire_request(struct drm_device *dev,
	/* Move any buffers on the active list that are no longer referenced
	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;
		struct drm_i915_gem_object *obj_priv;
@@ -1481,7 +1485,7 @@ i915_gem_retire_request(struct drm_device *dev,
		 * this seqno.
		 * this seqno.
		 */
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
		if (obj_priv->last_rendering_seqno != request->seqno)
			return;
			goto out;


#if WATCH_LRU
#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -1493,6 +1497,8 @@ i915_gem_retire_request(struct drm_device *dev,
		else
		else
			i915_gem_object_move_to_inactive(obj);
			i915_gem_object_move_to_inactive(obj);
	}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}
}


/**
/**
@@ -1990,20 +1996,23 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
	int regnum = obj_priv->fence_reg;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;


	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		     __func__, obj_priv->gtt_offset);
		return;
		return;
	}
	}


	pitch_val = (obj_priv->stride / 128) - 1;
	pitch_val = (obj_priv->stride / 128) - 1;

	WARN_ON(pitch_val & ~0x0000000f);
	val = obj_priv->gtt_offset;
	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I830_FENCE_SIZE_BITS(obj->size);
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;
	val |= I830_FENCE_REG_VALID;


@@ -2194,7 +2203,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
		return -EBUSY;
		return -EBUSY;
	if (alignment == 0)
	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (PAGE_SIZE - 1)) {
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
		return -EINVAL;
	}
	}
@@ -2211,15 +2220,20 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
		}
		}
	}
	}
	if (obj_priv->gtt_space == NULL) {
	if (obj_priv->gtt_space == NULL) {
		bool lists_empty;

		/* If the gtt is empty and we're still having trouble
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 * fitting our object in, we're out of memory.
		 */
		 */
#if WATCH_LRU
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
#endif
		if (list_empty(&dev_priv->mm.inactive_list) &&
		spin_lock(&dev_priv->mm.active_list_lock);
		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
			       list_empty(&dev_priv->mm.flushing_list) &&
			       list_empty(&dev_priv->mm.flushing_list) &&
		    list_empty(&dev_priv->mm.active_list)) {
			       list_empty(&dev_priv->mm.active_list));
		spin_unlock(&dev_priv->mm.active_list_lock);
		if (lists_empty) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOMEM;
			return -ENOMEM;
		}
		}
@@ -3675,6 +3689,7 @@ i915_gem_idle(struct drm_device *dev)


	i915_gem_retire_requests(dev);
	i915_gem_retire_requests(dev);


	spin_lock(&dev_priv->mm.active_list_lock);
	if (!dev_priv->mm.wedged) {
	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 * waited for a sequence higher than any pending execbuffer
@@ -3701,6 +3716,7 @@ i915_gem_idle(struct drm_device *dev)
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}
	}
	spin_unlock(&dev_priv->mm.active_list_lock);


	while (!list_empty(&dev_priv->mm.flushing_list)) {
	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;
		struct drm_i915_gem_object *obj_priv;
@@ -3949,7 +3965,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
	if (ret != 0)
	if (ret != 0)
		return ret;
		return ret;


	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
@@ -3993,6 +4012,7 @@ i915_gem_load(struct drm_device *dev)
{
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_private_t *dev_priv = dev->dev_private;


	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
Loading