
Commit c3663216 authored by Dave Airlie

Merge branch 'vmwgfx-fixes-3.19' of git://people.freedesktop.org/~thomash/linux into drm-fixes

Fix a vmwgfx regression: sleeping in the wrong task state.

* 'vmwgfx-fixes-3.19' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Replace the hw mutex with a hw spinlock
parents b942c653 496eb6fd
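
The regression being fixed is the classic sleeping-lock-in-a-wait-loop bug: vmw_fifo_idle() could be reached from a wait path that had already set the task state to !TASK_RUNNING, and taking the sleeping hw_mutex there both trips the blocking-call debug check and silently resets the state to TASK_RUNNING. A minimal illustrative sketch of the bug class (hypothetical names, not the driver code):

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mutex.h>

/* Hypothetical device, for illustration only. */
struct my_dev {
	wait_queue_head_t wq;
	struct mutex hw_mutex;
	bool (*hw_is_idle)(struct my_dev *dev);
};

static void wait_for_hw_idle(struct my_dev *dev)
{
	bool idle;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
		/*
		 * BUG: the task state is now TASK_INTERRUPTIBLE and
		 * mutex_lock() may sleep, which resets the state to
		 * TASK_RUNNING behind our back. A spinlock never sleeps,
		 * so it is safe in this position; hence the replacement
		 * in the patch below.
		 */
		mutex_lock(&dev->hw_mutex);
		idle = dev->hw_is_idle(dev);
		mutex_unlock(&dev->hw_mutex);
		if (idle)
			break;
		schedule();
	}
	finish_wait(&dev->wq, &wait);
}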
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +5 −23
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
 		if (unlikely(ret != 0))
 			--dev_priv->num_3d_resources;
 	} else if (unhide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
 			  ~SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
 	mutex_lock(&dev_priv->release_mutex);
 	if (unlikely(--dev_priv->num_3d_resources == 0))
 		vmw_release_device(dev_priv);
-	else if (hide_svga) {
-		mutex_lock(&dev_priv->hw_mutex);
+	else if (hide_svga)
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
 			  SVGA_REG_ENABLE_HIDE);
-		mutex_unlock(&dev_priv->hw_mutex);
-	}
 
 	n3d = (int32_t) dev_priv->num_3d_resources;
 	mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
 	dev_priv->last_read_seqno = (uint32_t) -100;
-	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
+	spin_lock_init(&dev_priv->hw_lock);
+	spin_lock_init(&dev_priv->waiter_lock);
+	spin_lock_init(&dev_priv->cap_lock);
 
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->enable_fb = enable_fbdev;
 
-	mutex_lock(&dev_priv->hw_mutex);
-
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 	if (svga_id != SVGA_ID_2) {
 		ret = -ENOSYS;
 		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
-		mutex_unlock(&dev_priv->hw_mutex);
 		goto out_err0;
 	}
 
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
 	ret = vmw_dma_masks(dev_priv);
-	if (unlikely(ret != 0)) {
-		mutex_unlock(&dev_priv->hw_mutex);
+	if (unlikely(ret != 0))
 		goto out_err0;
-	}
 
 	/*
 	 * Limit back buffer size to VRAM size.  Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (dev_priv->prim_bb_mem > dev_priv->vram_size)
 		dev_priv->prim_bb_mem = dev_priv->vram_size;
 
-	mutex_unlock(&dev_priv->hw_mutex);
-
 	vmw_print_capabilities(dev_priv->capabilities);
 
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
 		if (unlikely(ret != 0))
 			return ret;
 		vmw_kms_save_vga(dev_priv);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	if (active) {
@@ -1196,9 +1184,7 @@ static int vmw_master_set(struct drm_device *dev,
 	if (!dev_priv->enable_fb) {
 		vmw_kms_restore_vga(dev_priv);
 		vmw_3d_resource_dec(dev_priv, true);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 	return ret;
 }
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
 			DRM_ERROR("Unable to clean VRAM on master drop.\n");
 		vmw_kms_restore_vga(dev_priv);
 		vmw_3d_resource_dec(dev_priv, true);
-		mutex_lock(&dev_priv->hw_mutex);
 		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-		mutex_unlock(&dev_priv->hw_mutex);
 	}
 
 	dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	mutex_lock(&dev_priv->hw_mutex);
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	(void) vmw_read(dev_priv, SVGA_REG_ID);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	/**
 	 * Reclaim 3d reference held by fbdev and potentially
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +21 −4
@@ -399,7 +399,8 @@ struct vmw_private {
 	uint32_t memory_size;
 	bool has_gmr;
 	bool has_mob;
-	struct mutex hw_mutex;
+	spinlock_t hw_lock;
+	spinlock_t cap_lock;
 
 	/*
 	 * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
 	atomic_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
-	int fence_queue_waiters; /* Protected by hw_mutex */
-	int goal_queue_waiters; /* Protected by hw_mutex */
+	spinlock_t waiter_lock;
+	int fence_queue_waiters; /* Protected by waiter_lock */
+	int goal_queue_waiters; /* Protected by waiter_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 	return (struct vmw_master *) master->driver_priv;
 }
 
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
 			     unsigned int offset, uint32_t value)
 {
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 				unsigned int offset)
 {
-	uint32_t val;
+	unsigned long irq_flags;
+	u32 val;
 
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
 	return val;
 }
 
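A note on why the lock cannot be left to call sites (reviewer's sketch, not part of the patch): the SVGA device is programmed through a shared index/value port pair, so every register access is two I/O instructions that must not interleave between CPUs. Unserialized, two writers can pair the wrong index with the wrong value:

/*
 * Sketch of the race that vmw_write() now prevents internally:
 *
 *   CPU0: outl(SVGA_REG_A, io_start + VMWGFX_INDEX_PORT);
 *   CPU1: outl(SVGA_REG_B, io_start + VMWGFX_INDEX_PORT);
 *   CPU0: outl(val_a,      io_start + VMWGFX_VALUE_PORT);  <- val_a lands in SVGA_REG_B
 *
 * With hw_lock taken inside the accessors, callers need no locking,
 * e.g. the probe sequence in vmw_driver_load():
 */
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);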
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +2 −16
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
 	struct vmw_private *dev_priv;
 	spinlock_t lock;
 	struct list_head fence_list;
-	struct work_struct work, ping_work;
+	struct work_struct work;
 	u32 user_fence_size;
 	u32 fence_size;
 	u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
 	return "svga";
 }
 
-static void vmw_fence_ping_func(struct work_struct *work)
-{
-	struct vmw_fence_manager *fman =
-		container_of(work, struct vmw_fence_manager, ping_work);
-
-	vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
 static bool vmw_fence_enable_signaling(struct fence *f)
 {
 	struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
 		return false;
 
-	if (mutex_trylock(&dev_priv->hw_mutex)) {
-		vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
-		mutex_unlock(&dev_priv->hw_mutex);
-	} else
-		schedule_work(&fman->ping_work);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
 	return true;
 }
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	INIT_LIST_HEAD(&fman->fence_list);
 	INIT_LIST_HEAD(&fman->cleanup_list);
 	INIT_WORK(&fman->work, &vmw_fence_work_func);
-	INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
 	fman->fifo_down = true;
 	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
 	bool lists_empty;
 
 	(void) cancel_work_sync(&fman->work);
-	(void) cancel_work_sync(&fman->ping_work);
 
 	spin_lock_irqsave(&fman->lock, irq_flags);
 	lists_empty = list_empty(&fman->fence_list) &&
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +15 −21
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 		if (!dev_priv->has_mob)
 			return false;
 
-		mutex_lock(&dev_priv->hw_mutex);
+		spin_lock(&dev_priv->cap_lock);
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
 		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
-		mutex_unlock(&dev_priv->hw_mutex);
+		spin_unlock(&dev_priv->cap_lock);
 
 		return (result != 0);
 	}
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
 	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
-	mutex_lock(&dev_priv->hw_mutex);
 	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
 	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
 	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	mb();
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
-	mutex_unlock(&dev_priv->hw_mutex);
 
 	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 }
 
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	static DEFINE_SPINLOCK(ping_lock);
+	unsigned long irq_flags;
 
+	/*
+	 * The ping_lock is needed because we don't have an atomic
+	 * test-and-set of the SVGA_FIFO_BUSY register.
	 */
+	spin_lock_irqsave(&ping_lock, irq_flags);
 	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
 		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
 	}
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
-	mutex_lock(&dev_priv->hw_mutex);
-
-	vmw_fifo_ping_host_locked(dev_priv, reason);
-
-	mutex_unlock(&dev_priv->hw_mutex);
-}
+	spin_unlock_irqrestore(&ping_lock, irq_flags);
+}
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
-	mutex_lock(&dev_priv->hw_mutex);
-
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	vmw_write(dev_priv, SVGA_REG_TRACES,
 		  dev_priv->traces_state);
 
-	mutex_unlock(&dev_priv->hw_mutex);
 	vmw_marker_queue_takedown(&fifo->marker_queue);
 
 	if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 		return vmw_fifo_wait_noirq(dev_priv, bytes,
 					   interruptible, timeout);
 
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
 		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
 		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 	else if (likely(ret > 0))
 		ret = 0;
 
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->waiter_lock);
 	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
 		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
 		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->waiter_lock);
 
 	return ret;
 }
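
One clarification on the ping_lock comment above (editor's note): the lock is a stand-in for an atomic test-and-set that the hardware interface does not provide, since SVGA_FIFO_BUSY lives in __iomem FIFO space where no atomic read-modify-write is available. If the flag were ordinary memory, the lock-free equivalent would be roughly (hypothetical sketch, assuming an atomic_t flag):

/* Hypothetical: only valid if the busy flag were a normal atomic_t. */
static atomic_t fifo_busy = ATOMIC_INIT(0);

static void ping_host_atomic(struct vmw_private *dev_priv, uint32_t reason)
{
	/* atomic_xchg() is the test-and-set the io register lacks. */
	if (atomic_xchg(&fifo_busy, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}

Note also that the function-local static DEFINE_SPINLOCK(ping_lock) is a single lock shared by all device instances; that is acceptable here because the critical section is a few I/O accesses and pings are rare.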
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +4 −4
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
 		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
 	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
 
-	mutex_lock(&dev_priv->hw_mutex);
+	spin_lock(&dev_priv->cap_lock);
 	for (i = 0; i < max_size; ++i) {
 		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 		compat_cap->pairs[i][0] = i;
 		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 	}
-	mutex_unlock(&dev_priv->hw_mutex);
+	spin_unlock(&dev_priv->cap_lock);
 
 	return 0;
 }
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 		if (num > SVGA3D_DEVCAP_MAX)
 			num = SVGA3D_DEVCAP_MAX;
 
-		mutex_lock(&dev_priv->hw_mutex);
+		spin_lock(&dev_priv->cap_lock);
 		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
-		mutex_unlock(&dev_priv->hw_mutex);
+		spin_unlock(&dev_priv->cap_lock);
 	} else if (gb_objects) {
 		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
 		if (unlikely(ret != 0))
		if (unlikely(ret != 0))
Loading