Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c0f3f90c authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next

Changes for vmwgfx for 4.4. If there is time, I'll follow up with a series
to move to threaded irqs.

* 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Replace iowrite/ioread with volatile memory accesses
  drm/vmwgfx: Turn off support for multisample count != 0 v2
  drm/vmwgfx: switch from ioremap_cache to memremap
parents a76edb8c b76ff5ea
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -752,8 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
					    dev_priv->mmio_size);
	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
@@ -907,7 +907,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
@@ -958,7 +958,7 @@ static int vmw_driver_unload(struct drm_device *dev)
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);
+27 −1
Original line number Diff line number Diff line
@@ -375,7 +375,7 @@ struct vmw_private {
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 __iomem *mmio_virt;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
@@ -1206,4 +1206,30 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 *
 * Return: The 32-bit value at @addr. READ_ONCE() forces a single,
 * non-elided access so the compiler cannot cache or tear the read
 * of this device-shared memory.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @value: The 32-bit value to store
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping. WRITE_ONCE() forces
 * a single, non-elided store so the compiler cannot defer or tear
 * the write to this device-shared memory.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}
#endif
+12 −12
Original line number Diff line number Diff line
@@ -142,8 +142,8 @@ static bool vmw_fence_enable_signaling(struct fence *f)
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

@@ -386,14 +386,14 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 __iomem *fifo_mem;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

@@ -401,7 +401,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->base.seqno,
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
@@ -430,18 +430,18 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 __iomem *fifo_mem;
	u32 *fifo_mem;

	if (fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
@@ -453,9 +453,9 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -477,7 +477,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
+50 −59
Original line number Diff line number Diff line
@@ -36,7 +36,7 @@ struct vmw_temp_set_context {

bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

@@ -60,11 +60,11 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
	fifo_min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem +
	hwversion = vmw_mmio_read(fifo_mem +
				  ((fifo->capabilities &
				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				   SVGA_FIFO_3D_HWVERSION_REVISED :
@@ -85,13 +85,13 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)

bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32  *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

@@ -100,7 +100,7 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)

int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32  *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

@@ -137,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min,  fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
	vmw_mmio_write(min,  fifo_mem + SVGA_FIFO_STOP);
	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem  + SVGA_FIFO_MIN);
	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
@@ -157,7 +157,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
@@ -165,31 +165,23 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	static DEFINE_SPINLOCK(ping_lock);
	unsigned long irq_flags;
	u32 *fifo_mem = dev_priv->mmio_virt;

	/*
	 * The ping_lock is needed because we don't have an atomic
	 * test-and-set of the SVGA_FIFO_BUSY register.
	 */
	spin_lock_irqsave(&ping_lock, irq_flags);
	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
	preempt_disable();
	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}
	spin_unlock_irqrestore(&ping_lock, irq_flags);
	preempt_enable();
}

void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32  *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
@@ -213,11 +205,11 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
	u32  *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}
@@ -321,7 +313,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32  *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
@@ -329,9 +321,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;
@@ -342,7 +334,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

@@ -376,7 +368,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
					vmw_mmio_write(bytes, fifo_mem +
						       SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
@@ -427,7 +419,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
}

static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 __iomem *fifo_mem,
			      u32  *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -439,17 +431,16 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 __iomem *fifo_mem,
			       u32  *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
@@ -457,12 +448,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
@@ -471,10 +462,10 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	u32  *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
@@ -507,11 +498,11 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+28 −10
Original line number Diff line number Diff line
@@ -64,7 +64,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
		u32 *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -73,7 +73,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		}

		param->value =
			ioread32(fifo_mem +
			vmw_mmio_read(fifo_mem +
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
@@ -122,6 +122,22 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
	return 0;
}

/**
 * vmw_mask_multisample - Filter multisample support out of a devcap value
 *
 * @cap: The devcap index being reported.
 * @fmt_value: The raw value read from the device for that devcap.
 *
 * Multisample count != 0 is not supported: the maskable-samples cap is
 * reported as 0, and DX format caps have their multisample bits cleared.
 * Every other cap value is passed through unmodified.
 */
static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
{
	const u32 ms_bits = SVGADX_DXFMT_MULTISAMPLE_2 |
			    SVGADX_DXFMT_MULTISAMPLE_4 |
			    SVGADX_DXFMT_MULTISAMPLE_8;

	/* If the header is updated, update the format test as well! */
	BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);

	if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
		return 0;

	if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
	    cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
		fmt_value &= ~ms_bits;

	return fmt_value;
}

static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
@@ -147,7 +163,8 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
	for (i = 0; i < max_size; ++i) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
		compat_cap->pairs[i][0] = i;
		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		compat_cap->pairs[i][1] = vmw_mask_multisample
			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
	}
	spin_unlock(&dev_priv->cap_lock);

@@ -162,7 +179,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 __iomem *fifo_mem;
	u32 *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
@@ -202,7 +219,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
			*bounce32++ = vmw_mask_multisample
				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
@@ -211,7 +229,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			goto out_err;
	} else {
		fifo_mem = dev_priv->mmio_virt;
		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
Loading