
Commit 0a3579e3 authored by Dave Airlie

Merge tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux into drm-fixes

Pull request of 2015-09-24

Vmwgfx fixes for 4.3:
 - A couple of uninitialized variable fixes by Christian Engelmayer
 - A TTM fix for a bug that causes problems with the new vmwgfx device init
 - A vmwgfx refcounting fix
 - A vmwgfx iomem caching fix
 - A DRM change to also allow control clients to read the drm driver version.

* tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux:
  drm: Allow also control clients to check the drm version
  drm/vmwgfx: Fix uninitialized return in vmw_kms_helper_dirty()
  drm/vmwgfx: Fix uninitialized return in vmw_cotable_unbind()
  drm/vmwgfx: Only build on X86
  drm/ttm: Fix memory space allocation v2
  drm/vmwgfx: Map the fifo as cached
  drm/vmwgfx: Fix up user_dmabuf refcounting
parents e4b35f95 30c64664
drivers/gpu/drm/drm_ioctl.c  +2 −1

@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
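
For context: every entry in this table is filtered through drm_ioctl_permit(), and a client that opened a control node (/dev/dri/controlD*) is rejected with -EACCES for any ioctl that does not carry DRM_CONTROL_ALLOW. The check looks roughly like the sketch below, paraphrased from the 4.3-era drm_ioctl.c rather than copied verbatim, so treat the exact ordering and wording as approximate:

/* Paraphrased sketch of the 4.3-era permission check in drm_ioctl.c. */
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
	/* ROOT_ONLY is only allowed for CAP_SYS_ADMIN */
	if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
		return -EACCES;

	/* AUTH is only allowed for authenticated or render clients */
	if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
		     !file_priv->authenticated))
		return -EACCES;

	/* MASTER is only allowed for master or control clients */
	if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
		     !drm_is_control_client(file_priv)))
		return -EACCES;

	/* Control clients must be explicitly allowed */
	if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
		     drm_is_control_client(file_priv)))
		return -EACCES;

	/* Render clients must be explicitly allowed */
	if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
		     drm_is_render_client(file_priv)))
		return -EACCES;

	return 0;
}

Adding DRM_CONTROL_ALLOW to DRM_IOCTL_VERSION therefore lets a control-only client (for example, a management process calling libdrm's drmGetVersion()) query the driver version instead of being turned away by the fourth check above.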
drivers/gpu/drm/ttm/ttm_bo.c  +16 −12

@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
+		if (!man->has_type || !man->use_type)
+			continue;
 
 		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 						&cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!type_ok)
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
-		if (man->has_type && man->use_type) {
-			type_found = true;
-			ret = (*man->func->get_node)(man, bo, place, mem);
-			if (unlikely(ret))
-				return ret;
-		}
+		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (unlikely(ret))
+			return ret;
+
 		if (mem->mm_node)
 			break;
 	}
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return 0;
 	}
 
-	if (!type_found)
-		return -EINVAL;
-
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
 
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
-		if (!man->has_type)
+		if (!man->has_type || !man->use_type)
 			continue;
 		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;
 
+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret == -ERESTARTSYS)
 			has_erestartsys = true;
 	}
-	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
-	return ret;
+
+	if (!type_found) {
+		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
+		return -EINVAL;
+	}
+
+	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
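
Taken together, the ttm_bo_mem_space() hunks change what type_found means: it is now set only when an enabled manager (has_type && use_type) is compatible with the requested placement, so -EINVAL is reserved for placements no manager could ever satisfy, a disabled manager is simply skipped, and ordinary allocation pressure still surfaces as -ENOMEM or -ERESTARTSYS. The following standalone model illustrates that decision logic; every type and field in it is a hypothetical stand-in, not TTM's real data structures:

/* Standalone model of the fixed search order in ttm_bo_mem_space().
 * Everything here is a hypothetical stand-in for illustration only. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_manager {
	bool has_type;    /* manager initialized for this memory type */
	bool use_type;    /* manager currently enabled */
	bool compatible;  /* placement flags match this type */
	bool has_space;   /* a node could be allocated right now */
};

static int model_mem_space(const struct fake_manager *man, int n)
{
	bool type_found = false;
	int i;

	for (i = 0; i < n; ++i) {
		if (!man[i].has_type || !man[i].use_type)
			continue;          /* disabled manager: skip, don't fail */
		if (!man[i].compatible)
			continue;
		type_found = true;         /* a usable type exists... */
		if (man[i].has_space)
			return 0;          /* ...and it has room: success */
	}

	/* (The real function runs a second, "busy placement" pass with
	 * eviction here, following the same type_found rule.) */

	if (!type_found)
		return -EINVAL;   /* nothing could ever satisfy the placement */
	return -ENOMEM;           /* usable types exist, but all are full */
}

int main(void)
{
	struct fake_manager full_vram = {
		.has_type = true, .use_type = true,
		.compatible = true, .has_space = false,
	};

	/* A merely-full manager yields -ENOMEM, not -EINVAL. */
	printf("%d\n", model_mem_space(&full_vram, 1));
	return 0;
}

In this model a manager that is only out of space yields -ENOMEM, while a request no enabled manager can accept yields -EINVAL, which is exactly the split the new failure path (printk + -EINVAL only when !type_found) encodes.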


drivers/gpu/drm/vmwgfx/Kconfig  +1 −1

 config DRM_VMWGFX
 	tristate "DRM driver for VMware Virtual GPU"
-	depends on DRM && PCI
+	depends on DRM && PCI && X86
 	select FB_DEFERRED_IO
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  +1 −2

@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
-	int ret;
 
 	if (list_empty(&res->mob_head))
 		return 0;
@@ -328,7 +327,7 @@
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
 
-	return ret;
+	return 0;
 }
 
 /**
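
The cotable fix addresses a plain uninitialized-variable bug: ret was declared but never assigned on the path that reached the final return, so the function could hand back stack garbage; since nothing on that path can fail, returning 0 directly is the whole fix. A minimal userspace illustration of the pattern, with made-up function names unrelated to the driver:

/* Minimal illustration of the "uninitialized return" pattern;
 * the function names are hypothetical, not vmwgfx code. */
#include <stdio.h>

static int unbind_buggy(int nothing_bound)
{
	int ret;              /* never assigned below */

	if (nothing_bound)
		return 0;
	/* ...work that cannot fail... */
	return ret;           /* undefined value: the reported bug */
}

static int unbind_fixed(int nothing_bound)
{
	if (nothing_bound)
		return 0;
	/* ...work that cannot fail... */
	return 0;             /* explicit success, no stray local */
}

int main(void)
{
	/* Only the fixed variant is exercised; calling unbind_buggy(0)
	 * would read an uninitialized variable (undefined behaviour). */
	printf("%d\n", unbind_fixed(0));
	(void)unbind_buggy;   /* keep the buggy variant for comparison */
	return 0;
}

Compiler warnings such as -Wuninitialized, or static analysis, typically flag the buggy variant, which is how this class of bug is usually caught.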
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  +2 −8

@@ -752,11 +752,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-
-	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
-					       dev_priv->mmio_size);
-
-	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
+	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
 					    dev_priv->mmio_size);
 
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
@@ -913,7 +909,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
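
Because the FIFO/MMIO range is now mapped with ioremap_cache(), there is no write-combining MTRR left to manage, which is why the paired arch_phys_wc_del() calls drop out of both the error path and vmw_driver_unload(): the only resource to undo is the mapping itself. Roughly, the surviving pairing has the shape sketched below; the helper names are hypothetical and this is a paraphrase, not the driver's exact code:

/* Paraphrased shape of the mapping lifecycle after this change;
 * vmw_map_fifo()/vmw_unmap_fifo() are made-up helper names. */
static int vmw_map_fifo(struct vmw_private *dev_priv)
{
	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
					    dev_priv->mmio_size);
	if (unlikely(dev_priv->mmio_virt == NULL))
		return -ENOMEM;		/* no MTRR handle to clean up */
	return 0;
}

static void vmw_unmap_fifo(struct vmw_private *dev_priv)
{
	iounmap(dev_priv->mmio_virt);	/* the only teardown step left */
}

The usual motivation for a change like this is read cost: a write-combined mapping leaves reads uncached and slow, and the FIFO is read as well as written, so a cached mapping avoids that penalty (the commit title only says "Map the fifo as cached", so treat this rationale as an inference).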