Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ade51015 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge branch 'drm-vmware-next' into drm-core-next

* drm-vmware-next:
  drm/vmwgfx: Bump driver minor
  vmwgfx: Move function declaration to correct header
  drm/vmwgfx: Treat out-of-range initial width and height as host errors
  vmwgfx: Pick up the initial size from the width and height regs
  vmwgfx: Add page flip support
  vmwgfx: Pipe fence out of screen object dirty functions
  vmwgfx: Make it possible to get fence from execbuf
  vmwgfx: Clean up pending event references to struct drm_file objects on close
  vmwgfx: Rework fence event action
parents 8bf42225 0a240ec4
Loading
Loading
Loading
Loading
+53 −0
Original line number Diff line number Diff line
@@ -38,6 +38,10 @@
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
@@ -387,6 +391,41 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
	BUG_ON(n3d < 0);
}

/**
 * vmw_get_initial_size - Set up initial_[width|height] on @dev_priv.
 *
 * Reads the host-provided SVGA_REG_[WIDTH|HEIGHT] registers, enforces
 * the VMW_MIN_INITIAL_[WIDTH|HEIGHT] lower bounds, and falls back to
 * those minimums whenever the reported size exceeds the
 * fb_max_[width|height] limits (a host error that shouldn't occur).
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t w = vmw_read(dev_priv, SVGA_REG_WIDTH);
	uint32_t h = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	if (w < VMW_MIN_INITIAL_WIDTH)
		w = VMW_MIN_INITIAL_WIDTH;
	if (h < VMW_MIN_INITIAL_HEIGHT)
		h = VMW_MIN_INITIAL_HEIGHT;

	if (w > dev_priv->fb_max_width ||
	    h > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur;
		 * fall back to the minimum size.
		 */

		w = VMW_MIN_INITIAL_WIDTH;
		h = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = w;
	dev_priv->initial_height = h;
}

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
@@ -441,6 +480,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
@@ -688,6 +730,15 @@ static int vmw_driver_unload(struct drm_device *dev)
	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
@@ -710,6 +761,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;
@@ -1102,6 +1154,7 @@ static struct drm_driver driver = {
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
+7 −3
Original line number Diff line number Diff line
@@ -40,9 +40,9 @@
#include "ttm/ttm_module.h"
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20111025"
#define VMWGFX_DRIVER_DATE "20120209"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 3
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -62,6 +62,7 @@
struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
};

struct vmw_dma_buffer {
@@ -202,6 +203,8 @@ struct vmw_private {
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
@@ -533,7 +536,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep);
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);

extern void
vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+11 −4
Original line number Diff line number Diff line
@@ -1109,10 +1109,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep)
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence;
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle;
	void *cmd;
	int ret;
@@ -1208,8 +1209,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	if (likely(fence != NULL))
	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return 0;
@@ -1362,7 +1368,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep);
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;
+2 −6
Original line number Diff line number Diff line
@@ -414,10 +414,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	/* XXX These shouldn't be hardcoded. */
	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

@@ -425,8 +421,8 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);
	initial_width = min(vmw_priv->initial_width, fb_width);
	initial_height = min(vmw_priv->initial_height, fb_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
+141 −111
Original line number Diff line number Diff line
@@ -69,12 +69,13 @@ struct vmw_user_fence {
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct drm_pending_event e;
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;
	struct kref kref;
	uint32_t size;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};
@@ -784,46 +785,40 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
}

/**
 * vmw_event_fence_action_destroy
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @kref: The struct kref embedded in a struct vmw_event_fence_action.
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * The vmw_event_fence_action destructor that may be called either after
 * the fence action cleanup, or when the event is delivered.
 * It frees both the vmw_event_fence_action struct and the actual
 * event structure copied to user-space.
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
static void vmw_event_fence_action_destroy(struct kref *kref)
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction =
		container_of(kref, struct vmw_event_fence_action, kref);
	struct ttm_mem_global *mem_glob =
		vmw_mem_glob(vmw_priv(eaction->dev));
	uint32_t size = eaction->size;
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	kfree(eaction->e.event);
	kfree(eaction);
	ttm_mem_global_free(mem_glob, size);
	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}


/**
 * vmw_event_fence_action_delivered
 *
 * @e: The struct drm_pending_event embedded in a struct
 * vmw_event_fence_action.
 *
 * The struct drm_pending_event destructor that is called by drm
 * once the event is delivered. Since we don't know whether this function
 * will be called before or after the fence action destructor, we
 * free a refcount and destroy if it becomes zero.
 */
static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
{
	struct vmw_event_fence_action *eaction =
		container_of(e, struct vmw_event_fence_action, e);

	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}


@@ -836,18 +831,21 @@ static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context. It ups a refcount reflecting that we now have two
 * destructors.
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_file *file_priv = eaction->e.file_priv;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	kref_get(&eaction->kref);
	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
@@ -858,7 +856,9 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
		*eaction->tv_usec = tv.tv_usec;
	}

	list_add_tail(&eaction->e.link, &file_priv->event_list);
	list_del_init(&eaction->fpriv_head);
	list_add_tail(&eaction->event->link, &file_priv->event_list);
	eaction->event = NULL;
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}
@@ -876,9 +876,15 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct vmw_fence_manager *fman = eaction->fence->fman;
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del(&eaction->fpriv_head);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_unreference(&eaction->fence);
	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
	kfree(eaction);
}


@@ -946,39 +952,23 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_create(struct drm_file *file_priv,
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				  struct drm_event *event,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct ttm_mem_global *mem_glob =
		vmw_mem_glob(fence->fman->dev_priv);
	struct vmw_fence_manager *fman = fence->fman;
	uint32_t size = fman->event_fence_action_size +
		ttm_round_pot(event->length);
	int ret;

	/*
	 * Account for internal structure size as well as the
	 * event size itself.
	 */

	ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
	if (unlikely(ret != 0))
		return ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL)) {
		ttm_mem_global_free(mem_glob, size);
	if (unlikely(eaction == NULL))
		return -ENOMEM;
	}

	eaction->e.event = event;
	eaction->e.file_priv = file_priv;
	eaction->e.destroy = vmw_event_fence_action_delivered;
	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
@@ -986,16 +976,89 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->size = size;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	kref_init(&eaction->kref);
	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

/*
 * struct vmw_event_fence_pending - Couples the drm pending-event
 * bookkeeping with the user-visible fence-signaled payload so both
 * can be allocated and freed as one object.
 */
struct vmw_event_fence_pending {
	struct drm_pending_event base;    /* drm delivery bookkeeping */
	struct drm_vmw_event_fence event; /* payload copied to user space */
};

/**
 * vmw_event_fence_action_create - Allocate a fence-signaled drm event and
 * queue it for delivery when @fence signals.
 *
 * @file_priv: The drm file the event will be delivered to.
 * @fence: The fence whose signaling triggers delivery.
 * @flags: DRM_VMW_FE_FLAG_REQ_TIME requests a signal-time timestamp.
 * @user_data: Opaque value copied back to user-space in the event.
 * @interruptible: Whether waiting may be interrupted.
 *
 * Reserves space in the file's event space, allocates the event wrapper
 * and hands it to vmw_event_fence_action_queue(). On failure, the
 * reservation and allocation are rolled back. Returns 0 on success or a
 * negative error code on failure.
 */
int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct drm_device *dev = fence->fman->dev_priv->dev;
	unsigned long irq_flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(event->event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_space;
	}

	/*
	 * Allocate the full wrapper, not just the user-visible payload:
	 * event->base is written below. sizeof(event->event) would
	 * under-allocate and overflow the heap object.
	 */
	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	/*
	 * length is the number of bytes copied to user-space on delivery;
	 * it must cover only the payload, not the pending-event wrapper.
	 */
	event->event.base.length = sizeof(event->event);
	event->event.user_data = user_data;

	event->base.event = &event->event.base;
	event->base.file_priv = file_priv;
	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	/* Success: don't fall through into the error unwind below. */
	return 0;

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	/* Give back exactly what was reserved above. */
	file_priv->event_space += sizeof(event->event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
@@ -1008,8 +1071,6 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	unsigned long irq_flags;
	struct drm_vmw_event_fence *event;
	int ret;

	/*
@@ -1062,59 +1123,28 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,

	BUG_ON(fence == NULL);

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(*event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_event_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		goto out_no_event;
	}

	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->base.length = sizeof(*event);
	event->user_data = arg->user_data;

	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_create(file_priv, fence,
						    &event->base,
						    &event->tv_sec,
						    &event->tv_usec,
						    arg->flags,
						    arg->user_data,
						    true);
	else
		ret = vmw_event_fence_action_create(file_priv, fence,
						    &event->base,
						    NULL,
						    NULL,
						    arg->flags,
						    arg->user_data,
						    true);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_attach;
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_attach:
	kfree(event);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(*event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_event_space:
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
Loading