
Commit f1d34bfd authored by Thomas Hellstrom

drm/vmwgfx: Replace vmw_dma_buffer with vmw_buffer_object

Initially, vmware buffer objects were only used as DMA buffers, so the name
DMA buffer was a natural one. However, they are now also used as dumb
buffers and as MOBs backing guest-backed objects, so renaming them to
buffer objects is logical, particularly since there is a dmabuf subsystem
in the kernel where a DMA buffer means something completely different.

This also renames the user-space API structures and IOCTL names
correspondingly, but the old names remain defined for now and the ABI
hasn't changed.
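
For illustration, keeping the old uapi names alive next to the new ones is
typically done with plain aliases in the uapi header. The sketch below shows
the pattern only; the command index, struct layout, and exact macro names are
placeholders rather than the real vmwgfx_drm.h contents:

/* Sketch: keeping a renamed DRM ioctl ABI-compatible. Both names resolve
 * to the same command offset and the same argument layout, so user space
 * built against the old header keeps working unchanged.
 */
#define DRM_VMW_ALLOC_BO        1                   /* placeholder index */
#define DRM_VMW_ALLOC_DMABUF    DRM_VMW_ALLOC_BO    /* legacy alias */

struct drm_vmw_alloc_bo_req {
	__u32 size;    /* requested buffer size in bytes */
	__u32 pad64;   /* explicit padding for 64-bit alignment */
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req  /* legacy alias */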

There are a couple of minor style changes to make checkpatch happy.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
parent 07c13bb7
drivers/gpu/drm/vmwgfx/Makefile  +2 −2

 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
 	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
 	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c → drivers/gpu/drm/vmwgfx/vmwgfx_bo.c  +29 −29

@@ -32,7 +32,7 @@


 /**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
  *
  * @dev_priv:  Driver private.
  * @buf:  DMA buffer to move.
@@ -42,8 +42,8 @@
  * Returns
  *  -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+			    struct vmw_buffer_object *buf,
 			    struct ttm_placement *placement,
 			    bool interruptible)
 {
@@ -79,7 +79,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 }

 /**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -92,8 +92,8 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
 			      bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
@@ -134,7 +134,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 }

 /**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -146,16 +146,16 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-			   struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *buf,
 		       bool interruptible)
 {
-	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
 				       interruptible);
 }

 /**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
  *
  * This function takes the reservation_sem in write mode.
  * Flushes and unpins the query bo to avoid failures.
@@ -167,8 +167,8 @@ int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
-				    struct vmw_dma_buffer *buf,
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+				struct vmw_buffer_object *buf,
 				bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false };
@@ -226,7 +226,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
 }

 /**
- * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
+ * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer.
  *
  * This function takes the reservation_sem in write mode.
  *
@@ -237,8 +237,8 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
  * Returns
  * -ERESTARTSYS if interrupted by a signal.
  */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
-		     struct vmw_dma_buffer *buf,
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+		 struct vmw_buffer_object *buf,
 		 bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
@@ -288,7 +288,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 {
 	struct ttm_operation_ctx ctx = { false, true };
 	struct ttm_place pl;
@@ -326,14 +326,14 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)


 /*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
+ * vmw_buffer_object_unmap - Tear down a cached buffer object map.
  *
  * @vbo: The buffer object whose map we are tearing down.
  *
  * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
+ * vmw_buffer_object_map_and_cache().
  */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
+void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo)
 {
 	if (vbo->map.bo == NULL)
 		return;
@@ -343,7 +343,7 @@ void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)


 /*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
+ * vmw_buffer_object_map_and_cache - Map a buffer object and cache the map
  *
  * @vbo: The buffer object to map
  * Return: A kernel virtual address or NULL if mapping failed.
@@ -357,7 +357,7 @@ void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
  * 3) Buffer object destruction
  *
  */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
+void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo)
 {
 	struct ttm_buffer_object *bo = &vbo->base;
 	bool not_used;
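
For context, the renamed pin/unpin entry points are used in caller code along
these lines; this is a hedged sketch, and vmw_example_use_pinned_bo() is a
hypothetical caller, not part of the patch:

/* Sketch: pin a vmw_buffer_object in VRAM around work that needs the
 * buffer at a fixed location, then unpin it. Before this patch the same
 * calls were spelled vmw_dmabuf_pin_in_vram()/vmw_dmabuf_unpin().
 */
static int vmw_example_use_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_buffer_object *buf)
{
	int ret;

	ret = vmw_bo_pin_in_vram(dev_priv, buf, true /* interruptible */);
	if (ret)
		return ret;	/* may be -ERESTARTSYS on a signal */

	/* ... submit commands that rely on the buffer staying put ... */

	return vmw_bo_unpin(dev_priv, buf, true);
}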
drivers/gpu/drm/vmwgfx/vmwgfx_context.c  +5 −5

@@ -38,7 +38,7 @@ struct vmw_user_context {
 	struct vmw_cmdbuf_res_manager *man;
 	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
 	spinlock_t cotable_lock;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 };

 static void vmw_user_context_free(struct vmw_resource *res);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
  * specified in the parameter.  0 otherwise.
  */
 int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-			      struct vmw_dma_buffer *mob)
+			      struct vmw_buffer_object *mob)
 {
 	struct vmw_user_context *uctx =
 		container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	if (mob == NULL) {
 		if (uctx->dx_query_mob) {
 			uctx->dx_query_mob->dx_query_ctx = NULL;
-			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			vmw_bo_unreference(&uctx->dx_query_mob);
 			uctx->dx_query_mob = NULL;
 		}

@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	mob->dx_query_ctx  = ctx_res;

 	if (!uctx->dx_query_mob)
-		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+		uctx->dx_query_mob = vmw_bo_reference(mob);

 	return 0;
 }
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
  *
  * @ctx_res: The context resource
  */
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
 	struct vmw_user_context *uctx =
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c  +5 −5

@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	struct ttm_operation_ctx ctx = { false, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_cotable *vcotbl = vmw_cotable(res);
-	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct vmw_buffer_object *buf, *old_buf = res->backup;
 	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
 	size_t old_size = res->backup_size;
 	size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	if (!buf)
 		return -ENOMEM;

-	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-			      true, vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			  true, vmw_bo_bo_free);
 	if (ret) {
 		DRM_ERROR("Failed initializing new cotable MOB.\n");
 		return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	/* Let go of the old mob. */
 	list_del(&res->mob_head);
 	list_add_tail(&res->mob_head, &buf->res_list);
-	vmw_dmabuf_unreference(&old_buf);
+	vmw_bo_unreference(&old_buf);
 	res->id = vcotbl->type;

 	return 0;
@@ -491,7 +491,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	ttm_bo_kunmap(&old_map);
 out_wait:
 	ttm_bo_unreserve(bo);
-	vmw_dmabuf_unreference(&buf);
+	vmw_bo_unreference(&buf);

 	return ret;
 }
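
The resize hunk above also shows the creation pattern for buffer objects after
the rename: allocate the wrapper, then hand it to vmw_bo_init() together with
a destructor. A condensed, illustrative restatement (not a literal excerpt
from the patch):

/* Sketch: vmw_bo_init() (formerly vmw_dmabuf_init()) adopts the wrapper
 * and arranges for vmw_bo_bo_free() (formerly vmw_dmabuf_bo_free()) to
 * run when the last reference is dropped, which is why the error path
 * above returns without freeing buf itself.
 */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
	return -ENOMEM;

ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
		  true, vmw_bo_bo_free);
if (ret)
	return ret;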
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  +9 −9

@@ -153,9 +153,9 @@
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
@@ -219,7 +219,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      vmw_gb_surface_reference_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
-		      vmw_user_dmabuf_synccpu_ioctl,
+		      vmw_user_bo_synccpu_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 		      vmw_extended_context_define_ioctl,
@@ -321,7 +321,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct vmw_dma_buffer *vbo;
+	struct vmw_buffer_object *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
@@ -335,9 +335,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (!vbo)
 		return -ENOMEM;

-	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
-			      &vmw_sys_ne_placement, false,
-			      &vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+			  &vmw_sys_ne_placement, false,
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		return ret;

@@ -358,7 +358,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)

 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		vmw_dmabuf_unreference(&vbo);
+		vmw_bo_unreference(&vbo);
 	} else
 		dev_priv->dummy_query_bo = vbo;

@@ -460,7 +460,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)

 	BUG_ON(dev_priv->pinned_bo != NULL);

-	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	vmw_bo_unreference(&dev_priv->dummy_query_bo);
 	if (dev_priv->cman)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
