Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8038d2a9 authored by Dave Airlie
Browse files

Merge tag 'vmwgfx-next-4.19-2' of git://people.freedesktop.org/~thomash/linux into drm-next



A series of cleanups / reorganizations and modesetting changes that
mostly target atomic state validation.

[airlied: conflicts with SPDX stuff in amdgpu tree]
Signed-off-by: Dave Airlie <airlied@redhat.com>

Link: https://patchwork.freedesktop.org/patch/msgid/1a88485e-e509-b00e-8485-19194f074115@vmware.com
parents ba7ca97d 812a954b
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
	    vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
	    vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
	    vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
	    vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
+29 −2
Original line number Diff line number Diff line
@@ -673,8 +673,34 @@ SVGASignedPoint;
 * SVGA_CAP_GBOBJECTS --
 *    Enable guest-backed objects and surfaces.
 *
 * SVGA_CAP_CMD_BUFFERS_3 --
 *    Enable support for command buffers in a mob.
 * SVGA_CAP_DX --
 *    Enable support for DX commands, and command buffers in a mob.
 *
 * SVGA_CAP_HP_CMD_QUEUE --
 *    Enable support for the high priority command queue, and the
 *    ScreenCopy command.
 *
 * SVGA_CAP_NO_BB_RESTRICTION --
 *    Allow ScreenTargets to be defined without regard to the 32-bpp
 *    bounding-box memory restrictions. ie:
 *
 *    The summed memory usage of all screens (assuming they were defined as
 *    32-bpp) must always be less than the value of the
 *    SVGA_REG_MAX_PRIMARY_MEM register.
 *
 *    If this cap is not present, the 32-bpp bounding box around all screens
 *    must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
 *    register.
 *
 *    If the cap is present, the bounding box restriction is lifted (and only
 *    the screen-sum limit applies).
 *
 *    (Note that this is a slight lie... there is still a sanity limit on any
 *     dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
 *     when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
 *     large enough to express any possible topology without holes between
 *     monitors.)
 *
 */

#define SVGA_CAP_NONE               0x00000000
@@ -700,6 +726,7 @@ SVGASignedPoint;
#define SVGA_CAP_GBOBJECTS          0x08000000
#define SVGA_CAP_DX                 0x10000000
#define SVGA_CAP_HP_CMD_QUEUE       0x20000000
#define SVGA_CAP_NO_BB_RESTRICTION  0x40000000

#define SVGA_CAP_CMD_RESERVED       0x80000000

+1123 −0

File added.

Preview size limit exceeded, changes collapsed.

+7 −7
Original line number Diff line number Diff line
@@ -38,7 +38,7 @@ struct vmw_user_context {
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_buffer_object *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);
	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);
	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
 * specified in the parameter.  0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_dma_buffer *mob)
			      struct vmw_buffer_object *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_dmabuf_unreference(&uctx->dx_query_mob);
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
	mob->dx_query_ctx  = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
+7 −7
Original line number Diff line number Diff line
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(bo, fence);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(&res->backup->base, fence);
	vmw_bo_fence_single(&res->backup->base, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_dma_buffer *buf, *old_buf = res->backup;
	struct vmw_buffer_object *buf, *old_buf = res->backup;
	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
	size_t old_size = res->backup_size;
	size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
	if (!buf)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
			      true, vmw_dmabuf_bo_free);
	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
			  true, vmw_bo_bo_free);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
	/* Let go of the old mob. */
	list_del(&res->mob_head);
	list_add_tail(&res->mob_head, &buf->res_list);
	vmw_dmabuf_unreference(&old_buf);
	vmw_bo_unreference(&old_buf);
	res->id = vcotbl->type;

	return 0;
@@ -491,7 +491,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unreserve(bo);
	vmw_dmabuf_unreference(&buf);
	vmw_bo_unreference(&buf);

	return ret;
}
Loading