
Commit 65705962 authored by Thomas Hellstrom, committed by Dave Airlie

drm/ttm/vmwgfx: Have TTM manage the validation sequence.



Rather than having the driver supply the validation sequence, leave that
responsibility to TTM. This saves some confusion and a function argument.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 95762c2b
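
In driver terms, the patch drops the val_seq argument from ttm_eu_reserve_buffers() and moves the counter into struct ttm_bo_device. A minimal before/after sketch of a caller, mirroring the vmwgfx change below (the surrounding names are taken from that driver and shown for illustration only):

	/* Before this patch: the driver owned the counter and passed it in. */
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
				     dev_priv->val_seq++);

	/* After this patch: TTM owns the counter (bdev->val_seq) and assigns
	 * the sequence itself, under the global LRU lock, inside the reserve
	 * path, so the driver only passes its validation list.
	 */
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;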
drivers/gpu/drm/ttm/ttm_bo.c  +1 −0
@@ -1539,6 +1539,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
+	bdev->val_seq = 0;
 	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
drivers/gpu/drm/ttm/ttm_execbuf_util.c  +4 −1
@@ -126,11 +126,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */

-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
+	uint32_t val_seq;

 	if (list_empty(list))
 		return 0;
@@ -146,6 +147,8 @@ int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)

 retry:
 	spin_lock(&glob->lru_lock);
+	val_seq = entry->bo->bdev->val_seq++;
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;

drivers/gpu/drm/vmwgfx/vmwgfx_drv.h  +0 −1
@@ -264,7 +264,6 @@ struct vmw_private {
 	 */

 	struct vmw_sw_context ctx;
-	uint32_t val_seq;
 	struct mutex cmdbuf_mutex;

 	/**
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  +1 −2
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
-	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
-				     dev_priv->val_seq++);
+	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;

include/drm/ttm/ttm_bo_driver.h  +2 −0
@@ -515,6 +515,7 @@ struct ttm_bo_global {
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
+ * @val_seq: Current validation sequence.
  * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
  * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
@@ -544,6 +545,7 @@ struct ttm_bo_device {
 	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
+	uint32_t val_seq;

 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.