
Commit c7eae626 authored by Thomas Hellstrom

drm/vmwgfx: Make the object handles idr-generated



Instead of generating user-space object handles based on a, possibly
processed, hash of the kernel address of the object, use idr to generate
and look up those handles. This might improve security somewhat, since
we lose all connection to the object's kernel address. Also, idr is
designed for exactly this purpose.

As a todo item: since user-space handles are now generated in sequence,
we can probably use a much simpler hash function to hash them.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
parent b1d05b4f
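For readers unfamiliar with the idr API, the sketch below reduces the allocate/look-up/remove pattern the patch switches to into a standalone kernel-style example. Only the idr_*() calls mirror the patch; the obj_table, obj_lock and my_obj names are illustrative, not part of the change.

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative object table; the patch keeps its idr in struct ttm_object_device. */
static DEFINE_IDR(obj_table);
static DEFINE_SPINLOCK(obj_lock);

struct my_obj {
	u32 handle;
};

/* Allocate a handle: preload outside the lock, allocate under it. */
static int my_obj_register(struct my_obj *obj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&obj_lock);
	ret = idr_alloc(&obj_table, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&obj_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	obj->handle = ret;	/* handles are handed out in sequence */
	return 0;
}

/* Look up an object by its handle; the patch does this under rcu_read_lock(). */
static struct my_obj *my_obj_lookup(u32 handle)
{
	return idr_find(&obj_table, handle);
}

/* Drop the handle when the object is released. */
static void my_obj_unregister(struct my_obj *obj)
{
	spin_lock(&obj_lock);
	idr_remove(&obj_table, obj->handle);
	spin_unlock(&obj_lock);
}

The idr_preload()/GFP_NOWAIT split lets the allocation itself run under a spinlock without sleeping, which is why the patch wraps idr_alloc() the same way.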
+1 −1
@@ -261,7 +261,7 @@ int ttm_vt_lock(struct ttm_lock *lock,
 int ttm_vt_unlock(struct ttm_lock *lock)
 {
 	return ttm_ref_object_base_unref(lock->vt_holder,
-					 lock->base.hash.key, TTM_REF_USAGE);
+					 lock->base.handle, TTM_REF_USAGE);
 }
 
 void ttm_suspend_unlock(struct ttm_lock *lock)
+20 −22
@@ -94,6 +94,7 @@ struct ttm_object_device {
 	struct dma_buf_ops ops;
 	void (*dmabuf_release)(struct dma_buf *dma_buf);
 	size_t dma_buf_size;
+	struct idr idr;
 };
 
 /**
@@ -171,14 +172,15 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	base->ref_obj_release = ref_obj_release;
 	base->object_type = object_type;
 	kref_init(&base->refcount);
+	idr_preload(GFP_KERNEL);
 	spin_lock(&tdev->object_lock);
-	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
-					    &base->hash,
-					    (unsigned long)base, 31, 0, 0);
+	ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
 	spin_unlock(&tdev->object_lock);
-	if (unlikely(ret != 0))
-		goto out_err0;
+	idr_preload_end();
+	if (ret < 0)
+		return ret;
 
+	base->handle = ret;
 	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 	if (unlikely(ret != 0))
 		goto out_err1;
@@ -188,9 +190,8 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 	return 0;
 out_err1:
 	spin_lock(&tdev->object_lock);
-	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	idr_remove(&tdev->idr, base->handle);
 	spin_unlock(&tdev->object_lock);
-out_err0:
 	return ret;
 }
 
@@ -201,7 +202,7 @@ static void ttm_release_base(struct kref *kref)
 	struct ttm_object_device *tdev = base->tfile->tdev;
 
 	spin_lock(&tdev->object_lock);
-	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+	idr_remove(&tdev->idr, base->handle);
 	spin_unlock(&tdev->object_lock);
 
 	/*
@@ -248,19 +249,13 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 struct ttm_base_object *
 ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 {
-	struct ttm_base_object *base = NULL;
-	struct drm_hash_item *hash;
-	struct drm_open_hash *ht = &tdev->object_hash;
-	int ret;
+	struct ttm_base_object *base;
 
 	rcu_read_lock();
-	ret = drm_ht_find_item_rcu(ht, key, &hash);
+	base = idr_find(&tdev->idr, key);
 
-	if (likely(ret == 0)) {
-		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		if (!kref_get_unless_zero(&base->refcount))
-			base = NULL;
-	}
+	if (base && !kref_get_unless_zero(&base->refcount))
+		base = NULL;
 	rcu_read_unlock();
 
 	return base;
@@ -284,7 +279,7 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 	struct ttm_ref_object *ref;
 
 	rcu_read_lock();
-	if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
+	if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0))
 		goto out_false;
 
 	/*
@@ -334,7 +329,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 
 	while (ret == -EINVAL) {
 		rcu_read_lock();
-		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
+		ret = drm_ht_find_item_rcu(ht, base->handle, &hash);
 
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
@@ -358,7 +353,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 			return -ENOMEM;
 		}
 
-		ref->hash.key = base->hash.key;
+		ref->hash.key = base->handle;
 		ref->obj = base;
 		ref->tfile = tfile;
 		ref->ref_type = ref_type;
@@ -510,6 +505,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
 	if (ret != 0)
 		goto out_no_object_hash;
 
+	idr_init(&tdev->idr);
 	tdev->ops = *ops;
 	tdev->dmabuf_release = tdev->ops.release;
 	tdev->ops.release = ttm_prime_dmabuf_release;
@@ -528,6 +524,8 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
 	*p_tdev = NULL;
 
+	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
+	idr_destroy(&tdev->idr);
 	drm_ht_remove(&tdev->object_hash);
 
 	kfree(tdev);
@@ -630,7 +628,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
 
 	prime = (struct ttm_prime_object *) dma_buf->priv;
 	base = &prime->base;
-	*handle = base->hash.key;
+	*handle = base->handle;
 	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
 	dma_buf_put(dma_buf);
+10 −3
@@ -124,14 +124,14 @@ struct ttm_object_device;
 
 struct ttm_base_object {
 	struct rcu_head rhead;
-	struct drm_hash_item hash;
-	enum ttm_object_type object_type;
-	bool shareable;
 	struct ttm_object_file *tfile;
 	struct kref refcount;
 	void (*refcount_release) (struct ttm_base_object **base);
 	void (*ref_obj_release) (struct ttm_base_object *base,
				 enum ttm_ref_type ref_type);
+	u32 handle;
+	enum ttm_object_type object_type;
+	u32 shareable;
 };
 
 
@@ -350,4 +350,11 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 
 #define ttm_prime_object_kfree(__obj, __prime)		\
 	kfree_rcu(__obj, __prime.base.rhead)
+
+/*
+ * Extra memory required by the base object's idr storage, which is allocated
+ * separately from the base object itself. We estimate an on-average 128 bytes
+ * per idr.
+ */
+#define TTM_OBJ_EXTRA_SIZE 128
 #endif
+4 −3
@@ -441,7 +441,8 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
 		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
 		user_struct_size = backend_size +
-			ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
+			TTM_OBJ_EXTRA_SIZE;
 	}
 
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -631,7 +632,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 		*p_base = &user_bo->prime.base;
 		kref_get(&(*p_base)->refcount);
 	}
-	*handle = user_bo->prime.base.hash.key;
+	*handle = user_bo->prime.base.handle;
 
 out_no_base_object:
 	return ret;
@@ -940,7 +941,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
 
 	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 
-	*handle = user_bo->prime.base.hash.key;
+	*handle = user_bo->prime.base.handle;
 	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
 }
+4 −8
@@ -755,14 +755,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number of contexts anyway.
-	 */
-
 	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
-		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
+		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
@@ -807,7 +803,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 		goto out_err;
 	}
 
-	arg->cid = ctx->base.hash.key;
+	arg->cid = ctx->base.handle;
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock: