Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 75c31e76 authored by Abhijit Kulkarni, committed by Veera Sundaram Sankaran
Browse files

drm/msm/sde: fix issues in secure transition functionality



Fix mutex deadlock issues in the aspace callback functions.
Aspace callback functions are called with the mutex lock
acquired, so all the gem apis locked versions should be used
within the callbacks. Fix invalid checks in reg_dma and
modify the callback functions to reflect locked. Fix issues
with the buffer addition and removal to active_list during
secure-transition.

Change-Id: I1d22591cc751320184f5e62def2cdfe3ef87d697
Signed-off-by: Abhijit Kulkarni <kabhijit@codeaurora.org>
Signed-off-by: Veera Sundaram Sankaran <veeras@codeaurora.org>
parent c5507b7e
Loading
Loading
Loading
Loading
+17 −15
Original line number Diff line number Diff line
@@ -311,8 +311,7 @@ static void obj_remove_domain(struct msm_gem_vma *domain)
	}
}

static void
put_iova(struct drm_gem_object *obj)
static void put_iova(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -324,12 +323,14 @@ put_iova(struct drm_gem_object *obj)
		if (iommu_present(&platform_bus_type)) {
			msm_gem_unmap_vma(domain->aspace, domain,
				msm_obj->sgt, get_dmabuf_ptr(obj));

			msm_gem_remove_obj_from_aspace_active_list(
					domain->aspace,
					obj);
		}

		/*
		 * put_iova removes the domain connected to the obj which makes
		 * the aspace inaccessible. Store the aspace, as it is used to
		 * update the active_list during gem_free_obj and gem_purge.
		 */
		msm_obj->aspace = domain->aspace;
		obj_remove_domain(domain);
	}
}
@@ -409,6 +410,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,

	if (!ret && domain) {
		*iova = domain->iova;
		if (aspace && aspace->domain_attached)
			msm_gem_add_obj_to_aspace_active_list(aspace, obj);
	} else {
		obj_remove_domain(domain);
@@ -485,24 +487,21 @@ void msm_gem_aspace_domain_attach_detach_update(
		/**
		 * Unmap active buffers,
		 * typically clients should do this when the callback is called,
		 * but this needs to be done for the framebuffers which are not
		 * attached to any planes. (background apps)
		 * but this needs to be done for the buffers which are not
		 * attached to any planes.
		 */
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			if (obj->import_attach) {
			if (obj->import_attach)
				put_iova(obj);
				put_pages(obj);
			}
		}
	} else {
		/* map active buffers */
		list_for_each_entry(msm_obj, &aspace->active_list,
				iova_list) {
		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
			obj = &msm_obj->base;
			ret = msm_gem_get_iova_locked(obj, aspace, &iova);
			if (ret) {
				mutex_unlock(&obj->dev->struct_mutex);
				mutex_unlock(&aspace->dev->struct_mutex);
				return;
			}
		}
@@ -613,6 +612,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
	WARN_ON(obj->import_attach);

	put_iova(obj);
	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);

	msm_gem_vunmap(obj);

@@ -841,6 +841,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
	list_del(&msm_obj->mm_list);

	put_iova(obj);
	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
@@ -946,6 +947,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->domains);
	INIT_LIST_HEAD(&msm_obj->iova_list);
	msm_obj->aspace = NULL;

	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

+2 −0
Original line number Diff line number Diff line
@@ -122,6 +122,8 @@ struct msm_gem_object {
	 */
	struct drm_mm_node *vram_node;
	struct list_head iova_list;

	struct msm_gem_address_space *aspace;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

+22 −15
Original line number Diff line number Diff line
@@ -582,7 +582,7 @@ int reset_v1(struct sde_hw_ctl *ctl)
	return 0;
}

static void sde_reg_dma_aspace_cb(void *cb_data, bool attach)
static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	struct msm_gem_address_space *aspace = NULL;
@@ -597,14 +597,23 @@ static void sde_reg_dma_aspace_cb(void *cb_data, bool attach)
	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
	aspace = dma_buf->aspace;

	if (attach) {
		rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
	if (is_detach) {
		/* invalidate the stored iova */
		dma_buf->iova = 0;

		/* return the virtual address mapping */
		msm_gem_put_vaddr_locked(dma_buf->buf);
		msm_gem_vunmap(dma_buf->buf);

	} else {
		rc = msm_gem_get_iova_locked(dma_buf->buf, aspace,
				&dma_buf->iova);
		if (rc) {
			DRM_ERROR("failed to get the iova rc %d\n", rc);
			return;
		}

		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
		dma_buf->vaddr = msm_gem_get_vaddr_locked(dma_buf->buf);
		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
			DRM_ERROR("failed to get va rc %d\n", rc);
			return;
@@ -615,13 +624,6 @@ static void sde_reg_dma_aspace_cb(void *cb_data, bool attach)
		dma_buf->iova = dma_buf->iova + offset;
		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
		dma_buf->next_op_allowed = DECODE_SEL_OP;
	} else {
		/* invalidate the stored iova */
		dma_buf->iova = 0;

		/* return the virtual address mapping */
		msm_gem_put_vaddr(dma_buf->buf);
		msm_gem_vunmap(dma_buf->buf);
	}
}

@@ -661,7 +663,7 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)

	/* register to aspace */
	rc = msm_gem_address_space_register_cb(aspace,
			sde_reg_dma_aspace_cb,
			sde_reg_dma_aspace_cb_locked,
			(void *)dma_buf);
	if (rc) {
		DRM_ERROR("failed to register callback %d", rc);
@@ -694,8 +696,8 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
put_iova:
	msm_gem_put_iova(dma_buf->buf, aspace);
free_aspace_cb:
	msm_gem_address_space_unregister_cb(aspace, sde_reg_dma_aspace_cb,
			dma_buf);
	msm_gem_address_space_unregister_cb(aspace,
			sde_reg_dma_aspace_cb_locked, dma_buf);
free_gem:
	msm_gem_free_object(dma_buf->buf);
fail:
@@ -713,7 +715,7 @@ static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
	if (dma_buf->buf) {
		msm_gem_put_iova(dma_buf->buf, 0);
		msm_gem_address_space_unregister_cb(dma_buf->aspace,
				sde_reg_dma_aspace_cb, dma_buf);
				sde_reg_dma_aspace_cb_locked, dma_buf);
		mutex_lock(&reg_dma->drm_dev->struct_mutex);
		msm_gem_free_object(dma_buf->buf);
		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
@@ -774,6 +776,11 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q)
		return -EINVAL;
	}

	if (!last_cmd_buf->iova) {
		DRM_DEBUG("iova not set, possible secure session\n");
		return 0;
	}

	cfg.dma_buf = last_cmd_buf;
	reset_reg_dma_buffer_v1(last_cmd_buf);
	if (validate_last_cmd(&cfg)) {