Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0e08270a authored by Sushmita Susheelendra, committed by Rob Clark
Browse files

drm/msm: Separate locking of buffer resources from struct_mutex



Buffer object specific resources like pages, domains, sg list
need not be protected with struct_mutex. They can be protected
with a buffer object level lock. This simplifies locking and
makes it easier to avoid potential recursive locking scenarios
for SVM involving mmap_sem and struct_mutex. It also removes
unnecessary serialization when creating buffer objects, and
between buffer object creation and GPU command submission.

Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
[robclark: squash in handling new locking for shrinker]
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 816fa34c
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -297,18 +297,18 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
	struct drm_gem_object *bo;
	void *ptr;

	bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
	bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
	if (IS_ERR(bo))
		return bo;

	ptr = msm_gem_get_vaddr_locked(bo);
	ptr = msm_gem_get_vaddr(bo);
	if (!ptr) {
		drm_gem_object_unreference(bo);
		return ERR_PTR(-ENOMEM);
	}

	if (iova) {
		int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);

		if (ret) {
			drm_gem_object_unreference(bo);
@@ -318,7 +318,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr_locked(bo);
	msm_gem_put_vaddr(bo);
	return bo;
}

+4 −4
Original line number Diff line number Diff line
@@ -294,15 +294,15 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
	 */
	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
	a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
	if (IS_ERR(a5xx_gpu->gpmu_bo))
		goto err;

	if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
			&a5xx_gpu->gpmu_iova))
		goto err;

	ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
	if (!ptr)
		goto err;

@@ -321,7 +321,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
		cmds_size -= _size;
	}

	msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo);
	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
	a5xx_gpu->gpmu_dwords = dwords;

	goto out;
+1 −3
Original line number Diff line number Diff line
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)

	DBG("%s", gpu->name);

	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
	ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -397,10 +397,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
			return ret;
	}

	mutex_lock(&drm->struct_mutex);
	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
			MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(adreno_gpu->memptrs_bo)) {
		ret = PTR_ERR(adreno_gpu->memptrs_bo);
		adreno_gpu->memptrs_bo = NULL;
+1 −3
Original line number Diff line number Diff line
@@ -982,18 +982,16 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
	uint64_t iova;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		mutex_lock(&dev->struct_mutex);
		msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
		if (IS_ERR(msm_host->tx_gem_obj)) {
			ret = PTR_ERR(msm_host->tx_gem_obj);
			pr_err("%s: failed to allocate gem, %d\n",
				__func__, ret);
			msm_host->tx_gem_obj = NULL;
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}

		ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
		ret = msm_gem_get_iova(msm_host->tx_gem_obj,
				priv->kms->aspace, &iova);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
+1 −1
Original line number Diff line number Diff line
@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
		if (next_bo) {
			/* take a obj ref + iova ref when we start scanning out: */
			drm_gem_object_reference(next_bo);
			msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
			msm_gem_get_iova(next_bo, kms->aspace, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
Loading