Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit acb1bc9c authored by Shashank Babu Chinta Venkata's avatar Shashank Babu Chinta Venkata Committed by Gerrit - the friendly Code Review server
Browse files

drm/msm/sde: enable one to one map on smmu



This change adds a one-to-one (identity) mapping on the SMMU node
prior to enabling continuous splash.

Change-Id: Iea28d59f64d46e34d35fc93c1ac7e2bc18628691
Signed-off-by: default avatarShashank Babu Chinta Venkata <sbchin@codeaurora.org>
parent 4728a0a0
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -46,6 +46,12 @@ struct msm_mmu_funcs {
			struct dma_buf *dma_buf, int dir);
	void (*destroy)(struct msm_mmu *mmu);
	bool (*is_domain_secure)(struct msm_mmu *mmu);
	int (*set_attribute)(struct msm_mmu *mmu,
			enum iommu_attr attr, void *data);
	int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova,
			uint32_t dest_address, uint32_t size, int prot);
	int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address,
					uint32_t size);
};

struct msm_mmu {
+71 −0
Original line number Diff line number Diff line
@@ -113,6 +113,74 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names,
	dev_dbg(client->dev, "iommu domain detached\n");
}

/*
 * msm_smmu_set_attribute - set an attribute on the client's IOMMU domain
 * @mmu:  msm_mmu handle wrapping the SMMU client
 * @attr: IOMMU attribute to set (e.g. DOMAIN_ATTR_EARLY_MAP)
 * @data: attribute-specific payload forwarded to the IOMMU driver
 *
 * Return: 0 on success, -ENODEV if the client or its mapping is missing,
 * -EINVAL if no domain is attached, otherwise the error code from
 * iommu_domain_set_attr().
 */
static int msm_smmu_set_attribute(struct msm_mmu *mmu,
		enum iommu_attr attr, void *data)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	int ret;

	if (!client || !client->mmu_mapping)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain) {
		/* old message printed ret, which was always 0 here */
		DRM_ERROR("invalid domain, no iommu domain attached\n");
		return -EINVAL;
	}

	ret = iommu_domain_set_attr(domain, attr, data);
	if (ret)
		DRM_ERROR("set domain attribute failed:%d\n", ret);

	return ret;
}

/*
 * msm_smmu_one_to_one_unmap - tear down an identity mapping on the domain
 * @mmu:          msm_mmu handle wrapping the SMMU client
 * @dest_address: iova (== physical address) of the identity-mapped region
 * @size:         size of the region in bytes
 *
 * Return: 0 on success, -ENODEV if the client or its mapping is missing,
 * -EINVAL if no domain is attached or fewer bytes than requested were
 * unmapped. (The original version logged a short unmap but still returned
 * 0; the only caller in this change ignores the return value, so
 * propagating the failure is backward-compatible.)
 */
static int msm_smmu_one_to_one_unmap(struct msm_mmu *mmu,
				uint32_t dest_address, uint32_t size)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	int ret;

	if (!client || !client->mmu_mapping)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain)
		return -EINVAL;

	ret = iommu_unmap(domain, dest_address, size);
	if (ret != size) {
		pr_err("smmu unmap failed: unmapped %d of %u bytes\n",
				ret, size);
		return -EINVAL;
	}

	return 0;
}

/*
 * msm_smmu_one_to_one_map - install an identity mapping on the domain
 * @mmu:          msm_mmu handle wrapping the SMMU client
 * @iova:         unused; kept for the msm_mmu_funcs callback signature.
 *                The mapping is always iova == dest_address (see below).
 * @dest_address: physical address to identity-map (used as both iova and
 *                physical address in the iommu_map() call)
 * @size:         size of the region in bytes
 * @prot:         IOMMU_* protection flags
 *
 * Return: 0 on success, -ENODEV if the client or its mapping is missing,
 * -EINVAL if no domain is attached, otherwise the error from iommu_map().
 */
static int msm_smmu_one_to_one_map(struct msm_mmu *mmu, uint32_t iova,
		uint32_t dest_address, uint32_t size, int prot)
{
	struct msm_smmu *smmu = to_msm_smmu(mmu);
	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
	struct iommu_domain *domain;
	int ret;

	if (!client || !client->mmu_mapping)
		return -ENODEV;

	domain = client->mmu_mapping->domain;
	if (!domain)
		return -EINVAL;

	/* one-to-one: dest_address serves as both iova and phys */
	ret = iommu_map(domain, dest_address, dest_address, size, prot);
	if (ret)
		pr_err("smmu map failed: %d\n", ret);

	return ret;
}

static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
		struct sg_table *sgt, int prot)
{
@@ -299,6 +367,9 @@ static const struct msm_mmu_funcs funcs = {
	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
	.destroy = msm_smmu_destroy,
	.is_domain_secure = msm_smmu_is_domain_secure,
	.set_attribute = msm_smmu_set_attribute,
	.one_to_one_map = msm_smmu_one_to_one_map,
	.one_to_one_unmap = msm_smmu_one_to_one_unmap,
};

static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
+0 −41
Original line number Diff line number Diff line
@@ -42,47 +42,6 @@

#define SDE_REG_RESET_TIMEOUT_US        2000

#define MDP_CTL_FLUSH(n) ((0x2000) + (0x200*n) + CTL_FLUSH)
#define CTL_FLUSH_LM_BIT(n) (6 + n)
#define CTL_TOP_LM_OFFSET(index, lm) (0x2000 + (0x200 * index) + (lm * 0x4))

int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
		void __iomem *mmio)
{
	int i, j;
	u32 op_mode;

	if (!data) {
		pr_err("invalid splash data\n");
		return -EINVAL;
	}

	/*
	 * For every CTL block claimed by the bootloader splash, force each of
	 * its layer mixers to output border fill (detaching the splash
	 * pipes), then flush those mixers plus the CTL top so the change
	 * takes effect.
	 */
	for (i = 0; i < data->ctl_top_cnt; i++) {
		struct ctl_top *top = &data->top[i];
		u8 ctl_id = data->ctl_ids[i] - CTL_0;
		u32 regval = 0;

		/* accumulate flush bits on top of the current flush state */
		op_mode = readl_relaxed(mmio + MDP_CTL_FLUSH(ctl_id));

		/* Set border fill*/
		regval |= CTL_MIXER_BORDER_OUT;

		for (j = 0; j < top->ctl_lm_cnt; j++) {
			u8 lm_id = top->lm[j].lm_id - LM_0;

			/* program mixer output to border fill only */
			writel_relaxed(regval,
			mmio + CTL_TOP_LM_OFFSET(ctl_id, lm_id));

			/* mark this mixer for flush */
			op_mode |= BIT(CTL_FLUSH_LM_BIT(lm_id));
		}
		op_mode |= CTL_FLUSH_MASK_CTL;

		/* single flush write commits all mixer changes for this CTL */
		writel_relaxed(op_mode, mmio + MDP_CTL_FLUSH(ctl_id));
	}
	return 0;

}

static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
+0 −9
Original line number Diff line number Diff line
@@ -276,15 +276,6 @@ struct sde_hw_ctl {
	struct sde_hw_ctl_ops ops;
};

/**
 * sde_unstage_pipe_for_cont_splash - Unstage pipes for continuous splash
 * @data: pointer to sde splash data
 * @mmio: mapped register io address of MDP
 * @return: error code
 */
int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
		void __iomem *mmio);

/**
 * sde_hw_ctl - convert base object sde_hw_base to container
 * @hw: Pointer to base hardware block
+124 −32
Original line number Diff line number Diff line
@@ -526,6 +526,56 @@ static int _sde_kms_release_splash_buffer(unsigned int mem_addr,

}

/*
 * _sde_kms_splash_smmu_map - identity-map the splash buffer through the SMMU
 * @dev:  drm device (currently unused; kept for symmetry with callers)
 * @mmu:  mmu of the unsecure address space
 * @data: splash region (base/size) reserved by the bootloader
 *
 * Maps the splash region one-to-one, read-only and no-execute, so the
 * display hardware can keep scanning out the bootloader frame buffer
 * after the SMMU is attached.
 *
 * Return: 0 on success, -EINVAL on bad arguments or a missing callback,
 * otherwise the error from the one_to_one_map() callback.
 */
static int _sde_kms_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
		struct sde_splash_data *data)
{
	int ret;

	if (!mmu || !data)
		return -EINVAL;

	/* guard the callback like the unmap path does */
	if (!mmu->funcs || !mmu->funcs->one_to_one_map)
		return -EINVAL;

	ret = mmu->funcs->one_to_one_map(mmu, data->splash_base,
				data->splash_base, data->splash_size,
				IOMMU_READ | IOMMU_NOEXEC);
	if (ret)
		SDE_ERROR("Splash smmu map failed: %d\n", ret);

	return ret;
}

/*
 * _sde_kms_splash_smmu_unmap - remove the splash identity mapping
 * @sde_kms: sde kms handle
 *
 * Tears down the one-to-one SMMU mapping of the bootloader splash buffer
 * once the first real frame has been committed.
 *
 * Return: 0 on success or when the mmu provides no one_to_one_unmap
 * callback; -EINVAL on missing kms/aspace/mmu; otherwise the callback's
 * error code (previously discarded).
 */
static int _sde_kms_splash_smmu_unmap(struct sde_kms *sde_kms)
{
	struct sde_splash_data *data;
	struct msm_mmu *mmu;
	int rc = 0;

	if (!sde_kms)
		return -EINVAL;

	/*
	 * &sde_kms->splash_data can never be NULL once sde_kms is valid,
	 * so the old "if (!data)" check was dead code and is dropped.
	 */
	data = &sde_kms->splash_data;

	if (!sde_kms->aspace[0]) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (mmu->funcs && mmu->funcs->one_to_one_unmap)
		rc = mmu->funcs->one_to_one_unmap(mmu, data->splash_base,
				data->splash_size);

	return rc;
}

static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
@@ -536,8 +586,6 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, rc = 0;
	struct drm_plane *plane;
	bool commit_no_planes = true;

	if (!kms)
		return;
@@ -566,28 +614,8 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
		}
	}

	if (sde_kms->splash_data.smmu_handoff_pending) {
		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
			if (plane->state != NULL &&
					plane->state->crtc != NULL)
				commit_no_planes = false;
	}

	if (sde_kms->splash_data.smmu_handoff_pending && commit_no_planes) {

		rc = sde_unstage_pipe_for_cont_splash(&sde_kms->splash_data,
						sde_kms->mmio);
		if (rc)
			SDE_ERROR("pipe staging failed: %d\n", rc);

		rc = _sde_kms_release_splash_buffer(
				sde_kms->splash_data.splash_base,
				sde_kms->splash_data.splash_size);
		if (rc)
			SDE_ERROR("release of splash memory failed %d\n", rc);

	if (sde_kms->splash_data.smmu_handoff_pending)
		sde_kms->splash_data.smmu_handoff_pending = false;
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
@@ -667,12 +695,28 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);

	if (sde_kms->splash_data.cont_splash_en) {
		/* Releasing splash resources as we have first frame update */
		rc = _sde_kms_splash_smmu_unmap(sde_kms);
		SDE_DEBUG("Disabling cont_splash feature\n");
		sde_kms->splash_data.cont_splash_en = false;
		sde_power_resource_enable(&priv->phandle,
				sde_kms->core_client, false);
		SDE_DEBUG("removing Vote for MDP Resources\n");
	}

	/*
	 * Even for continuous splash disabled cases we have to release
	 * splash memory reservation back to system after first frame update.
	 */
	if (sde_kms->splash_data.splash_base) {
		rc = _sde_kms_release_splash_buffer(
				sde_kms->splash_data.splash_base,
				sde_kms->splash_data.splash_size);
		if (rc)
			pr_err("Failed to release splash memory\n");
		sde_kms->splash_data.splash_base = 0;
		sde_kms->splash_data.splash_size = 0;
	}
}

static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
@@ -1562,6 +1606,9 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);
	(void)_sde_kms_release_splash_buffer(
				sde_kms->splash_data.splash_base,
				sde_kms->splash_data.splash_size);

	/* safe to call these more than once during shutdown */
	_sde_debugfs_destroy(sde_kms);
@@ -2450,6 +2497,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i, ret;
	int early_map = 1;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;
@@ -2462,6 +2510,23 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
			continue;
		}

		/*
		 * Before attaching SMMU, we need to honor continuous splash
		 * use case where hardware tries to fetch buffer from physical
		 * address. To facilitate this requirement we need to have a
		 * one to one mapping on SMMU until we have our first frame.
		 */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
			sde_kms->splash_data.smmu_handoff_pending) {
			ret = mmu->funcs->set_attribute(mmu,
				DOMAIN_ATTR_EARLY_MAP,
				&early_map);
			if (ret) {
				SDE_ERROR("failed to set map att: %d\n", ret);
				goto fail;
			}
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
			mmu, "sde");
		if (IS_ERR(aspace)) {
@@ -2480,10 +2545,37 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
			goto fail;
		}
		aspace->domain_attached = true;
		early_map = 0;
		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
			sde_kms->splash_data.smmu_handoff_pending) {
			ret = _sde_kms_splash_smmu_map(sde_kms->dev, mmu,
					&sde_kms->splash_data);
			if (ret) {
				SDE_ERROR("failed to map ret:%d\n", ret);
				goto fail;
			}
			/*
			 * Turning off early map after generating one to one
			 * mapping for splash address space.
			 */
			ret = mmu->funcs->set_attribute(mmu,
				DOMAIN_ATTR_EARLY_MAP,
				&early_map);
			if (ret) {
				SDE_ERROR("failed to set map att ret:%d\n",
									ret);
				goto early_map_fail;
			}
		}
	}

	return 0;
early_map_fail:
	mmu->funcs->one_to_one_unmap(mmu, sde_kms->splash_data.splash_base,
					sde_kms->splash_data.splash_size);
fail:
	mmu->funcs->destroy(mmu);
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
@@ -2591,8 +2683,6 @@ static int _sde_kms_get_splash_data(struct sde_splash_data *data)
	pr_info("found continuous splash base address:%lx size:%x\n",
						data->splash_base,
						data->splash_size);
	data->smmu_handoff_pending = true;

	return ret;
}

@@ -2602,7 +2692,6 @@ static int sde_kms_hw_init(struct msm_kms *kms)
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_rm *rm = NULL;
	bool splash_mem_found = false;
	int i, rc = -EINVAL;

	if (!kms) {
@@ -2696,12 +2785,8 @@ static int sde_kms_hw_init(struct msm_kms *kms)
	}

	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
	if (rc) {
	if (rc)
		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
		splash_mem_found = false;
	} else {
		splash_mem_found = true;
	}

	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
		true);
@@ -2740,11 +2825,18 @@ static int sde_kms_hw_init(struct msm_kms *kms)
	 * Attempt continuous splash handoff only if reserved
	 * splash memory is found.
	 */
	if (splash_mem_found)
	if (sde_kms->splash_data.splash_base)
		sde_rm_cont_splash_res_init(&sde_kms->rm,
					&sde_kms->splash_data,
					sde_kms->catalog);

	/*
	 * SMMU handoff is necessary for continuous splash enabled
	 * scenario.
	 */
	if (sde_kms->splash_data.cont_splash_en)
		sde_kms->splash_data.smmu_handoff_pending = true;

	/* Initialize reg dma block which is a singleton */
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);