Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9bb109a6 authored by Chandan Uddaraju's avatar Chandan Uddaraju Committed by Gerrit - the friendly Code Review server
Browse files

drm/msm/sde: handle smmu handoff for continuous splash



Continuous splash requires a frame to be pushed from the buffer
during boot up. So initializing the SMMU during probe will
affect this functionality. Hence SMMU initialization
has to be done only after disabling auto refresh. This
change handles SMMU handoff for the continuous splash
use case.

Change-Id: I1b52434c2505a0737bdd31a7196c1bbefb18d23f
Signed-off-by: default avatarShashank Babu Chinta Venkata <sbchin@codeaurora.org>
Signed-off-by: default avatarChandan Uddaraju <chandanu@codeaurora.org>
parent 5d641d47
Loading
Loading
Loading
Loading
+41 −0
Original line number Diff line number Diff line
@@ -42,6 +42,47 @@

#define SDE_REG_RESET_TIMEOUT_US        2000

/*
 * Register offset helpers for the CTL flush path. Every macro argument
 * is fully parenthesized so that expression arguments (e.g.
 * "data->ctl_ids[i] - CTL_0") expand with the intended precedence.
 */
#define MDP_CTL_FLUSH(n) ((0x2000) + (0x200 * (n)) + CTL_FLUSH)
#define CTL_FLUSH_LM_BIT(n) (6 + (n))
#define CTL_TOP_LM_OFFSET(index, lm) (0x2000 + (0x200 * (index)) + ((lm) * 0x4))

/**
 * sde_unstage_pipe_for_cont_splash - unstage pipes left staged by bootloader
 * @data: splash data populated during continuous splash handoff
 * @mmio: mapped register io address of MDP
 *
 * For each CTL in use, program every layer mixer owned by that CTL to
 * output border fill only (dropping the bootloader-staged pipes), then
 * flush the affected mixers together with the CTL path itself.
 *
 * Return: 0 on success, -EINVAL on invalid arguments.
 */
int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
		void __iomem *mmio)
{
	int i, j;
	u32 op_mode;

	/* mmio is dereferenced below; validate it alongside data */
	if (!data || !mmio) {
		pr_err("invalid splash data or mmio\n");
		return -EINVAL;
	}

	for (i = 0; i < data->ctl_top_cnt; i++) {
		struct ctl_top *top = &data->top[i];
		u8 ctl_id = data->ctl_ids[i] - CTL_0;
		u32 regval = 0;

		/* Start from the currently programmed flush bits */
		op_mode = readl_relaxed(mmio + MDP_CTL_FLUSH(ctl_id));

		/* Set border fill */
		regval |= CTL_MIXER_BORDER_OUT;

		for (j = 0; j < top->ctl_lm_cnt; j++) {
			u8 lm_id = top->lm[j].lm_id - LM_0;

			writel_relaxed(regval,
				mmio + CTL_TOP_LM_OFFSET(ctl_id, lm_id));

			op_mode |= BIT(CTL_FLUSH_LM_BIT(lm_id));
		}

		/* Flush the CTL path itself in addition to the mixers */
		op_mode |= CTL_FLUSH_MASK_CTL;

		writel_relaxed(op_mode, mmio + MDP_CTL_FLUSH(ctl_id));
	}

	return 0;
}

static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
+9 −0
Original line number Diff line number Diff line
@@ -253,6 +253,15 @@ struct sde_hw_ctl {
	struct sde_hw_ctl_ops ops;
};

/**
 * sde_unstage_pipe_for_cont_splash - Unstage pipes for continuous splash
 * @data: pointer to sde splash data
 * @mmio: mapped register io address of MDP
 * @return: error code
 */
int sde_unstage_pipe_for_cont_splash(struct sde_splash_data *data,
		void __iomem *mmio);

/**
 * sde_get_ctl_top_for_cont_splash - retrieve the current LM blocks
 * @mmio: mapped register io address of MDP
+8 −1
Original line number Diff line number Diff line
@@ -569,7 +569,11 @@ struct ctl_top {

/**
 * struct sde_splash_data - Struct contains details of continuous splash
 *	initial pipeline configuration.
 *	memory region and initial pipeline configuration.
 * @smmu_handoff_pending:boolean to notify handoff from splash memory to smmu
 * @splash_base:	Base address of continuous splash region reserved
 *                      by bootloader
 * @splash_size:	Size of continuous splash region
 * @top:	struct ctl_top objects
 * @ctl_ids:	stores the valid MDSS ctl block ids for the current mode
 * @lm_ids:	stores the valid MDSS layer mixer block ids for the current mode
@@ -579,6 +583,9 @@ struct ctl_top {
 * @dsc_cnt:	stores the active number of MDSS "dsc" blks for the current mode
 */
struct sde_splash_data {
	bool smmu_handoff_pending;
	unsigned long splash_base;
	u32 splash_size;
	struct ctl_top top[CTL_MAX - CTL_0];
	u8 ctl_ids[CTL_MAX - CTL_0];
	u8 lm_ids[LM_MAX - LM_0];
+5 −0
Original line number Diff line number Diff line
@@ -390,6 +390,11 @@ int sde_get_pp_dsc_for_cont_splash(void __iomem *mmio,
			SDE_DEBUG("Disabling autoreferesh\n");
			writel_relaxed(0x0, mmio
				+ MDP_PP_AUTOREFRESH_OFFSET(index));
			/*
			 * Wait for one frame update so that auto refresh
			 * disable is through
			 */
			usleep_range(16000, 20000);
		}
	}
	return dsc_cnt;
+7 −2
Original line number Diff line number Diff line
@@ -509,8 +509,13 @@ int init_v1(struct sde_hw_reg_dma *cfg)
			last_cmd_buf[i] =
			    alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf[i])) {
				rc = -EINVAL;
				break;
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf[i]));
				return 0;
			}
		}
	}
Loading