Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 530b9f89 authored by Abhijit Kulkarni's avatar Abhijit Kulkarni
Browse files

drm/msm/sde: add active configuration support in control path.



This change adds support for active configuration in control path.
It addresses the hw modifications to allow two interfaces to be
controlled by single control path.

Change-Id: Iac5766b3255b3a1e608af1922a2aec7967faaebd
Signed-off-by: default avatarSteve Cohen <cohens@codeaurora.org>
Signed-off-by: default avatarAbhijit Kulkarni <kabhijit@codeaurora.org>
parent dd1fbe29
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -1459,6 +1459,8 @@ static int sde_ctl_parse_dt(struct device_node *np,
			set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);
		if (sde_cfg->has_sbuf)
			set_bit(SDE_CTL_SBUF, &ctl->features);
		if (sde_cfg->ctl_rev == SDE_CTL_CFG_VERSION_1_0_0)
			set_bit(SDE_CTL_ACTIVE_CFG, &ctl->features);
	}
end:
	kfree(prop_value);
@@ -3360,6 +3362,10 @@ static int _sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
		sde_cfg->perf.min_prefill_lines = 24;
		sde_cfg->vbif_qos_nlvl = 8;
		sde_cfg->ts_prefill_rev = 2;
	} else if (IS_SDM855_TARGET(hw_rev)) {
		sde_cfg->has_wb_ubwc = true;
		sde_cfg->perf.min_prefill_lines = 24;
		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
	} else {
		SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
		sde_cfg->perf.min_prefill_lines = 0xffff;
+16 −0
Original line number Diff line number Diff line
@@ -47,11 +47,13 @@
#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
#define SDE_HW_VER_401	SDE_HW_VER(4, 0, 1) /* sdm845 v2.0 */
#define SDE_HW_VER_410	SDE_HW_VER(4, 1, 0) /* sdm670 v1.0 */
#define SDE_HW_VER_500	SDE_HW_VER(5, 0, 0) /* sdm855 v1.0 */

#define IS_MSM8996_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_170)
#define IS_MSM8998_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_300)
#define IS_SDM845_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
#define IS_SDM670_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_410)
#define IS_SDM855_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_500)

#define SDE_HW_BLK_NAME_LEN	16

@@ -68,6 +70,15 @@
#define MAX_XIN_COUNT 16
#define SSPP_SUBBLK_COUNT_MAX 2

#define SDE_CTL_CFG_VERSION_1_0_0       0x100
#define MAX_INTF_PER_CTL_V1                 2
#define MAX_DSC_PER_CTL_V1                  2
#define MAX_CWB_PER_CTL_V1                  2
#define MAX_MERGE_3D_PER_CTL_V1             2
#define MAX_WB_PER_CTL_V1                   1
#define MAX_CDM_PER_CTL_V1                  1


/**
 * Supported UBWC feature versions
 */
@@ -227,6 +238,8 @@ enum {
 * @SDE_CTL_PINGPONG_SPLIT      CTL supports pingpong split
 * @SDE_CTL_SBUF                CTL supports inline stream buffer
 * @SDE_CTL_PRIMARY_PREF        CTL preferred for primary display
 * @SDE_CTL_ACTIVE_CFG          CTL configuration is specified using active
 *                              blocks
 * @SDE_CTL_MAX
 */
enum {
@@ -234,6 +247,7 @@ enum {
	SDE_CTL_PINGPONG_SPLIT,
	SDE_CTL_SBUF,
	SDE_CTL_PRIMARY_PREF,
	SDE_CTL_ACTIVE_CFG,
	SDE_CTL_MAX
};

@@ -928,6 +942,7 @@ struct sde_perf_cfg {
 * @qseed_type         qseed2 or qseed3 support.
 * @csc_type           csc or csc_10bit support.
 * @smart_dma_rev      Supported version of SmartDMA feature.
 * @ctl_rev            supported version of control path.
 * @has_src_split      source split feature status
 * @has_cdp            Client driven prefetch feature status
 * @has_wb_ubwc        UBWC feature supported on WB
@@ -954,6 +969,7 @@ struct sde_mdss_cfg {
	u32 qseed_type;
	u32 csc_type;
	u32 smart_dma_rev;
	u32 ctl_rev;
	bool has_src_split;
	bool has_cdp;
	bool has_dim_layer;
+332 −7
Original line number Diff line number Diff line
@@ -36,6 +36,22 @@
#define   CTL_ROT_FLUSH                 0x0C4
#define   CTL_ROT_START                 0x0CC

#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_CWB_ACTIVE                0x0F0
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_CDM_ACTIVE                0x0F8

#define   CTL_MERGE_3D_FLUSH           0x100
#define   CTL_DSC_FLUSH                0x104
#define   CTL_WB_FLUSH                 0x108
#define   CTL_CWB_FLUSH                0x10C
#define   CTL_INTF_FLUSH               0x110
#define   CTL_CDM_FLUSH                0x114

#define  CTL_INTF_MASTER               0x134

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_ROT              BIT(27)
#define CTL_FLUSH_MASK_CTL              BIT(17)
@@ -87,6 +103,54 @@ static const u32 rot_tbl[ROT_MAX] = {SDE_NONE, 27};
 */
static const u32 intf_tbl[INTF_MAX] = {SDE_NONE, 31, 30, 29, 28};

/**
 * Below definitions are for CTL supporting SDE_CTL_ACTIVE_CFG,
 * certain blocks have the individual flush control as well,
 * for such blocks flush is done by flushing individual control and
 * top level control.
 */

/**
 * list of WB bits in CTL_WB_FLUSH
 */
static const u32 wb_flush_tbl[WB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 2};

/**
 * list of INTF bits in CTL_INTF_FLUSH
 */
static const u32 intf_flush_tbl[INTF_MAX] = {SDE_NONE, 0, 1, 2, 3, 4, 5};

/**
 * list of DSC bits in CTL_DSC_FLUSH
 */
static const u32 dsc_flush_tbl[DSC_MAX] = {SDE_NONE, 0, 1, 2, 3, 4, 5};

/**
 * list of MERGE_3D bits in CTL_MERGE_3D_FLUSH
 */
static const u32 merge_3d_tbl[MERGE_3D_MAX] = {SDE_NONE, 0, 1, 2};

/**
 * list of CDM bits in CTL_CDM_FLUSH
 */
static const u32 cdm_flush_tbl[CDM_MAX] = {SDE_NONE, 0};

/**
 * list of CWB bits in CTL_CWB_FLUSH
 */
static const u32 cwb_flush_tbl[CWB_MAX] = {SDE_NONE, SDE_NONE, SDE_NONE, 2, 3,
	4, 5};

/**
 * Individual flush bit in CTL_FLUSH
 */
#define  WB_IDX         16
#define  DSC_IDX        22
#define  MERGE_3D_IDX   23
#define  CDM_IDX        26
#define  CWB_IDX        28
#define  INTF_IDX       31

static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
		struct sde_mdss_cfg *m,
		void __iomem *addr,
@@ -188,7 +252,7 @@ static int sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx,
	if (!ctx || !cfg)
		return -EINVAL;

	cfg->pending_flush_mask = ctx->flush.pending_flush_mask;
	memcpy(cfg, &ctx->flush, sizeof(*cfg));
	return 0;
}

@@ -347,6 +411,171 @@ static inline int sde_hw_ctl_update_bitmask_intf(struct sde_hw_ctl *ctx,
	return 0;
}

/*
 * sde_hw_ctl_update_bitmask_wb_v1 - stage/unstage a WB block flush (CTL v1)
 * @ctx: ctl path ctx pointer
 * @wb: writeback block id; only WB_2 is accepted by this CTL version
 * @enable: true to mark the block pending flush, false to clear it
 * Return: 0 on success, -EINVAL on invalid arguments
 */
static inline int sde_hw_ctl_update_bitmask_wb_v1(struct sde_hw_ctl *ctx,
		enum sde_wb wb, bool enable)
{
	if (!ctx)
		return -EINVAL;

	if (wb != WB_2) {
		SDE_ERROR("Unsupported wb %d\n", wb);
		return -EINVAL;
	}

	UPDATE_MASK(ctx->flush.pending_wb_flush_mask, wb_flush_tbl[wb], enable);

	/* mirror "any WB pending" into the top-level flush summary bit */
	UPDATE_MASK(ctx->flush.pending_flush_mask, WB_IDX,
			ctx->flush.pending_wb_flush_mask != 0);
	return 0;
}

/*
 * sde_hw_ctl_update_bitmask_intf_v1 - stage/unstage an INTF flush (CTL v1)
 * @ctx: ctl path ctx pointer
 * @intf: interface block id, must lie strictly between SDE_NONE and INTF_MAX
 * @enable: true to mark the block pending flush, false to clear it
 * Return: 0 on success, -EINVAL on invalid arguments
 */
static inline int sde_hw_ctl_update_bitmask_intf_v1(struct sde_hw_ctl *ctx,
		enum sde_intf intf, bool enable)
{
	if (!ctx)
		return -EINVAL;

	if (intf <= SDE_NONE || intf >= INTF_MAX) {
		SDE_ERROR("Unsupported intf %d\n", intf);
		return -EINVAL;
	}

	UPDATE_MASK(ctx->flush.pending_intf_flush_mask, intf_flush_tbl[intf],
			enable);

	/* mirror "any INTF pending" into the top-level flush summary bit */
	UPDATE_MASK(ctx->flush.pending_flush_mask, INTF_IDX,
			ctx->flush.pending_intf_flush_mask != 0);
	return 0;
}

/*
 * sde_hw_ctl_update_bitmask_dsc_v1 - stage/unstage a DSC flush (CTL v1)
 * @ctx: ctl path ctx pointer
 * @dsc: dsc block id, must lie strictly between SDE_NONE and DSC_MAX
 * @enable: true to mark the block pending flush, false to clear it
 * Return: 0 on success, -EINVAL on invalid arguments
 */
static inline int sde_hw_ctl_update_bitmask_dsc_v1(struct sde_hw_ctl *ctx,
		enum sde_dsc dsc, bool enable)
{
	if (!ctx)
		return -EINVAL;

	if (dsc <= SDE_NONE || dsc >= DSC_MAX) {
		SDE_ERROR("Unsupported dsc %d\n", dsc);
		return -EINVAL;
	}

	UPDATE_MASK(ctx->flush.pending_dsc_flush_mask, dsc_flush_tbl[dsc],
			enable);

	/* mirror "any DSC pending" into the top-level flush summary bit */
	UPDATE_MASK(ctx->flush.pending_flush_mask, DSC_IDX,
			ctx->flush.pending_dsc_flush_mask != 0);
	return 0;
}

/*
 * sde_hw_ctl_update_bitmask_merge3d_v1 - stage/unstage a 3D-merge flush
 * (CTL v1)
 * @ctx: ctl path ctx pointer
 * @merge_3d: merge-3d block id, strictly between SDE_NONE and MERGE_3D_MAX
 * @enable: true to mark the block pending flush, false to clear it
 * Return: 0 on success, -EINVAL on invalid arguments
 */
static inline int sde_hw_ctl_update_bitmask_merge3d_v1(struct sde_hw_ctl *ctx,
		enum sde_merge_3d merge_3d, bool enable)
{
	if (!ctx)
		return -EINVAL;

	if (merge_3d <= SDE_NONE || merge_3d >= MERGE_3D_MAX) {
		SDE_ERROR("Unsupported merge_3d %d\n", merge_3d);
		return -EINVAL;
	}

	UPDATE_MASK(ctx->flush.pending_merge_3d_flush_mask,
			merge_3d_tbl[merge_3d], enable);

	/* mirror "any 3d-merge pending" into the top-level summary bit */
	UPDATE_MASK(ctx->flush.pending_flush_mask, MERGE_3D_IDX,
			ctx->flush.pending_merge_3d_flush_mask != 0);
	return 0;
}

/*
 * sde_hw_ctl_update_bitmask_cdm_v1 - stage/unstage a CDM flush (CTL v1)
 * @ctx: ctl path ctx pointer
 * @cdm: chroma-down block id; only CDM_0 is accepted by this CTL version
 * @enable: true to mark the block pending flush, false to clear it
 * Return: 0 on success, -EINVAL on invalid arguments
 */
static inline int sde_hw_ctl_update_bitmask_cdm_v1(struct sde_hw_ctl *ctx,
		enum sde_cdm cdm, bool enable)
{
	if (!ctx)
		return -EINVAL;

	if (cdm != CDM_0) {
		SDE_ERROR("Unsupported cdm %d\n", cdm);
		return -EINVAL;
	}

	UPDATE_MASK(ctx->flush.pending_cdm_flush_mask, cdm_flush_tbl[cdm],
			enable);

	/* mirror "any CDM pending" into the top-level flush summary bit */
	UPDATE_MASK(ctx->flush.pending_flush_mask, CDM_IDX,
			ctx->flush.pending_cdm_flush_mask != 0);
	return 0;
}

/*
 * sde_hw_ctl_update_bitmask_cwb_v1 - stage/unstage a CWB flush (CTL v1)
 * @ctx: ctl path ctx pointer
 * @cwb: concurrent-writeback block id, must be in [CWB_2, CWB_MAX)
 * @enable: true to mark the block pending flush, false to clear it
 * Return: 0 on success, -EINVAL on invalid arguments
 */
static inline int sde_hw_ctl_update_bitmask_cwb_v1(struct sde_hw_ctl *ctx,
		enum sde_cwb cwb, bool enable)
{
	if (!ctx)
		return -EINVAL;

	if (cwb < CWB_2 || cwb >= CWB_MAX) {
		SDE_ERROR("Unsupported cwb %d\n", cwb);
		return -EINVAL;
	}

	UPDATE_MASK(ctx->flush.pending_cwb_flush_mask, cwb_flush_tbl[cwb],
			enable);

	/* mirror "any CWB pending" into the top-level flush summary bit */
	UPDATE_MASK(ctx->flush.pending_flush_mask, CWB_IDX,
			ctx->flush.pending_cwb_flush_mask != 0);
	return 0;
}

/*
 * sde_hw_ctl_update_pending_flush_v1 - merge caller-supplied flush masks
 * into the ctl context's accumulated pending flush state (CTL v1)
 * @ctx: ctl path ctx pointer
 * @cfg: flush masks to OR into the pending state
 * Return: 0 on success, -EINVAL on NULL arguments
 */
static inline int sde_hw_ctl_update_pending_flush_v1(
		struct sde_hw_ctl *ctx,
		struct sde_ctl_flush_cfg *cfg)
{
	struct sde_ctl_flush_cfg *dst;

	if (!ctx || !cfg)
		return -EINVAL;

	dst = &ctx->flush;
	dst->pending_flush_mask |= cfg->pending_flush_mask;
	dst->pending_intf_flush_mask |= cfg->pending_intf_flush_mask;
	dst->pending_cdm_flush_mask |= cfg->pending_cdm_flush_mask;
	dst->pending_wb_flush_mask |= cfg->pending_wb_flush_mask;
	dst->pending_dsc_flush_mask |= cfg->pending_dsc_flush_mask;
	dst->pending_merge_3d_flush_mask |= cfg->pending_merge_3d_flush_mask;
	dst->pending_cwb_flush_mask |= cfg->pending_cwb_flush_mask;
	return 0;
}

static inline int sde_hw_ctl_trigger_flush_v1(struct sde_hw_ctl *ctx)
{
	if (!ctx)
		return -EINVAL;

	if (ctx->flush.pending_flush_mask & BIT(WB_IDX))
		SDE_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->flush.pending_wb_flush_mask);
	if (ctx->flush.pending_flush_mask & BIT(DSC_IDX))
		SDE_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
				ctx->flush.pending_dsc_flush_mask);
	if (ctx->flush.pending_flush_mask & BIT(MERGE_3D_IDX))
		SDE_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->flush.pending_merge_3d_flush_mask);
	if (ctx->flush.pending_flush_mask & BIT(CDM_IDX))
		SDE_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
				ctx->flush.pending_cdm_flush_mask);
	if (ctx->flush.pending_flush_mask & BIT(CWB_IDX))
		SDE_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
				ctx->flush.pending_cwb_flush_mask);
	if (ctx->flush.pending_flush_mask & BIT(INTF_IDX))
		SDE_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->flush.pending_intf_flush_mask);

	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->flush.pending_flush_mask);
	return 0;
}

static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 timeout_us)
{
	struct sde_hw_blk_reg_map *c;
@@ -589,6 +818,83 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
	SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

/*
 * sde_hw_ctl_intf_cfg_v1 - program the active block configuration (CTL v1)
 * @ctx: ctl path ctx pointer
 * @cfg: active interface/wb/merge-3d/cwb/cdm configuration
 *
 * Translates the block id lists in @cfg into per-register active bitmasks
 * and writes them to the CTL *_ACTIVE registers; also selects cmd/vid mode
 * and the master interface for split display.
 * Return: 0 on success, -EINVAL on NULL arguments
 */
static int sde_hw_ctl_intf_cfg_v1(struct sde_hw_ctl *ctx,
		struct sde_hw_intf_cfg_v1 *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge_3d_active = 0;
	u32 cwb_active = 0;
	u32 mode_sel = 0;
	u32 cdm_active = 0;
	u32 intf_master = 0;
	u32 i;

	/* reject NULL @cfg as well: it is dereferenced unconditionally below */
	if (!ctx || !cfg)
		return -EINVAL;

	c = &ctx->hw;
	for (i = 0; i < cfg->intf_count; i++) {
		if (cfg->intf[i])
			intf_active |= BIT(cfg->intf[i] - INTF_0);
	}

	/* a master interface is only meaningful for split display */
	if (cfg->intf_count > 1)
		intf_master = BIT(cfg->intf_master - INTF_0);

	for (i = 0; i < cfg->wb_count; i++) {
		if (cfg->wb[i])
			wb_active |= BIT(cfg->wb[i] - WB_0);
	}

	for (i = 0; i < cfg->merge_3d_count; i++) {
		if (cfg->merge_3d[i])
			merge_3d_active |= BIT(cfg->merge_3d[i] - MERGE_3D_0);
	}

	for (i = 0; i < cfg->cwb_count; i++) {
		if (cfg->cwb[i])
			cwb_active |= BIT(cfg->cwb[i] - CWB_0);
	}

	/*
	 * Bug fix: the loop condition was "cfg->cdm_count" (missing "i <"),
	 * which never terminates and indexes cdm[] out of bounds whenever
	 * cdm_count is non-zero.
	 */
	for (i = 0; i < cfg->cdm_count; i++) {
		if (cfg->cdm[i])
			cdm_active |= BIT(cfg->cdm[i] - CDM_0);
	}

	if (cfg->intf_mode_sel == SDE_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);	/* cmd-mode select bit in CTL_TOP */

	SDE_REG_WRITE(c, CTL_TOP, mode_sel);
	SDE_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	SDE_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
	SDE_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	SDE_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
	SDE_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, merge_3d_active);
	SDE_REG_WRITE(c, CTL_INTF_MASTER, intf_master);
	return 0;
}

/*
 * sde_hw_ctl_dsc_cfg - program the active DSC block mask (CTL v1)
 * @ctx: ctl path ctx pointer
 * @cfg: list of active dsc blocks
 * Return: 0 on success, -EINVAL on NULL arguments
 */
static int sde_hw_ctl_dsc_cfg(struct sde_hw_ctl *ctx,
		struct sde_ctl_dsc_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c;
	u32 dsc_active = 0;
	int i;

	/* validate @cfg too, consistent with the other v1 config/flush ops */
	if (!ctx || !cfg)
		return -EINVAL;

	c = &ctx->hw;
	for (i = 0; i < cfg->dsc_count; i++) {
		if (cfg->dsc[i])
			dsc_active |= BIT(cfg->dsc[i] - DSC_0);
	}

	SDE_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	return 0;
}

static int sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
		struct sde_hw_intf_cfg *cfg)
{
@@ -690,16 +996,38 @@ static int sde_hw_reg_dma_flush(struct sde_hw_ctl *ctx, bool blocking)
static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
		unsigned long cap)
{
	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
	if (cap & BIT(SDE_CTL_ACTIVE_CFG)) {
		ops->update_pending_flush =
			sde_hw_ctl_update_pending_flush_v1;
		ops->trigger_flush = sde_hw_ctl_trigger_flush_v1;

		ops->setup_intf_cfg_v1 = sde_hw_ctl_intf_cfg_v1;
		ops->setup_dsc_cfg = sde_hw_ctl_dsc_cfg;

		ops->update_bitmask_cdm = sde_hw_ctl_update_bitmask_cdm_v1;
		ops->update_bitmask_wb = sde_hw_ctl_update_bitmask_wb_v1;
		ops->update_bitmask_intf = sde_hw_ctl_update_bitmask_intf_v1;
		ops->update_bitmask_dsc = sde_hw_ctl_update_bitmask_dsc_v1;
		ops->update_bitmask_merge3d =
			sde_hw_ctl_update_bitmask_merge3d_v1;
		ops->update_bitmask_cwb = sde_hw_ctl_update_bitmask_cwb_v1;
	} else {
		ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
		ops->trigger_flush = sde_hw_ctl_trigger_flush;

		ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;

		ops->update_bitmask_cdm = sde_hw_ctl_update_bitmask_cdm;
		ops->update_bitmask_wb = sde_hw_ctl_update_bitmask_wb;
		ops->update_bitmask_intf = sde_hw_ctl_update_bitmask_intf;
	}
	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
	ops->get_flush_register = sde_hw_ctl_get_flush_register;
	ops->trigger_start = sde_hw_ctl_trigger_start;
	ops->trigger_pending = sde_hw_ctl_trigger_pending;
	ops->read_ctl_top = sde_hw_ctl_read_ctl_top;
	ops->read_ctl_layers = sde_hw_ctl_read_ctl_layers;
	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
	ops->reset = sde_hw_ctl_reset_control;
	ops->get_reset = sde_hw_ctl_get_reset_status;
	ops->hard_reset = sde_hw_ctl_hard_reset;
@@ -710,9 +1038,6 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
	ops->update_bitmask_mixer = sde_hw_ctl_update_bitmask_mixer;
	ops->update_bitmask_dspp = sde_hw_ctl_update_bitmask_dspp;
	ops->update_bitmask_dspp_pavlut = sde_hw_ctl_update_bitmask_dspp_pavlut;
	ops->update_bitmask_intf = sde_hw_ctl_update_bitmask_intf;
	ops->update_bitmask_cdm = sde_hw_ctl_update_bitmask_cdm;
	ops->update_bitmask_wb = sde_hw_ctl_update_bitmask_wb;
	ops->reg_dma_flush = sde_hw_reg_dma_flush;
	ops->get_start_state = sde_hw_ctl_get_start_state;
	if (cap & BIT(SDE_CTL_SBUF)) {
+104 −0
Original line number Diff line number Diff line
@@ -71,6 +71,54 @@ struct sde_hw_intf_cfg {
	int stream_sel;
};

/**
 * struct sde_hw_intf_cfg_v1 : Describes the data structure to configure the
 *                            output interfaces for a particular display on a
 *                            platform which supports ctl path version 1.
 * @intf_count:               No. of active interfaces for this display
 * @intf :                    Interface ids of active interfaces
 * @intf_mode_sel:            Interface mode, cmd / vid
 * @intf_master:              Master interface for split display
 * @wb_count:                 No. of active writebacks
 * @wb:                       Writeback ids of active writebacks
 * @merge_3d_count:           No. of active merge_3d blocks
 * @merge_3d:                 Id of the active merge 3d blocks
 * @cwb_count:                No. of active concurrent writebacks
 * @cwb:                      Id of active cwb blocks
 * @cdm_count:                No. of active chroma down module
 * @cdm:                      Id of active cdm blocks
 */
struct sde_hw_intf_cfg_v1 {
	/* active interfaces: count of valid entries in intf[] */
	uint32_t intf_count;
	enum sde_intf intf[MAX_INTF_PER_CTL_V1];
	enum sde_ctl_mode_sel intf_mode_sel;	/* cmd or video mode */
	enum sde_intf intf_master;	/* master intf for split display */

	/* active writebacks */
	uint32_t wb_count;
	enum sde_wb wb[MAX_WB_PER_CTL_V1];

	/* active 3d-merge blocks */
	uint32_t merge_3d_count;
	enum sde_merge_3d merge_3d[MAX_MERGE_3D_PER_CTL_V1];

	/* active concurrent writebacks */
	uint32_t cwb_count;
	enum sde_cwb cwb[MAX_CWB_PER_CTL_V1];

	/* active chroma-down modules */
	uint32_t cdm_count;
	enum sde_cdm cdm[MAX_CDM_PER_CTL_V1];
};

/**
 * struct sde_hw_ctl_dsc_cfg :Describes the DSC blocks being used for this
 *                            display on a platform which supports ctl path
 *                            version 1.
 * @dsc_count:                No. of active dsc blocks
 * @dsc:                      Id of active dsc blocks
 */
struct sde_ctl_dsc_cfg {
	uint32_t dsc_count;	/* no. of valid entries in dsc[] */
	enum sde_dsc dsc[MAX_DSC_PER_CTL_V1];	/* ids of active dsc blocks */
};

/**
 * struct sde_ctl_sbuf_cfg - control for stream buffer configuration
 * @rot_op_mode: rotator operation mode
@@ -91,9 +139,24 @@ struct sde_ctl_sbuf_cfg {
 * pending flush mask.
 *
 * @pending_flush_mask: pending ctl_flush
 * CTL path version SDE_CTL_CFG_VERSION_1_0_0 has a two-level flush
 * mechanism for lower pipe controls: individual controls should be
 * flushed before exercising the top level flush
 * @pending_intf_flush_mask: pending INTF flush
 * @pending_cdm_flush_mask: pending CDWN block flush
 * @pending_wb_flush_mask: pending writeback flush
 * @pending_dsc_flush_mask: pending dsc flush
 * @pending_merge_3d_flush_mask: pending 3d merge block flush
 * @pending_cwb_flush_mask: pending flush for concurrent writeback
 */
struct sde_ctl_flush_cfg {
	u32 pending_flush_mask;		/* top-level CTL_FLUSH bits */
	u32 pending_intf_flush_mask;	/* per-INTF flush bits */
	u32 pending_cdm_flush_mask;	/* per-CDM flush bits */
	u32 pending_wb_flush_mask;	/* per-WB flush bits */
	u32 pending_dsc_flush_mask;	/* per-DSC flush bits */
	u32 pending_merge_3d_flush_mask;	/* per-3d-merge flush bits */
	u32 pending_cwb_flush_mask;	/* per-CWB flush bits */
};

/**
@@ -177,6 +240,24 @@ struct sde_hw_ctl_ops {
	int (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
		struct sde_hw_intf_cfg *cfg);

	/**
	 * Setup ctl_path interface config for SDE_CTL_ACTIVE_CFG
	 * @ctx   : ctl path ctx pointer
	 * @cfg    : interface config structure pointer
	 * @Return: error code
	 */
	int (*setup_intf_cfg_v1)(struct sde_hw_ctl *ctx,
		struct sde_hw_intf_cfg_v1 *cfg);

	/**
	 * Setup ctl_path dsc config for SDE_CTL_ACTIVE_CFG
	 * @ctx   : ctl path ctx pointer
	 * @cfg    : dsc config structure pointer
	 * @Return: error code
	 */
	int (*setup_dsc_cfg)(struct sde_hw_ctl *ctx,
		struct sde_ctl_dsc_cfg *cfg);

	int (*reset)(struct sde_hw_ctl *c);

	/**
@@ -268,6 +349,29 @@ struct sde_hw_ctl_ops {
	int (*update_bitmask_rot)(struct sde_hw_ctl *ctx,
		enum sde_rot blk, bool enable);

	/**
	 * update_bitmask_dsc: updates mask corresponding to dsc
	 * @blk               : blk id
	 * @enable            : true to enable, 0 to disable
	 */
	int (*update_bitmask_dsc)(struct sde_hw_ctl *ctx,
		enum sde_dsc blk, bool enable);

	/**
	 * update_bitmask_merge3d: updates mask corresponding to merge_3d
	 * @blk               : blk id
	 * @enable            : true to enable, 0 to disable
	 */
	int (*update_bitmask_merge3d)(struct sde_hw_ctl *ctx,
		enum sde_merge_3d blk, bool enable);

	/**
	 * update_bitmask_cwb: updates mask corresponding to cwb
	 * @blk               : blk id
	 * @enable            : true to enable, 0 to disable
	 */
	int (*update_bitmask_cwb)(struct sde_hw_ctl *ctx,
		enum sde_cwb blk, bool enable);
	/**
	 * read CTL_TOP register value and return
	 * the data.
+9 −0
Original line number Diff line number Diff line
@@ -272,6 +272,8 @@ enum sde_cwb {
	CWB_1,
	CWB_2,
	CWB_3,
	CWB_4,
	CWB_5,
	CWB_MAX
};

@@ -311,6 +313,13 @@ enum sde_inline_rot {
	INLINE_ROT_MAX
};

/*
 * 3d merge block instance ids. Ids start at 1 — index 0 of the flush
 * lookup tables holds SDE_NONE (see merge_3d_tbl), so 0 can never name
 * a real block.
 */
enum sde_merge_3d {
	MERGE_3D_0 = 1,
	MERGE_3D_1,
	MERGE_3D_2,
	MERGE_3D_MAX
};

/**
 * SDE HW,Component order color map
 */