
Commit 389b09a1 authored by Stephane Viau, committed by Rob Clark

drm/msm/mdp5: Add START signal to kick off certain pipelines



Some interfaces (WB, DSI Command Mode) need to be kicked off
through a START signal. This signal needs to be sent at the right
time and, in some cases, requires keeping track of the pipeline
status (e.g.: whether pipeline registers are flushed AND output WB
buffers are ready, in the case of the WB interface).
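
For context, the gating logic this patch introduces can be modeled in a few lines of plain C: a START write is allowed only once the encoder is enabled and every flush bit recorded in start_mask has been committed. This is a minimal standalone sketch, not kernel code; the struct and function names mirror the diff below, and the bit values are hypothetical.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct pipeline_model {
		bool encoder_enabled;	/* encoder ready for data streaming */
		uint32_t start_mask;	/* flush bits that must land before START */
	};

	/* Mirrors start_signal_needed() below: START may fire only once the
	 * encoder is up and all gating flush bits have drained to zero. */
	static bool start_needed(const struct pipeline_model *p)
	{
		return p->encoder_enabled && p->start_mask == 0;
	}

	/* Mirrors the bookkeeping in mdp5_ctl_commit(): each commit clears
	 * the flushed bits from start_mask, then checks the gate. */
	static void commit_flush(struct pipeline_model *p, uint32_t flush_mask)
	{
		p->start_mask &= ~flush_mask;
		if (start_needed(p))
			printf("send START after flush %#x\n", flush_mask);
	}

	int main(void)
	{
		/* hypothetical bits: 0x1 = LM flush, 0x10000 = WB buffer flush */
		struct pipeline_model wb = {
			.encoder_enabled = true,
			.start_mask = 0x00010001,
		};

		commit_flush(&wb, 0x00000001);	/* LM flushed; WB still pending */
		commit_flush(&wb, 0x00010000);	/* WB flushed too: START fires */
		return 0;
	}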

Signed-off-by: Stephane Viau <sviau@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent d145dd78
+2 −0
@@ -31,6 +31,7 @@ const struct mdp5_cfg_hw msm8x74_config = {
	.ctl = {
		.count = 5,
		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
		.flush_hw_mask = 0x0003ffff,
	},
	.pipe_vig = {
		.count = 3,
@@ -78,6 +79,7 @@ const struct mdp5_cfg_hw apq8084_config = {
	.ctl = {
		.count = 5,
		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
		.flush_hw_mask = 0x003fffff,
	},
	.pipe_vig = {
		.count = 4,
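
The new .flush_hw_mask entries record which FLUSH register bits actually exist on each SoC; mdp5_ctl_commit(), later in this patch, ANDs every requested mask against it before writing the register. A small standalone illustration, using the dummy cursor bit defined later in this patch — neither chip here implements it in hardware, which is exactly why fix_sw_flush() substitutes the LM bit:

	#include <stdint.h>
	#include <stdio.h>

	#define MSM8X74_FLUSH_HW_MASK	0x0003ffffu	/* from this diff */
	#define APQ8084_FLUSH_HW_MASK	0x003fffffu	/* from this diff */
	#define MDP5_CTL_FLUSH_CURSOR_0	0x00400000u	/* dummy bit, from this patch */

	int main(void)
	{
		uint32_t requested = MDP5_CTL_FLUSH_CURSOR_0;

		/* both masks filter bit 22 out: no dedicated cursor flush bit */
		printf("msm8x74 writes %#x\n", requested & MSM8X74_FLUSH_HW_MASK);
		printf("apq8084 writes %#x\n", requested & APQ8084_FLUSH_HW_MASK);
		return 0;
	}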
+6 −1
@@ -44,6 +44,11 @@ struct mdp5_lm_block {
	uint32_t nb_stages;		/* number of stages per blender */
};

struct mdp5_ctl_block {
	MDP5_SUB_BLOCK_DEFINITION;
	uint32_t flush_hw_mask;		/* FLUSH register's hardware mask */
};

struct mdp5_smp_block {
	int mmb_count;			/* number of SMP MMBs */
	int mmb_size;			/* MMB: size in bytes */
@@ -55,7 +60,7 @@ struct mdp5_cfg_hw {
	char  *name;

	struct mdp5_smp_block smp;
	struct mdp5_sub_block ctl;
	struct mdp5_ctl_block ctl;
	struct mdp5_sub_block pipe_vig;
	struct mdp5_sub_block pipe_rgb;
	struct mdp5_sub_block pipe_dma;
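
MDP5_SUB_BLOCK_DEFINITION is the header's macro for the fields every sub-block shares (a count plus register base offsets); the new mdp5_ctl_block extends that common layout with flush_hw_mask. A compilable sketch of the pattern — the macro body here is an assumption based on how the config tables use it:

	#include <stdint.h>

	#define MAX_BASES 8			/* assumed pool size */
	#define SUB_BLOCK_DEFINITION \
		int count; \
		uint32_t base[MAX_BASES]	/* assumed macro body */

	struct ctl_block {
		SUB_BLOCK_DEFINITION;		/* common sub-block fields */
		uint32_t flush_hw_mask;		/* CTL-specific extension */
	};

	int main(void)
	{
		/* msm8x74 values from the first file of this diff */
		struct ctl_block ctl = {
			.count = 5,
			.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
			.flush_hw_mask = 0x0003ffff,
		};

		return ctl.count == 5 ? 0 : 1;
	}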
+18 −13
@@ -82,8 +82,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)

static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -110,8 +108,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}
	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);

	crtc_flush(crtc, flush_mask);
}
@@ -442,13 +440,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo;
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, cursor_addr, stride;
	int ret, bpp, lm;
	unsigned int depth;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -461,7 +460,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,

	if (!handle) {
		DBG("Cursor off");
		return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
		cursor_enable = false;
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(dev, file, handle);
@@ -502,11 +502,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
	if (ret)
set_cursor:
	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	crtc_flush(crtc, flush_mask);

end:
@@ -628,11 +631,13 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}

	if (WARN_ON(!crtc))
		return -EINVAL;

	return mdp5_crtc->lm;
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
}

/* initialize crtc */
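
With per-entity flush-mask helpers replacing the stored masks, the cursor-off and cursor-on branches can now share one exit path: both funnel through the new set_cursor label into a single mdp5_ctl_set_cursor() call. A standalone model of that control flow, with the buffer handling elided:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for mdp5_ctl_set_cursor(ctl, cursor_id, enable) */
	static int set_cursor_model(int cursor_id, bool enable)
	{
		printf("CTL: cursor %d %sabled\n", cursor_id, enable ? "en" : "dis");
		return 0;
	}

	static int cursor_set_model(uint32_t handle)
	{
		bool cursor_enable = true;
		int ret;

		if (!handle) {
			/* "Cursor off": skip buffer setup, still program the CTL */
			cursor_enable = false;
			goto set_cursor;
		}

		/* ... look up, pin and program the cursor buffer here ... */

	set_cursor:
		ret = set_cursor_model(0, cursor_enable);
		if (ret)
			fprintf(stderr, "failed to %sable cursor: %d\n",
					cursor_enable ? "en" : "dis", ret);
		/* a flush with mdp_ctl_flush_mask_cursor(0) would follow here */
		return ret;
	}

	int main(void)
	{
		cursor_set_model(42);	/* enable path */
		cursor_set_model(0);	/* disable path */
		return 0;
	}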
+215 −32
/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -35,18 +35,16 @@

struct op_mode {
	struct mdp5_interface intf;
	/*
	 * TODO: add a state variable to control the pipeline
	 *
	 * eg: WB interface needs both buffer addresses to be committed +
	 * output buffers ready to be written into, before we can START.
	 */

	bool encoder_enabled;
	uint32_t start_mask;
};

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	int lm;

	/* whether this CTL has been allocated or not: */
	bool busy;
@@ -58,8 +56,8 @@ struct mdp5_ctl {
	spinlock_t hw_lock;
	u32 reg_offset;

	/* flush mask used to commit CTL registers */
	u32 flush_mask;
	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

@@ -73,6 +71,9 @@ struct mdp5_ctl_manager {
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
@@ -174,6 +175,9 @@ int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)

	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));

	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
				   mdp_ctl_flush_mask_encoder(intf);

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);
@@ -183,14 +187,90 @@ int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
	return 0;
}

int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
static bool start_signal_needed(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;

	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
		return false;

	switch (pipeline->intf.type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

static void refill_start_mask(struct mdp5_ctl *ctl)
{
	struct op_mode *pipeline = &ctl->pipeline;
	struct mdp5_interface *intf = &ctl->pipeline.intf;

	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);

	/*
	 * Writeback encoder needs to program & flush
	 * address registers for each page flip..
	 */
	if (intf->type == INTF_WB)
		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @enable: true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->pipeline.encoder_enabled = enabled;
	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");

	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

	return 0;
}

/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	int lm;
	int lm = ctl->lm;

	lm = mdp5_crtc_get_lm(ctl->crtc);
	if (unlikely(WARN_ON(lm < 0))) {
		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
				ctl->id, lm);
@@ -210,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
	ctl->cursor_on = enable;

	return 0;
}


int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
{
	unsigned long flags;
@@ -229,37 +309,133 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);

	return 0;
}

u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	/* these are dummy bits for now, but will appear in next chipsets: */
#define MDP5_CTL_FLUSH_TIMING_0		0x80000000
#define MDP5_CTL_FLUSH_TIMING_1		0x40000000
#define MDP5_CTL_FLUSH_TIMING_2		0x20000000
#define MDP5_CTL_FLUSH_TIMING_3		0x10000000
#define MDP5_CTL_FLUSH_WB		0x00010000

	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	/* these are dummy bits for now, but will appear in next chipsets: */
#define MDP5_CTL_FLUSH_CURSOR_0		0x00400000
#define MDP5_CTL_FLUSH_CURSOR_1		0x00800000

	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default:        return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	case 5:  return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);

	return sw_mask;
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	struct op_mode *pipeline = &ctl->pipeline;
	unsigned long flags;

	if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
		int lm = mdp5_crtc_get_lm(ctl->crtc);
	pipeline->start_mask &= ~flush_mask;

		if (unlikely(WARN_ON(lm < 0))) {
			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
					ctl->id, lm);
			return -EINVAL;
		}
	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
			pipeline->start_mask, ctl->pending_ctl_trigger);

		/* for current targets, cursor bit is the same as LM bit */
		flush_mask |= mdp_ctl_flush_mask_lm(lm);
	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	return 0;
	if (start_signal_needed(ctl)) {
		send_start_signal(ctl);
		refill_start_mask(ctl);
	}

u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
{
	return ctl->flush_mask;
	return 0;
}

void mdp5_ctl_release(struct mdp5_ctl *ctl)
@@ -280,6 +456,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl)
	DBG("CTL %d released", ctl->id);
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctl_request() - CTL dynamic allocation
 *
@@ -307,8 +488,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,

	ctl = &ctl_mgr->ctls[c];

	ctl->lm = mdp5_crtc_get_lm(crtc);
	ctl->crtc = crtc;
	ctl->busy = true;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
@@ -339,7 +522,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

@@ -361,6 +544,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
@@ -376,7 +560,6 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
		ctl->busy = false;
		spin_lock_init(&ctl->hw_lock);
	}
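
Two pieces of state drive the rewritten mdp5_ctl_commit(): pending_ctl_trigger, set by mdp5_ctl_blend() and mdp5_ctl_set_cursor(), forces the CTL flush bit in whenever a matching mask is committed, and flush_hw_mask drops bits the SoC does not implement. A standalone model of that path — the bit values are placeholders, not the real register layout:

	#include <stdint.h>
	#include <stdio.h>

	#define FLUSH_LM0	0x00000040u	/* placeholder value */
	#define FLUSH_CTL	0x00020000u	/* placeholder value */

	struct ctl_model {
		uint32_t pending_ctl_trigger;	/* bits whose flush must also flush CTL */
		uint32_t flush_hw_mask;		/* bits this SoC implements */
	};

	static void commit_model(struct ctl_model *ctl, uint32_t flush_mask)
	{
		/* a CTL-register change was staged against one of these bits */
		if (ctl->pending_ctl_trigger & flush_mask) {
			flush_mask |= FLUSH_CTL;
			ctl->pending_ctl_trigger = 0;
		}

		/* drop bits the hardware does not implement */
		flush_mask &= ctl->flush_hw_mask;

		if (flush_mask)
			printf("write FLUSH = %#x\n", flush_mask);
	}

	int main(void)
	{
		struct ctl_model ctl = { .flush_hw_mask = 0x0003ffffu };

		/* e.g. after mdp5_ctl_blend() staged a new LM0 configuration */
		ctl.pending_ctl_trigger = FLUSH_LM0;
		commit_model(&ctl, FLUSH_LM0);	/* prints LM0 | CTL */
		return 0;
	}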
+23 −49
@@ -33,20 +33,13 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);

struct mdp5_interface;
int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);

int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);

/* @blend_cfg: see LM blender config definition below */
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);

/* @flush_mask: see CTL flush masks definitions below */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);

void mdp5_ctl_release(struct mdp5_ctl *ctl);
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);

/*
 * blend_cfg (LM blender config):
@@ -73,51 +66,32 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
}

/*
 * flush_mask (CTL flush masks):
 * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
 *
 * The following functions allow each DRM entity to get and store
 * their own flush mask.
 * Once stored, these masks will then be accessed through each DRM's
 * interface and used by the caller of mdp5_ctl_commit() to specify
 * which block(s) need to be flushed through @flush_mask parameter.
 * @blend_cfg: see LM blender config definition below
 *
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);

#define MDP5_CTL_FLUSH_CURSOR_DUMMY	0x80000000
/**
 * mdp_ctl_flush_mask...() - Register FLUSH masks
 *
 * These masks are used to specify which block(s) need to be flushed
 * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask).
 */
u32 mdp_ctl_flush_mask_lm(int lm);
u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
u32 mdp_ctl_flush_mask_cursor(int cursor_id);
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);

static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	/* TODO: use id once multiple cursor support is present */
	(void)cursor_id;
/* @flush_mask: see CTL flush masks definitions below */
int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);

	return MDP5_CTL_FLUSH_CURSOR_DUMMY;
}
void mdp5_ctl_release(struct mdp5_ctl *ctl);

static inline u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0:  return MDP5_CTL_FLUSH_LM0;
	case 1:  return MDP5_CTL_FLUSH_LM1;
	case 2:  return MDP5_CTL_FLUSH_LM2;
	case 5:  return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	default:        return 0;
	}
}

#endif /* __MDP5_CTL_H__ */
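
Taken together, the header now exposes a family of mdp_ctl_flush_mask_*() helpers that callers OR into a single mask for mdp5_ctl_commit(). A compilable sketch of composing such a mask, reusing the dummy encoder and cursor bits defined in this patch; the LM value and the reduced interface enum are placeholders:

	#include <stdint.h>
	#include <stdio.h>

	enum intf_type { INTF_WB, INTF_DSI_CMD };	/* reduced stand-in */

	#define MDP5_CTL_FLUSH_TIMING_0	0x80000000u	/* dummy bit, from this patch */
	#define MDP5_CTL_FLUSH_WB	0x00010000u	/* dummy bit, from this patch */
	#define MDP5_CTL_FLUSH_CURSOR_0	0x00400000u	/* dummy bit, from this patch */
	#define FLUSH_LM0		0x00000040u	/* placeholder value */

	/* simplified analogue of mdp_ctl_flush_mask_encoder() */
	static uint32_t flush_mask_encoder(enum intf_type type, int intf_num)
	{
		if (type == INTF_WB)
			return MDP5_CTL_FLUSH_WB;
		return intf_num == 0 ? MDP5_CTL_FLUSH_TIMING_0 : 0;
	}

	int main(void)
	{
		/* one update touching layer mixer 0, cursor 0 and a WB encoder */
		uint32_t mask = FLUSH_LM0
			      | MDP5_CTL_FLUSH_CURSOR_0
			      | flush_mask_encoder(INTF_WB, 0);

		printf("mdp5_ctl_commit(ctl, %#x)\n", mask);
		return 0;
	}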