Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d9e78265 authored by Gopikrishnaiah Anandan
Browse files

drm: msm: sde: api support for blocking lut dma kick off



If the LUT DMA is busy, it will block the control flush and start on the SDE
block. This can cause under-run in the SDE block on video-mode panels.
Enhance the LUT DMA kick-off API so it can be configured as
blocking or non-blocking.

Change-Id: Ibb009978a9ea059b6ed3fa97ebb7588ecbdb10eb
Signed-off-by: Gopikrishnaiah Anandan <agopik@codeaurora.org>
parent 93c1a029
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -2638,7 +2638,7 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
				phys->split_role != ENC_ROLE_SKIP)
			set_bit(i, sde_enc->frame_busy_mask);
		if (phys->hw_ctl->ops.reg_dma_flush)
			phys->hw_ctl->ops.reg_dma_flush(phys->hw_ctl);
			phys->hw_ctl->ops.reg_dma_flush(phys->hw_ctl, false);
		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
+3 −2
Original line number Diff line number Diff line
@@ -563,12 +563,13 @@ static void sde_hw_ctl_setup_sbuf_cfg(struct sde_hw_ctl *ctx,
	SDE_REG_WRITE(c, CTL_ROT_TOP, val);
}

static void sde_hw_reg_dma_flush(struct sde_hw_ctl *ctx)
static void sde_hw_reg_dma_flush(struct sde_hw_ctl *ctx, bool blocking)
{
	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();

	if (ops && ops->last_command)
		ops->last_command(ctx, DMA_CTL_QUEUE0);
		ops->last_command(ctx, DMA_CTL_QUEUE0,
		    (blocking ? REG_DMA_WAIT4_COMP : REG_DMA_NOWAIT));
}

static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
+2 −1
Original line number Diff line number Diff line
@@ -214,8 +214,9 @@ struct sde_hw_ctl_ops {
	/**
	 * Flush the reg dma by sending last command.
	 * @ctx       : ctl path ctx pointer
	 * @blocking  : if set to true api will block until flush is done
	 */
	void (*reg_dma_flush)(struct sde_hw_ctl *ctx);
	void (*reg_dma_flush)(struct sde_hw_ctl *ctx, bool blocking);

};

+39 −2
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/iopoll.h>
#include "sde_hw_mdss.h"
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
@@ -93,6 +94,20 @@ static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
	[PCC] = GRP_DSPP_HW_BLK_SELECT,
};

static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
	[CTL_0][0] = BIT(16),
	[CTL_0][1] = BIT(21),
	[CTL_1][0] = BIT(17),
	[CTL_1][1] = BIT(22),
	[CTL_2][0] = BIT(18),
	[CTL_2][1] = BIT(23),
	[CTL_3][0] = BIT(19),
	[CTL_3][1] = BIT(24),
};

static int reg_dma_int_status_off;
static int reg_dma_clear_status_off;

static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
@@ -110,7 +125,8 @@ static int check_support_v1(enum sde_reg_dma_features feature,
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
static int reset_v1(struct sde_hw_ctl *ctl);
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q);
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);

@@ -466,6 +482,8 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)

	SET_UP_REG_DMA_REG(hw, reg_dma);
	SDE_REG_WRITE(&hw, REG_DMA_OP_MODE_OFF, BIT(0));
	SDE_REG_WRITE(&hw, reg_dma_clear_status_off,
		ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select]);
	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
			cfg->dma_buf->iova);
	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
@@ -518,6 +536,8 @@ int init_v1(struct sde_hw_reg_dma *cfg)
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);
	reg_dma_int_status_off = 0x90;
	reg_dma_clear_status_off = 0xa0;

	return 0;
}
@@ -782,10 +802,14 @@ static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
	return 0;
}

static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q)
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	struct sde_hw_blk_reg_map hw;
	u32 val;
	int rc;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
@@ -821,6 +845,19 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q)
		return -EINVAL;
	}

	memset(&hw, 0, sizeof(hw));
	SET_UP_REG_DMA_REG(hw, reg_dma);

	if (mode == REG_DMA_WAIT4_COMP) {
		rc = readl_poll_timeout(hw.base_off + hw.blk_off +
			reg_dma_int_status_off, val,
			(val & ctl_trigger_done_mask[ctl->idx][q]),
			10, 20000);
		if (rc)
			DRM_ERROR("poll wait failed %d val %x mask %x\n",
			    rc, val, ctl_trigger_done_mask[ctl->idx][q]);
	}

	return 0;
}

+1 −1
Original line number Diff line number Diff line
@@ -64,7 +64,7 @@ static int default_buf_reset_reg_dma(struct sde_reg_dma_buffer *lut_buf)
}

static int default_last_command(struct sde_hw_ctl *ctl,
		enum sde_reg_dma_queue q)
		enum sde_reg_dma_queue q, enum sde_reg_dma_last_cmd_mode mode)
{
	return 0;
}
Loading