Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b18fc2b authored by Veera Sundaram Sankaran
Browse files

msm: mdss: add support for async position updates



Expose new ioctl for updating the layer position asynchronously.
Initially, pipes should be configured with async_update flag set
during the atomic commit, after which any number of position update
calls can be made. This would enable multiple position updates
within a single vsync. However, the screen update would happen
only after vsync, which would pick the latest update. Currently,
supported for video mode panels with single LM or dual LM with
src_split enabled. Only position updates are possible with no
scaling/cropping and each async layer should have unique z_order.

Change-Id: Ibb2804b59c6c980411396120e63167f18df5bc5b
Signed-off-by: Veera Sundaram Sankaran <veeras@codeaurora.org>
parent a5f9d251
Loading
Loading
Loading
Loading
+99 −0
Original line number Diff line number Diff line
@@ -52,6 +52,9 @@
						struct mdp_overlay_list32)
#define MSMFB_ATOMIC_COMMIT32	_IOWR(MDP_IOCTL_MAGIC, 128, compat_caddr_t)

#define MSMFB_ASYNC_POSITION_UPDATE_32 _IOWR(MDP_IOCTL_MAGIC, 129, \
		struct mdp_position_update32)

static unsigned int __do_compat_ioctl_nr(unsigned int cmd32)
{
	unsigned int cmd;
@@ -99,6 +102,9 @@ static unsigned int __do_compat_ioctl_nr(unsigned int cmd32)
	case MSMFB_ATOMIC_COMMIT32:
		cmd = MSMFB_ATOMIC_COMMIT;
		break;
	case MSMFB_ASYNC_POSITION_UPDATE_32:
		cmd = MSMFB_ASYNC_POSITION_UPDATE;
		break;
	default:
		cmd = cmd32;
		break;
@@ -350,6 +356,96 @@ layer_list_err:
	return ret;
}

/*
 * __copy_to_user_async_position_update() - copy async update results back
 * to 32-bit userspace.
 * @update_pos:   kernel-side request whose input_layers hold per-layer results
 * @update_pos32: compat request as originally read from userspace
 * @argp:         user address of the compat struct to write back
 * @layer_cnt:    number of entries in the layer array
 *
 * copy_to_user() returns the number of bytes that could NOT be copied
 * (a positive count), which is not a valid errno; map any partial copy
 * to -EFAULT so callers propagate a proper error code.
 *
 * Return: 0 on success, -EFAULT on a faulting user access.
 */
static int __copy_to_user_async_position_update(
		struct mdp_position_update *update_pos,
		struct mdp_position_update32 *update_pos32,
		unsigned long argp, u32 layer_cnt)
{
	/* per-layer results first, so userspace can inspect them even if
	 * the top-level write below faults */
	if (copy_to_user(update_pos32->input_layers,
			update_pos->input_layers,
			sizeof(struct mdp_async_layer) * layer_cnt))
		return -EFAULT;

	if (copy_to_user((void __user *) argp, update_pos32,
			sizeof(struct mdp_position_update32)))
		return -EFAULT;

	return 0;
}

/*
 * __create_async_layer_list() - pull the 32-bit caller's layer array into
 * kernel memory.
 * @update_pos32: compat request; input_layers points at the user buffer
 * @layer_cnt:    user-supplied number of layers
 *
 * NOTE(review): this copies sizeof(struct mdp_async_layer) per entry
 * straight from the 32-bit process; that assumes the layer struct layout
 * is identical for 32- and 64-bit userspace — verify against the uapi
 * header.
 *
 * Return: kmalloc'd layer array on success (caller frees), or an
 * ERR_PTR(-errno) on failure. Never returns NULL.
 */
static struct mdp_async_layer *__create_async_layer_list(
	struct mdp_position_update32 *update_pos32, u32 layer_cnt)
{
	struct mdp_async_layer *layer_list;
	size_t buffer_size;

	/* layer_cnt comes from userspace; guard the size multiplication */
	if (layer_cnt > SIZE_MAX / sizeof(struct mdp_async_layer))
		return ERR_PTR(-EINVAL);

	buffer_size = sizeof(struct mdp_async_layer) * layer_cnt;

	layer_list = kmalloc(buffer_size, GFP_KERNEL);
	if (!layer_list)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(layer_list,
			update_pos32->input_layers, buffer_size)) {
		pr_err("layer list32 copy from user failed\n");
		kfree(layer_list);
		/* copy_from_user returns a positive byte count on failure;
		 * ERR_PTR() requires a negative errno or IS_ERR() in the
		 * caller will not detect the failure */
		return ERR_PTR(-EFAULT);
	}

	return layer_list;
}

static int __compat_async_position_update(struct fb_info *info,
		unsigned int cmd, unsigned long argp)
{
	struct mdp_position_update update_pos;
	struct mdp_position_update32 update_pos32;
	struct mdp_async_layer *layer_list = NULL;
	u32 layer_cnt, ret;

	/* copy top level memory from 32 bit structure to kernel memory */
	ret = copy_from_user(&update_pos32, (void __user *)argp,
		sizeof(struct mdp_position_update32));
	if (ret) {
		pr_err("%s:copy_from_user failed\n", __func__);
		return ret;
	}

	update_pos.input_layer_cnt = update_pos32.input_layer_cnt;
	layer_cnt = update_pos32.input_layer_cnt;
	if (!layer_cnt) {
		pr_err("no async layer to update\n");
		return -EINVAL;
	}

	layer_list = __create_async_layer_list(&update_pos32,
		layer_cnt);
	if (IS_ERR_OR_NULL(layer_list))
		return PTR_ERR(layer_list);

	update_pos.input_layers = layer_list;

	ret = mdss_fb_async_position_update(info, &update_pos);
	if (ret)
		pr_err("async position update failed ret:%d\n", ret);

	ret = __copy_to_user_async_position_update(&update_pos, &update_pos32,
			argp, layer_cnt);
	if (ret)
		pr_err("copy to user of async update position failed\n");

	kfree(layer_list);
	return ret;
}

static int mdss_fb_compat_buf_sync(struct fb_info *info, unsigned int cmd,
			 unsigned long arg)
{
@@ -3677,6 +3773,9 @@ int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd,
	case MSMFB_ATOMIC_COMMIT:
		ret = __compat_atomic_commit(info, cmd, arg);
		break;
	case MSMFB_ASYNC_POSITION_UPDATE:
		ret = __compat_async_position_update(info, cmd, arg);
		break;
	case MSMFB_MDP_PP:
	case MSMFB_HISTOGRAM_START:
	case MSMFB_HISTOGRAM_STOP:
+5 −0
Original line number Diff line number Diff line
@@ -532,4 +532,9 @@ struct mdp_layer_commit32 {
	};
};

/*
 * 32-bit (compat) counterpart of struct mdp_position_update, read from a
 * 32-bit process via MSMFB_ASYNC_POSITION_UPDATE_32.
 *
 * NOTE(review): input_layers is declared as a full kernel pointer
 * (compat_caddr_t __user *), which is 8 bytes on a 64-bit kernel, whereas
 * a 32-bit process laying out this struct with a bare compat_caddr_t
 * would produce an 8-byte struct total. Verify the userspace ABI — a
 * plain `compat_caddr_t input_layers;` member may be what was intended.
 */
struct mdp_position_update32 {
	compat_caddr_t __user	*input_layers;	/* user buffer of async layers */
	uint32_t input_layer_cnt;		/* number of entries in input_layers */
};

#endif
+75 −0
Original line number Diff line number Diff line
@@ -3494,6 +3494,77 @@ static int mdss_fb_cursor(struct fb_info *info, void __user *p)
	return mfd->mdp.cursor_update(mfd, &cursor);
}

int mdss_fb_async_position_update(struct fb_info *info,
		struct mdp_position_update *update_pos)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;

	if (!update_pos->input_layer_cnt) {
		pr_err("no input layers for position update\n");
		return -EINVAL;
	}
	return mfd->mdp.async_position_update(mfd, update_pos);
}

static int mdss_fb_async_position_update_ioctl(struct fb_info *info,
		unsigned long *argp)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct mdp_position_update update_pos;
	int ret, rc;
	u32 buffer_size, layer_cnt;
	struct mdp_async_layer *layer_list = NULL;
	struct mdp_async_layer __user *input_layer_list;

	if (!mfd->mdp.async_position_update)
		return -ENODEV;

	ret = copy_from_user(&update_pos, argp, sizeof(update_pos));
	if (ret) {
		pr_err("copy from user failed\n");
		return ret;
	}
	input_layer_list = update_pos.input_layers;

	layer_cnt = update_pos.input_layer_cnt;
	if (!layer_cnt) {
		pr_err("no async layers to update\n");
		return -EINVAL;
	}

	buffer_size = sizeof(struct mdp_async_layer) * layer_cnt;
	layer_list = kmalloc(buffer_size, GFP_KERNEL);
	if (!layer_list) {
		pr_err("unable to allocate memory for layers\n");
		return -ENOMEM;
	}

	ret = copy_from_user(layer_list, input_layer_list, buffer_size);
	if (ret) {
		pr_err("layer list copy from user failed\n");
		goto end;
	}
	update_pos.input_layers = layer_list;

	ret = mdss_fb_async_position_update(info, &update_pos);
	if (ret)
		pr_err("async position update failed ret:%d\n", ret);

	rc = copy_to_user(input_layer_list, layer_list, buffer_size);
	if (rc)
		pr_err("layer error code copy to user failed\n");

	update_pos.input_layers = input_layer_list;
	rc = copy_to_user(argp, &update_pos,
			sizeof(struct mdp_position_update));
	if (rc)
		pr_err("copy to user for layers failed");

end:
	kfree(layer_list);
	return ret;
}

static int mdss_fb_set_lut(struct fb_info *info, void __user *p)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
@@ -4040,6 +4111,10 @@ int mdss_fb_do_ioctl(struct fb_info *info, unsigned int cmd,
		ret = mdss_fb_atomic_commit_ioctl(info, argp);
		break;

	case MSMFB_ASYNC_POSITION_UPDATE:
		ret = mdss_fb_async_position_update_ioctl(info, argp);
		break;

	default:
		if (mfd->mdp.ioctl_handler)
			ret = mfd->mdp.ioctl_handler(mfd, cmd, argp);
+5 −0
Original line number Diff line number Diff line
@@ -205,6 +205,8 @@ struct msm_mdp_interface {
	void (*dma_fnc)(struct msm_fb_data_type *mfd);
	int (*cursor_update)(struct msm_fb_data_type *mfd,
				struct fb_cursor *cursor);
	int (*async_position_update)(struct msm_fb_data_type *mfd,
				struct mdp_position_update *update_pos);
	int (*lut_update)(struct msm_fb_data_type *mfd, struct fb_cmap *cmap);
	int (*do_histogram)(struct msm_fb_data_type *mfd,
				struct mdp_histogram *hist);
@@ -428,5 +430,8 @@ int mdss_fb_compat_ioctl(struct fb_info *info, unsigned int cmd,
			 unsigned long arg);
int mdss_fb_atomic_commit(struct fb_info *info,
	struct mdp_layer_commit  *commit);
int mdss_fb_async_position_update(struct fb_info *info,
		struct mdp_position_update *update_pos);

u32 mdss_fb_get_mode_switch(struct msm_fb_data_type *mfd);
#endif /* MDSS_FB_H */
+13 −1
Original line number Diff line number Diff line
@@ -83,6 +83,10 @@

#define XIN_HALT_TIMEOUT_US	0x4000

/*
 * hw cursor can only be setup in highest mixer stage: derive that stage
 * index from the target's maximum supported z-order (stages count up
 * from MDSS_MDP_STAGE_0, hence the trailing -1).
 */
#define HW_CURSOR_STAGE(mdata) \
	(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)

enum mdss_mdp_perf_state_type {
	PERF_SW_COMMIT_STATE = 0,
	PERF_HW_MDP_STATE,
@@ -537,6 +541,7 @@ struct mdss_mdp_pipe {
	struct mdp_input_layer layer;
	u32 params_changed;
	bool dirty;
	bool async_update;

	struct mdss_mdp_pipe_smp_map smp_map[MAX_PLANES];

@@ -956,6 +961,9 @@ int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
	struct mdp_layer_commit_v1 *ov_commit);

int mdss_mdp_async_position_update(struct msm_fb_data_type *mfd,
		struct mdp_position_update *update_pos);

int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
			       struct mdp_overlay *req,
			       struct mdss_mdp_format_params *fmt);
@@ -974,6 +982,8 @@ int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe);
struct mdss_mdp_pipe *mdss_mdp_pipe_assign(struct mdss_data_type *mdata,
	struct mdss_mdp_mixer *mixer, u32 ndx);
void mdss_mdp_pipe_position_update(struct mdss_mdp_pipe *pipe,
		struct mdss_rect *src, struct mdss_rect *dst);
int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
		u32 *offsets,  u32 count);
int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
@@ -1038,7 +1048,9 @@ int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl,
	enum mdss_mdp_perf_state_type component, bool new_status);
void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl);

int mdss_mdp_async_ctl_flush(struct msm_fb_data_type *mfd,
		u32 flush_bits);
int mdss_mdp_get_pipe_flush_bits(struct mdss_mdp_pipe *pipe);
struct mdss_mdp_mixer *mdss_mdp_block_mixer_alloc(void);
int mdss_mdp_block_mixer_destroy(struct mdss_mdp_mixer *mixer);
struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux);
Loading