Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2c9f916 authored by Xiaoming Zhou's avatar Xiaoming Zhou Committed by Matt Wagantall
Browse files

msm: mdss: support atomic commit wfd use case



Add support for the atomic commit WFD use case, where
the output buffer is specified in the atomic commit
structure.

Change-Id: I261392df132d4321daccb05d6ba7f5d9ea622236
Signed-off-by: Xiaoming Zhou <zhoux@codeaurora.org>
parent b00d3148
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -15,9 +15,9 @@ mdss-mdp-objs += mdss_mdp_rotator.o
mdss-mdp-objs += mdss_mdp_overlay.o
mdss-mdp-objs += mdss_mdp_layer.o
mdss-mdp-objs += mdss_mdp_splash_logo.o
mdss-mdp-objs += mdss_mdp_wb.o
mdss-mdp-objs += mdss_mdp_cdm.o
mdss-mdp-objs += mdss_smmu.o
mdss-mdp-objs += mdss_mdp_wfd.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_mdp_debug.o

+22 −3
Original line number Diff line number Diff line
@@ -278,6 +278,7 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
	u32 layer_count;
	struct mdp_input_layer *layer_list = NULL, *layer;
	struct mdp_input_layer32 *layer_list32 = NULL;
	struct mdp_output_layer *output_layer = NULL;

	/* copy top level memory from 32 bit structure to kernel memory */
	ret = copy_from_user(&commit32, (void __user *)argp,
@@ -288,9 +289,27 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
	}
	__copy_atomic_commit_struct(&commit, &commit32);

	if (commit32.commit_v1.output_layer) {
		int buffer_size = sizeof(struct mdp_output_layer);
		output_layer = kzalloc(buffer_size, GFP_KERNEL);
		if (!output_layer) {
			pr_err("fail to allocate output layer\n");
			return -ENOMEM;
		}
		ret = copy_from_user(output_layer,
				commit32.commit_v1.output_layer, buffer_size);
		if (ret) {
			pr_err("fail to copy output layer from user\n");
			goto layer_list_err;
		}

		commit.commit_v1.output_layer = output_layer;
	}

	layer_count = commit32.commit_v1.input_layer_cnt;
	if (layer_count > MAX_LAYER_COUNT) {
		return -EINVAL;
		ret = -EINVAL;
		goto layer_list_err;
	} else if (layer_count) {
		/*
		 * allocate memory for layer list in 32bit domain and copy it
@@ -299,7 +318,7 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
		layer_list32 = __create_layer_list32(&commit32, layer_count);
		if (IS_ERR_OR_NULL(layer_list32)) {
			ret = PTR_ERR(layer_list32);
			goto end;
			goto layer_list_err;
		}

		/*
@@ -327,7 +346,7 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
	kfree(layer_list);
layer_list_err:
	kfree(layer_list32);
end:
	kfree(output_layer);
	return ret;
}

+24 −1
Original line number Diff line number Diff line
@@ -3372,6 +3372,8 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
	struct mdp_input_layer *layer, *layer_list = NULL;
	struct mdp_input_layer __user *input_layer_list;
	struct mdp_scale_data *scale;
	struct mdp_output_layer *output_layer = NULL;
	struct mdp_output_layer __user *output_layer_user;

	ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
	if (ret) {
@@ -3379,11 +3381,30 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
		return ret;
	}

	output_layer_user = commit.commit_v1.output_layer;
	if (output_layer_user) {
		buffer_size = sizeof(struct mdp_output_layer);
		output_layer = kzalloc(buffer_size, GFP_KERNEL);
		if (!output_layer) {
			pr_err("unable to allocate memory for output layer\n");
			return -ENOMEM;
		}

		ret = copy_from_user(output_layer,
			output_layer_user, buffer_size);
		if (ret) {
			pr_err("layer list copy from user failed\n");
			goto err;
		}
		commit.commit_v1.output_layer = output_layer;
	}

	layer_count = commit.commit_v1.input_layer_cnt;
	input_layer_list = commit.commit_v1.input_layers;

	if (layer_count > MAX_LAYER_COUNT) {
		return -EINVAL;
		ret = -EINVAL;
		goto err;
	} else if (layer_count) {
		buffer_size = sizeof(struct mdp_input_layer) * layer_count;
		layer_list = kmalloc(buffer_size, GFP_KERNEL);
@@ -3438,6 +3459,7 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
			pr_err("layer error code copy to user failed\n");

		commit.commit_v1.input_layers = input_layer_list;
		commit.commit_v1.output_layer = output_layer_user;
		rc = copy_to_user(argp, &commit,
			sizeof(struct mdp_layer_commit));
		if (rc)
@@ -3448,6 +3470,7 @@ err:
	for (i--; i >= 0; i--)
		kfree(layer_list[i].scale);
	kfree(layer_list);
	kfree(output_layer);
	return ret;
}

+15 −1
Original line number Diff line number Diff line
@@ -525,6 +525,8 @@ struct mdss_mdp_writeback_arg {
	void *priv_data;
};

struct mdss_mdp_wfd;

struct mdss_overlay_private {
	ktime_t vsync_time;
	struct kernfs_node *vsync_event_sd;
@@ -537,7 +539,7 @@ struct mdss_overlay_private {
	struct mutex ov_lock;
	struct mutex dfps_lock;
	struct mdss_mdp_ctl *ctl;
	struct mdss_mdp_wb *wb;
	struct mdss_mdp_wfd *wfd;

	struct mutex list_lock;
	struct list_head pipes_used;
@@ -874,6 +876,12 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
	struct mdp_layer_commit_v1 *ov_commit);
int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
	struct mdp_layer_commit_v1 *ov_commit);

int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd,
	struct mdp_layer_commit_v1 *ov_commit);
int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
	struct mdp_layer_commit_v1 *ov_commit);

int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
			       struct mdp_overlay *req,
			       struct mdss_mdp_format_params *fmt);
@@ -1112,5 +1120,11 @@ int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version);

struct mdss_mdp_writeback *mdss_mdp_wb_alloc(u32 caps, u32 reg_index);
void mdss_mdp_wb_free(struct mdss_mdp_writeback *wb);
struct mdss_mdp_writeback *mdss_mdp_wb_assign(u32 num, u32 reg_index);

struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
		struct mdss_mdp_ctl *ctl, u32 type, int mux, int rotator);
struct mdss_mdp_mixer *mdss_mdp_mixer_assign(u32 id, bool wb);
int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);

#endif /* MDSS_MDP_H */
+74 −79
Original line number Diff line number Diff line
@@ -46,7 +46,6 @@ static inline u64 apply_fudge_factor(u64 val,

static DEFINE_MUTEX(mdss_mdp_ctl_lock);

static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);
static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer);

static inline u32 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
@@ -1620,7 +1619,7 @@ static int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl)
 * Return: mdp mixer structure that is allocated.
 *	   NULL if mixer allocation fails.
 */
static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
		struct mdss_mdp_ctl *ctl, u32 type, int mux, int rotator)
{
	struct mdss_mdp_mixer *mixer = NULL, *alt_mixer = NULL;
@@ -1706,7 +1705,30 @@ static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
	return mixer;
}

static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer)
/*
 * mdss_mdp_mixer_assign() - claim a specific mixer by index.
 * @id: index into the writeback or interface mixer array
 * @wb: true to select from the writeback mixers, false for interface mixers
 *
 * Under mdss_mdp_ctl_lock, looks up the requested mixer and takes a
 * reference on it only when it is currently unused (ref_cnt == 0).
 *
 * Return: the claimed mixer, or NULL when @id is out of range or the
 * mixer is already in use.
 */
struct mdss_mdp_mixer *mdss_mdp_mixer_assign(u32 id, bool wb)
{
	struct mdss_mdp_mixer *mixer = NULL;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	mutex_lock(&mdss_mdp_ctl_lock);

	/* bounds-checked lookup in the requested mixer pool */
	if (wb && id < mdata->nmixers_wb)
		mixer = mdata->mixer_wb + id;
	else if (!wb && id < mdata->nmixers_intf)
		mixer = mdata->mixer_intf + id;

	if (mixer && mixer->ref_cnt == 0) {
		mixer->ref_cnt++;
		mixer->params_changed++;
	} else {
		/* %u: id is u32; note this path is also reached when
		 * id is out of range, not only when the mixer is busy.
		 */
		pr_err("mixer is in use already = %u\n", id);
		mixer = NULL;
	}
	mutex_unlock(&mdss_mdp_ctl_lock);
	return mixer;
}

int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer)
{
	if (!mixer)
		return -ENODEV;
@@ -1950,6 +1972,10 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
	}

	pinfo = &ctl->panel_data->panel_info;
	if (pinfo->type == WRITEBACK_PANEL) {
		pr_err("writeback panel, ignore\n");
		return 0;
	}

	split_ctl = mdss_mdp_get_split_ctl(ctl);

@@ -2041,71 +2067,6 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
	return 0;
}

/*
 * mdss_mdp_ctl_setup_wfd() - set up a ctl path for WFD writeback.
 * @ctl: control path to configure
 *
 * Allocates a mixer (preferring an interface mixer when the hardware is in
 * MDSS_MDP_WFD_INTERFACE mode, falling back to a writeback mixer) plus a
 * writeback block, then programs ctl->opmode from the mixer type/number.
 * On success, ctl->mixer_left and ctl->wb own the allocated resources.
 *
 * Return: 0 on success or a negative errno; on failure all resources
 * acquired here are released.
 *
 * NOTE(review): this commit removes the function — the atomic-commit WFD
 * path (mdss_mdp_wfd.o) presumably takes over this setup; confirm there.
 */
static int mdss_mdp_ctl_setup_wfd(struct mdss_mdp_ctl *ctl)
{
	struct mdss_data_type *mdata = ctl->mdata;
	struct mdss_mdp_mixer *mixer = NULL;
	struct mdss_mdp_writeback *wb = NULL;
	u32 caps;
	int mixer_type, ret = 0;

	/* if WB2 is supported, try to allocate it first */
	if (mdata->wfd_mode == MDSS_MDP_WFD_INTERFACE)
		mixer_type = MDSS_MDP_MIXER_TYPE_INTF;
	else
		mixer_type = MDSS_MDP_MIXER_TYPE_WRITEBACK;

	mixer = mdss_mdp_mixer_alloc(ctl, mixer_type, false, 0);
	/* no interface mixer free: fall back to a plain writeback mixer */
	if (!mixer && mixer_type == MDSS_MDP_MIXER_TYPE_INTF)
		mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK,
				false, 0);

	if (!mixer) {
		pr_err("Unable to allocate writeback mixer\n");
		return -ENOMEM;
	}

	/* advertise interface capability only for non-writeback mixers */
	caps = MDSS_MDP_WB_WFD;
	if (mixer->type != MDSS_MDP_MIXER_TYPE_WRITEBACK)
		caps |= MDSS_MDP_WB_INTF;
	wb = mdss_mdp_wb_alloc(caps, ctl->num);
	if (!wb) {
		pr_err("Unable to allocate writeback block\n");
		ret = -ENODEV;
		goto setup_wfd_err;
	}

	/* pick the ctl operating mode matching the mixer we got */
	if (mixer->type != MDSS_MDP_MIXER_TYPE_WRITEBACK ||
			(mdata->wfd_mode == MDSS_MDP_WFD_DEDICATED)) {
		ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
	} else {
		switch (mixer->num) {
		case MDSS_MDP_WB_LAYERMIXER0:
			ctl->opmode = MDSS_MDP_CTL_OP_WB0_MODE;
			break;
		case MDSS_MDP_WB_LAYERMIXER1:
			ctl->opmode = MDSS_MDP_CTL_OP_WB1_MODE;
			break;
		default:
			pr_err("Incorrect writeback config num=%d\n",
					mixer->num);
			ret = -EINVAL;
			goto setup_wfd_err;
		}
		ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_LINE;
	}
	ctl->mixer_left = mixer;
	ctl->wb = wb;
	return 0;

setup_wfd_err:
	/* release whatever was acquired before the failure */
	mdss_mdp_mixer_free(mixer);
	if (wb)
		mdss_mdp_wb_free(wb);

	return ret;
}

struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
				       struct msm_fb_data_type *mfd)
{
@@ -2178,9 +2139,6 @@ struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
	case WRITEBACK_PANEL:
		ctl->intf_num = MDSS_MDP_NO_INTF;
		ctl->start_fnc = mdss_mdp_writeback_start;
		ret = mdss_mdp_ctl_setup_wfd(ctl);
		if (ret)
			goto ctl_init_fail;
		break;
	default:
		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
@@ -2485,8 +2443,10 @@ static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
	}

	mixer = ctl->mixer_left;
	if (mixer) {
		mdss_mdp_pp_resume(ctl, mixer->num);
		mixer->params_changed++;
	}

	temp = readl_relaxed(ctl->mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
@@ -2497,6 +2457,7 @@ static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
	writel_relaxed(temp, ctl->mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);

	if (mixer) {
		outsize = (mixer->height << 16) | mixer->width;
		mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);

@@ -2504,7 +2465,7 @@ static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
			ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
					&ctl->panel_data->panel_info);
		}

	}
	return ret;
}

@@ -3854,6 +3815,40 @@ struct mdss_mdp_writeback *mdss_mdp_wb_alloc(u32 caps, u32 reg_index)
	return wb;
}

/*
 * mdss_mdp_wb_assign() - claim a specific writeback block by index.
 * @num: index of the writeback block to claim
 * @reg_index: register-offset index used when the writeback blocks are
 *             virtualized (one offset per ctl) — TODO confirm semantics
 *
 * Validates the indices, then under wb_lock claims the block only if its
 * refcount is zero (kref_init sets it to 1). Finally computes the block's
 * MMIO base from the appropriate offset table entry.
 *
 * Return: the claimed writeback block, or NULL when an index is out of
 * range or the block is already in use.
 */
struct mdss_mdp_writeback *mdss_mdp_wb_assign(u32 num, u32 reg_index)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	struct mdss_mdp_writeback *wb = NULL;
	bool wb_virtual_on;

	/* virtual mode: one register offset per ctl rather than per block */
	wb_virtual_on = (mdata->nctl == mdata->nwb_offsets);

	if (num >= mdata->nwb)
		return NULL;

	if (wb_virtual_on && reg_index >= mdata->nwb_offsets)
		return NULL;

	/* claim the block atomically: free (refcount 0) -> init to 1 */
	mutex_lock(&mdata->wb_lock);
	wb = mdata->wb + num;
	if (atomic_read(&wb->kref.refcount) == 0)
		kref_init(&wb->kref);
	else
		wb = NULL;
	mutex_unlock(&mdata->wb_lock);

	if (!wb)
		return NULL;

	/* base = mdss register base + per-index offset */
	wb->base = mdata->mdss_io.base;
	if (wb_virtual_on)
		wb->base += mdata->wb_offsets[reg_index];
	else
		wb->base += mdata->wb_offsets[num];

	return wb;
}

static void mdss_mdp_wb_release(struct kref *kref)
{
	struct mdss_mdp_writeback *wb =
Loading