Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8c7eac58 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'drm-msm-next-2018-06-04' of git://people.freedesktop.org/~robclark/linux into drm-next

parents 568cf2e6 74d3a3a7
Loading
Loading
Loading
Loading
+1 −16
Original line number Diff line number Diff line
@@ -98,21 +98,6 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static int mdp4_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
	struct mdp4_kms *mdp4_kms = get_kms(plane);
	struct msm_kms *kms = &mdp4_kms->base.base;
	struct drm_framebuffer *fb = new_state->fb;

	if (!fb)
		return 0;

	DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
	return msm_framebuffer_prepare(fb, kms->aspace);
}

static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
@@ -152,7 +137,7 @@ static void mdp4_plane_atomic_update(struct drm_plane *plane,
}

static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
		.prepare_fb = mdp4_plane_prepare_fb,
		.prepare_fb = msm_atomic_prepare_fb,
		.cleanup_fb = mdp4_plane_cleanup_fb,
		.atomic_check = mdp4_plane_atomic_check,
		.atomic_update = mdp4_plane_atomic_update,
+9 −0
Original line number Diff line number Diff line
@@ -430,6 +430,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct device *dev = &mdp5_kms->pdev->dev;
	unsigned long flags;

	DBG("%s", crtc->name);

@@ -445,6 +446,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	pm_runtime_put_sync(dev);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp5_crtc->event);
		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
	}

	mdp5_crtc->enabled = false;
}

+76 −29
Original line number Diff line number Diff line
@@ -70,60 +70,110 @@ static int mdp5_hw_init(struct msm_kms *kms)
	return 0;
}

struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct mdp5_global_state *
mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
{
	/* Currently-committed private object state (old pre-swap, new after). */
	struct drm_private_state *cur = mdp5_kms->glob_state.state;

	return to_mdp5_global_state(cur);
}

/*
 * This acquires the modeset lock set aside for global state, creates
 * a new duplicated private object state.
 */
struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct msm_kms_state *state = to_kms_state(s);
	struct mdp5_state *new_state;
	struct drm_private_state *priv_state;
	int ret;

	if (state->state)
		return state->state;

	ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
	if (!new_state)
		return ERR_PTR(-ENOMEM);
	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	/* Copy state: */
	new_state->hwpipe = mdp5_kms->state->hwpipe;
	new_state->hwmixer = mdp5_kms->state->hwmixer;
	if (mdp5_kms->smp)
		new_state->smp = mdp5_kms->state->smp;
	return to_mdp5_global_state(priv_state);
}

static struct drm_private_state *
mdp5_global_duplicate_state(struct drm_private_obj *obj)
{
	struct mdp5_global_state *state;

	state->state = new_state;
	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return new_state;
	return &state->base;
}

static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	swap(to_kms_state(state)->state, mdp5_kms->state);
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}

/* vtable the atomic core uses to duplicate/free our global private state */
static const struct drm_private_state_funcs mdp5_global_state_funcs = {
	.atomic_duplicate_state = mdp5_global_duplicate_state,
	.atomic_destroy_state = mdp5_global_destroy_state,
};

/*
 * Set up the global-state machinery: init the dedicated modeset lock,
 * allocate an initial (zeroed) state and register it with the atomic
 * core as a private object.  Returns 0 or -ENOMEM.
 */
static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
{
	struct mdp5_global_state *initial;

	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);

	initial = kzalloc(sizeof(*initial), GFP_KERNEL);
	if (!initial)
		return -ENOMEM;

	/* Back-pointer so state users can reach the kms device. */
	initial->mdp5_kms = mdp5_kms;

	drm_atomic_private_obj_init(&mdp5_kms->glob_state,
				    &initial->base,
				    &mdp5_global_state_funcs);

	return 0;
}

static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	pm_runtime_get_sync(dev);

	if (mdp5_kms->smp)
		mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}

static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct device *dev = &mdp5_kms->pdev->dev;
	struct mdp5_global_state *global_state;

	global_state = mdp5_get_existing_global_state(mdp5_kms);

	if (mdp5_kms->smp)
		mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);

	pm_runtime_put_sync(dev);
}
@@ -229,7 +279,6 @@ static const struct mdp_kms_funcs kms_funcs = {
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.swap_state      = mdp5_swap_state,
		.prepare_commit  = mdp5_prepare_commit,
		.complete_commit = mdp5_complete_commit,
		.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -727,7 +776,8 @@ static void mdp5_destroy(struct platform_device *pdev)
	if (mdp5_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	kfree(mdp5_kms->state);
	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}

static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
@@ -880,12 +930,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
	mdp5_kms->dev = dev;
	mdp5_kms->pdev = pdev;

	drm_modeset_lock_init(&mdp5_kms->state_lock);
	mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
	if (!mdp5_kms->state) {
		ret = -ENOMEM;
	ret = mdp5_global_obj_init(mdp5_kms);
	if (ret)
		goto fail;
	}

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
	if (IS_ERR(mdp5_kms->mmio)) {
+15 −12
Original line number Diff line number Diff line
@@ -28,8 +28,6 @@
#include "mdp5_ctl.h"
#include "mdp5_smp.h"

struct mdp5_state;

struct mdp5_kms {
	struct mdp_kms base;

@@ -49,11 +47,12 @@ struct mdp5_kms {
	struct mdp5_cfg_handler *cfg;
	uint32_t caps;	/* MDP capabilities (MDP_CAP_XXX bits) */

	/**
	 * Global atomic state.  Do not access directly, use mdp5_get_state()
	/*
	 * Global private object state. Do not access directly; use
	 * mdp5_get_global_state().
	 */
	struct mdp5_state *state;
	struct drm_modeset_lock state_lock;
	struct drm_modeset_lock glob_state_lock;
	struct drm_private_obj glob_state;

	struct mdp5_smp *smp;
	struct mdp5_ctl_manager *ctlm;
@@ -81,19 +80,23 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)

/* Global atomic state for tracking resources that are shared across
/* Global private object state for tracking resources that are shared across
 * multiple kms objects (planes/crtcs/etc).
 *
 * For atomic updates which require modifying global state, obtain the
 * state via mdp5_get_global_state() so the change is tracked atomically.
 */
struct mdp5_state {
#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
struct mdp5_global_state {
	struct drm_private_state base;

	struct drm_atomic_state *state;
	struct mdp5_kms *mdp5_kms;

	struct mdp5_hw_pipe_state hwpipe;
	struct mdp5_hw_mixer_state hwmixer;
	struct mdp5_smp_state smp;
};

struct mdp5_state *__must_check
mdp5_get_state(struct drm_atomic_state *s);
struct mdp5_global_state * mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);

/* Atomic plane state.  Subclasses the base drm_plane_state in order to
 * track assigned hwpipe and hw specific state.
+6 −6
Original line number Diff line number Diff line
@@ -52,14 +52,14 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct mdp5_state *state = mdp5_get_state(s);
	struct mdp5_global_state *global_state = mdp5_get_global_state(s);
	struct mdp5_hw_mixer_state *new_state;
	int i;

	if (IS_ERR(state))
		return PTR_ERR(state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	new_state = &state->hwmixer;
	new_state = &global_state->hwmixer;

	for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
		struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
@@ -129,8 +129,8 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,

void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
	struct mdp5_state *state = mdp5_get_state(s);
	struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
	struct mdp5_global_state *global_state = mdp5_get_global_state(s);
	struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;

	if (!mixer)
		return;
Loading