Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7fe352a3 authored by Jin Li's avatar Jin Li
Browse files

Merge remote-tracking branch 'origin/dev/msm-3.18-drm_kms' into msm-3.18



* origin/dev/msm-3.18-drm_kms:
  drm/msm: create separate commit thread for each display
  drm/msm: add error logging in msm_atomic

Change-Id: If1b6f16747a5785635a508ca845548b5db4e3df2
Signed-off-by: Jin Li <jinl@codeaurora.org>
parents 5d95fbb8 65f02ce9
Loading
Loading
Loading
Loading
+115 −51
Original line number Diff line number Diff line
@@ -26,10 +26,9 @@ struct msm_commit {
	uint32_t fence;
	struct msm_fence_cb fence_cb;
	uint32_t crtc_mask;
	struct kthread_work commit_work;
};

static void fence_cb(struct msm_fence_cb *cb);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
@@ -60,29 +59,10 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
	spin_unlock(&priv->pending_crtcs_event.lock);
}

static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	/* TODO we might need a way to indicate to run the cb on a
	 * different wq so wait_for_vblanks() doesn't block retiring
	 * bo's..
	 */
	INIT_FENCE_CB(&c->fence_cb, fence_cb);

	return c;
}

static void commit_destroy(struct msm_commit *c)
static void commit_destroy(struct msm_commit *commit)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
	end_atomic(commit->dev->dev_private, commit->crtc_mask);
	kfree(commit);
}

static void msm_atomic_wait_for_commit_done(
@@ -101,10 +81,7 @@ static void msm_atomic_wait_for_commit_done(

		crtc = old_state->crtcs[i];

		if (!crtc)
			continue;

		if (!crtc->state->enable)
		if (!crtc || !crtc->state || !crtc->state->enable)
			continue;

		/* If specified, only wait if requested flag is true */
@@ -117,6 +94,7 @@ static void msm_atomic_wait_for_commit_done(
		if (old_state->legacy_cursor_update)
			continue;

		if (kms->funcs->wait_for_crtc_commit_done)
			kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}
@@ -283,7 +261,7 @@ msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since planes updates then only happen when the CRTC is actually enabled.
 */
void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
static void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	msm_disable_outputs(dev, old_state);
@@ -412,9 +390,9 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c)
static void complete_commit(struct msm_commit *commit)
{
	struct drm_atomic_state *state = c->state;
	struct drm_atomic_state *state = commit->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
@@ -448,20 +426,99 @@ static void complete_commit(struct msm_commit *c)

	drm_atomic_state_free(state);

	commit_destroy(c);
	commit_destroy(commit);
}

static void fence_cb(struct msm_fence_cb *cb)
{
	struct msm_commit *c =
	struct msm_commit *commit =
			container_of(cb, struct msm_commit, fence_cb);
	complete_commit(c);
	complete_commit(commit);
}

/* kthread_work callback run on a per-CRTC display commit thread;
 * recovers the enclosing msm_commit from the embedded commit_work and
 * performs the (potentially asynchronous) commit completion.
 */
static void _msm_drm_commit_work_cb(struct kthread_work *work)
{
	struct msm_commit *commit =  NULL;

	/* Defensive check only; the kthread worker should never invoke
	 * this callback with a NULL work pointer.
	 */
	if (!work) {
		DRM_ERROR("%s: Invalid commit work data!\n", __func__);
		return;
	}

	commit = container_of(work, struct msm_commit, commit_work);

	complete_commit(commit);
}

/* Allocate and initialize a msm_commit tracking structure for @state.
 *
 * Returns ERR_PTR(-ENOMEM) on allocation failure — never NULL — so
 * callers are expected to test the result with IS_ERR_OR_NULL()/PTR_ERR().
 */
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *commit = kzalloc(sizeof(*commit), GFP_KERNEL);

	if (!commit) {
		DRM_ERROR("invalid commit\n");
		return ERR_PTR(-ENOMEM);
	}

	commit->dev = state->dev;
	commit->state = state;

	/* TODO we might need a way to indicate to run the cb on a
	 * different wq so wait_for_vblanks() doesn't block retiring
	 * bo's..
	 */
	INIT_FENCE_CB(&commit->fence_cb, fence_cb);
	/* commit_work is later queued on a per-CRTC display thread by
	 * msm_atomic_commit_dispatch() for async commits.
	 */
	init_kthread_work(&commit->commit_work, _msm_drm_commit_work_cb);

	return commit;
}

static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
static void commit_set_fence(struct msm_commit *commit,
		struct drm_framebuffer *fb)
{
	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
	commit->fence = max(commit->fence,
			msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}

/* Start display thread function */
/* Dispatch @commit to the kthread worker of the display thread whose
 * crtc_id matches the CRTC in @state.
 *
 * Returns 0 only if the work was queued successfully; otherwise -EINVAL
 * (no matching display thread, or its kthread was never started).
 */
static int msm_atomic_commit_dispatch(struct drm_device *dev,
		struct drm_atomic_state *state, struct msm_commit *commit)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc = NULL;
	struct drm_crtc_state *crtc_state = NULL;
	int ret = -EINVAL, i = 0, j = 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* linear search for the display thread bound to this CRTC */
		for (j = 0; j < priv->num_crtcs; j++) {
			if (priv->disp_thread[j].crtc_id ==
						crtc->base.id) {
				if (priv->disp_thread[j].thread) {
					queue_kthread_work(
						&priv->disp_thread[j].worker,
							&commit->commit_work);
					/* only return zero if work is
					 * queued successfully.
					 */
					ret = 0;
				} else {
					/* thread was never created or was
					 * torn down — cannot dispatch
					 */
					DRM_ERROR(" Error for crtc_id: %d\n",
						priv->disp_thread[j].crtc_id);
				}
				break;
			}
		}
		/*
		 * TODO: handle cases where there will be more than
		 * one crtc per commit cycle. Remove this check then.
		 * Current assumption is there will be only one crtc
		 * per commit cycle.
		 */
		if (j < priv->num_crtcs)
			break;
	}

	return ret;
}

/**
@@ -470,9 +527,8 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails. For
 * now this doesn't implement asynchronous commits.
 * This function commits with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
@@ -484,16 +540,17 @@ int msm_atomic_commit(struct drm_device *dev,
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	ktime_t timeout;
	struct msm_commit *c;
	struct msm_commit *commit;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
	commit = commit_init(state);
	if (IS_ERR_OR_NULL(commit)) {
		ret = PTR_ERR(commit);
		DRM_ERROR("commit_init failed: %d\n", ret);
		goto error;
	}

@@ -504,7 +561,7 @@ int msm_atomic_commit(struct drm_device *dev,
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
		commit->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
@@ -518,16 +575,17 @@ int msm_atomic_commit(struct drm_device *dev,
			continue;

		if ((plane->state->fb != new_state->fb) && new_state->fb)
			add_fb(c, new_state->fb);
			commit_set_fence(commit, new_state->fb);
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	ret = start_atomic(dev->dev_private, commit->crtc_mask);
	if (ret) {
		kfree(c);
		DRM_ERROR("start_atomic failed: %d\n", ret);
		commit_destroy(commit);
		goto error;
	}

@@ -566,16 +624,22 @@ int msm_atomic_commit(struct drm_device *dev,
	 */

	if (async) {
		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
		ret = msm_atomic_commit_dispatch(dev, state, commit);
		if (ret) {
			DRM_ERROR("%s: atomic commit failed\n", __func__);
			drm_atomic_state_free(state);
			commit_destroy(commit);
			goto error;
		}
		return 0;
	}

	timeout = ktime_add_ms(ktime_get(), 1000);

	/* uninterruptible wait */
	msm_wait_fence(dev, c->fence, &timeout, false);
	msm_wait_fence(dev, commit->fence, &timeout, false);

	complete_commit(c);
	complete_commit(commit);

	return 0;

+38 −5
Original line number Diff line number Diff line
@@ -123,7 +123,7 @@ struct vblank_event {
	bool enable;
};

static void vblank_ctrl_worker(struct work_struct *work)
static void vblank_ctrl_worker(struct kthread_work *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
@@ -171,7 +171,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);
	queue_kthread_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work);

	return 0;
}
@@ -187,17 +187,27 @@ static int msm_unload(struct drm_device *dev)
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;
	int i;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	flush_kthread_work(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	/* clean up display commit worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread) {
			flush_kthread_worker(&priv->disp_thread[i].worker);
			kthread_stop(priv->disp_thread[i].thread);
			priv->disp_thread[i].thread = NULL;
		}
	}

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);
@@ -347,7 +357,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;
	int ret, i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
@@ -364,7 +374,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	init_kthread_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	msm_force_submit(priv);
@@ -410,6 +420,29 @@ static int msm_load(struct drm_device *dev, unsigned long flags)

	priv->kms = kms;

	/* initialize commit thread structure */
	for (i = 0; i < priv->num_crtcs; i++) {
		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
		init_kthread_worker(&priv->disp_thread[i].worker);
		priv->disp_thread[i].dev = dev;
		priv->disp_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->disp_thread[i].worker,
				"crtc_commit:%d",
				priv->disp_thread[i].crtc_id);

		if (IS_ERR(priv->disp_thread[i].thread)) {
			dev_err(dev->dev, "failed to create kthread\n");
			priv->disp_thread[i].thread = NULL;
			/* clean up previously created threads if any */
			for (i -= 1; i >= 0; i--) {
				kthread_stop(priv->disp_thread[i].thread);
				priv->disp_thread[i].thread = NULL;
			}
			goto fail;
		}
	}

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
+12 −1
Original line number Diff line number Diff line
@@ -32,6 +32,7 @@
#include <linux/types.h>
#include <linux/of_graph.h>
#include <asm/sizes.h>
#include <linux/kthread.h>

#ifndef CONFIG_OF
#include <mach/board.h>
@@ -141,7 +142,7 @@ enum msm_mdp_display_id {
};

struct msm_vblank_ctrl {
	struct work_struct work;
	struct kthread_work work;
	struct list_head event_list;
	spinlock_t lock;
};
@@ -213,6 +214,14 @@ struct msm_display_info {

struct display_manager;

/* Commit thread specific structure */
/* Per-CRTC display commit thread: one instance per CRTC in
 * msm_drm_private::disp_thread[], created in msm_load() and torn down
 * in msm_unload().
 */
struct msm_drm_commit {
	struct drm_device *dev;		/* back-pointer to the drm device */
	struct task_struct *thread;	/* kthread draining 'worker'; NULL when not running */
	unsigned int crtc_id;		/* drm object id (crtc->base.id) this thread serves */
	struct kthread_worker worker;	/* work queue executed by 'thread' */
};

struct msm_drm_private {

	struct msm_kms *kms;
@@ -271,6 +280,8 @@ struct msm_drm_private {
	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	struct msm_drm_commit disp_thread[MAX_CRTCS];

	unsigned int num_encoders;
	struct drm_encoder *encoders[MAX_ENCODERS];