Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f48c46ac authored by Sandeep Panda, committed by Gerrit — the friendly Code Review server
Browse files

drm/msm: create separate commit thread for each display



This change creates a separate commit thread for each
display, so that one display's commit is not blocked on
another display's commit, improving performance in
multi-display use cases.

Change-Id: Ibd0dae1da53ec3a72de8b96c3c03ce51830cb4f9
Signed-off-by: Sandeep Panda <spanda@codeaurora.org>
Signed-off-by: Dhaval Patel <pdhaval@codeaurora.org>
parent 3779fd6b
Loading
Loading
Loading
Loading
+77 −23
Original line number Diff line number Diff line
@@ -24,12 +24,10 @@
/*
 * msm_commit - context for one asynchronous atomic commit.  With this
 * change the commit runs as a kthread_work on the per-crtc display
 * thread instead of the shared atomic workqueue, so the old
 * work_struct member and the commit_worker() forward declaration
 * (pre-patch removals shown inline by the diff) are dropped; the
 * kthread callback is defined before its first use, so no forward
 * declaration is needed.
 */
struct msm_commit {
	struct drm_device *dev;			/* drm device being committed to */
	struct drm_atomic_state *state;		/* atomic state under commit */
	uint32_t crtc_mask;			/* crtcs marked pending by this commit */
	struct kthread_work commit_work;	/* runs _msm_drm_commit_work_cb */
};

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
@@ -60,21 +58,6 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
	spin_unlock(&priv->pending_crtcs_event.lock);
}

/*
 * NOTE(review): this is the pre-patch, workqueue-based commit_init shown
 * as removed context in the diff; it is superseded by the kthread_work
 * variant defined later in this file.
 */
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	/* allocation can fail; caller must cope with a NULL commit context */
	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	/* old path: the commit ran on the shared atomic workqueue */
	INIT_WORK(&c->work, commit_worker);

	return c;
}

static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
@@ -404,7 +387,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 */
static void complete_commit(struct msm_commit *c, bool async)
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
@@ -445,9 +428,74 @@ static void complete_commit(struct msm_commit *c, bool async)
	commit_destroy(c);
}

static void commit_worker(struct work_struct *work)
/*
 * kthread_work callback for a display commit thread: recover the
 * msm_commit that embeds this work item and run the asynchronous part
 * of the atomic commit.
 */
static void _msm_drm_commit_work_cb(struct kthread_work *work)
{
	if (!work) {
		DRM_ERROR("%s: Invalid commit work data!\n", __func__);
		return;
	}

	complete_commit(container_of(work, struct msm_commit, commit_work));
}

/*
 * Allocate and initialise the commit context for one atomic commit.
 * The embedded kthread_work is later queued on the crtc's display
 * thread.  Returns NULL on allocation failure.
 */
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (c) {
		c->dev = state->dev;
		c->state = state;
		kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);
	}

	return c;
}

/*
 * msm_atomic_commit_dispatch - hand a nonblocking commit to the display
 * thread of the crtc touched by @state.
 *
 * Scans the crtcs in @state, finds the disp_thread whose crtc_id matches,
 * and queues @commit's kthread_work on that thread's worker.
 *
 * Returns 0 only when the work was queued on a live display thread;
 * -EINVAL when no matching crtc_id is found or its thread is gone.
 *
 * NOTE(review): a stray remnant of the removed commit_worker() body
 * ("complete_commit(container_of(work, ...), true);") had been rendered
 * into this function by the diff view; it referenced an undefined
 * `work` and is deleted here.
 */
static int msm_atomic_commit_dispatch(struct drm_device *dev,
		struct drm_atomic_state *state, struct msm_commit *commit)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc = NULL;
	struct drm_crtc_state *crtc_state = NULL;
	int ret = -EINVAL, i = 0, j = 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		for (j = 0; j < priv->num_crtcs; j++) {
			if (priv->disp_thread[j].crtc_id ==
						crtc->base.id) {
				if (priv->disp_thread[j].thread) {
					kthread_queue_work(
						&priv->disp_thread[j].worker,
							&commit->commit_work);
					/* only return zero if work is
					 * queued successfully.
					 */
					ret = 0;
				} else {
					DRM_ERROR(" Error for crtc_id: %d\n",
						priv->disp_thread[j].crtc_id);
				}
				break;
			}
		}
		/*
		 * TODO: handle cases where there will be more than
		 * one crtc per commit cycle. Remove this check then.
		 * Current assumption is there will be only one crtc
		 * per commit cycle.
		 */
		if (j < priv->num_crtcs)
			break;
	}

	return ret;
}

/**
@@ -546,11 +594,17 @@ int msm_atomic_commit(struct drm_device *dev,
	 */

	if (nonblock) {
		queue_work(priv->atomic_wq, &c->work);
		ret = msm_atomic_commit_dispatch(dev, state, c);
		if (ret) {
			DRM_ERROR("%s: atomic commit failed\n", __func__);
			drm_atomic_state_free(state);
			commit_destroy(c);
			goto error;
		}
		return 0;
	}

	complete_commit(c, false);
	complete_commit(c);

	return 0;

+37 −9
Original line number Diff line number Diff line
@@ -168,7 +168,7 @@ struct vblank_event {
	bool enable;
};

static void vblank_ctrl_worker(struct work_struct *work)
static void vblank_ctrl_worker(struct kthread_work *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
@@ -216,7 +216,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);
	kthread_queue_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work);

	return 0;
}
@@ -230,17 +230,27 @@ static int msm_drm_uninit(struct device *dev)
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;
	int i;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	kthread_flush_work(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	/* clean up display commit worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread) {
			kthread_flush_worker(&priv->disp_thread[i].worker);
			kthread_stop(priv->disp_thread[i].thread);
			priv->disp_thread[i].thread = NULL;
		}
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);
@@ -263,9 +273,6 @@ static int msm_drm_uninit(struct device *dev)
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	flush_workqueue(priv->atomic_wq);
	destroy_workqueue(priv->atomic_wq);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

@@ -432,7 +439,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;
	int ret, i;

	ddev = drm_dev_alloc(drv, dev);
	if (!ddev) {
@@ -462,13 +469,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
		goto mdss_init_fail;

	priv->wq = alloc_ordered_workqueue("msm_drm", 0);
	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->client_event_list);
	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	ret = sde_power_resource_init(pdev, &priv->phandle);
@@ -529,6 +535,28 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
	priv->kms = kms;
	pm_runtime_enable(dev);

	for (i = 0; i < priv->num_crtcs; i++) {
		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
		kthread_init_worker(&priv->disp_thread[i].worker);
		priv->disp_thread[i].dev = ddev;
		priv->disp_thread[i].thread =
			kthread_run(kthread_worker_fn,
				&priv->disp_thread[i].worker,
				"crtc_commit:%d",
				priv->disp_thread[i].crtc_id);

		if (IS_ERR(priv->disp_thread[i].thread)) {
			dev_err(dev, "failed to create kthread\n");
			priv->disp_thread[i].thread = NULL;
			/* clean up previously created threads if any */
			for (i -= 1; i >= 0; i--) {
				kthread_stop(priv->disp_thread[i].thread);
				priv->disp_thread[i].thread = NULL;
			}
			goto fail;
		}
	}

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
+12 −2
Original line number Diff line number Diff line
@@ -35,6 +35,7 @@
#include <linux/of_device.h>
#include <linux/sde_io_util.h>
#include <asm/sizes.h>
#include <linux/kthread.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -154,7 +155,7 @@ enum msm_mdp_conn_property {
};

/*
 * msm_vblank_ctrl - queue of pending vblank enable/disable requests,
 * drained by vblank_ctrl_worker() on a display thread.  The old
 * `struct work_struct work;` member (a pre-patch removal rendered
 * inline by the diff) is dropped: keeping both lines would declare two
 * members named `work`, and the new code initialises and queues the
 * kthread_work variant.
 */
struct msm_vblank_ctrl {
	struct kthread_work work;	/* queued on the crtc's disp_thread worker */
	struct list_head event_list;	/* pending vblank_event entries */
	spinlock_t lock;		/* protects event_list */
};
@@ -235,6 +236,14 @@ struct msm_drm_event {
	u8 data[];
};

/* Commit thread specific structure */
struct msm_drm_commit {
	struct drm_device *dev;		/* drm device this thread serves */
	struct task_struct *thread;	/* kthread running kthread_worker_fn */
	unsigned int crtc_id;		/* base.id of the crtc this thread commits for */
	struct kthread_worker worker;	/* worker drained by the thread */
};

struct msm_drm_private {

	struct drm_device *dev;
@@ -277,7 +286,6 @@ struct msm_drm_private {
	struct list_head inactive_list;

	struct workqueue_struct *wq;
	struct workqueue_struct *atomic_wq;

	/* crtcs pending async atomic updates: */
	uint32_t pending_crtcs;
@@ -293,6 +301,8 @@ struct msm_drm_private {
	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	struct msm_drm_commit disp_thread[MAX_CRTCS];

	unsigned int num_encoders;
	struct drm_encoder *encoders[MAX_ENCODERS];