
Commit 23b72221 authored by Jayant Shekhar

drm/msm/sde: Avoid queuing the same work to multiple workers



In vblank enable, the same kthread work is queued to different
display thread workers depending on the CRTC. This triggers
kernel warnings, because a single work item cannot be used with
more than one worker. Fix this by allocating a separate work
item per vblank enable/disable request.

Change-Id: I39995bbbdf754d2cfb2a8e50d3354eccf77f7132
Signed-off-by: Jayant Shekhar <jshekhar@codeaurora.org>
parent 38454009
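A kthread_work embeds the queueing state for exactly one kthread_worker, so a single shared work item cannot be handed to several per-CRTC workers. Below is a minimal sketch of the pattern the fix adopts: one item allocated per request, initialized with kthread_init_work() and freed by its own handler. All names here (demo_work, demo_handler, demo_queue) are hypothetical; this is illustrative, not the driver code itself.

	#include <linux/kernel.h>
	#include <linux/kthread.h>
	#include <linux/slab.h>

	/* Hypothetical example: one work item per request, never shared. */
	struct demo_work {
		struct kthread_work work;	/* unique per queued item */
		int crtc_id;
		bool enable;
	};

	static void demo_handler(struct kthread_work *work)
	{
		struct demo_work *dw = container_of(work, struct demo_work, work);

		/* ... act on dw->crtc_id and dw->enable ... */

		kfree(dw);	/* the handler owns and releases the allocation */
	}

	static int demo_queue(struct kthread_worker *worker, int crtc_id, bool enable)
	{
		/* GFP_ATOMIC because callers may be in atomic context */
		struct demo_work *dw = kzalloc(sizeof(*dw), GFP_ATOMIC);

		if (!dw)
			return -ENOMEM;

		kthread_init_work(&dw->work, demo_handler);
		dw->crtc_id = crtc_id;
		dw->enable = enable;
		kthread_queue_work(worker, &dw->work);
		return 0;
	}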
+22 −53
@@ -232,62 +232,46 @@ u32 msm_readl(const void __iomem *addr)
 	return val;
 }
 
-struct vblank_event {
-	struct list_head node;
+struct vblank_work {
+	struct kthread_work work;
 	int crtc_id;
 	bool enable;
+	struct msm_drm_private *priv;
 };
 
 static void vblank_ctrl_worker(struct kthread_work *work)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
-						struct msm_vblank_ctrl, work);
-	struct msm_drm_private *priv = container_of(vbl_ctrl,
-					struct msm_drm_private, vblank_ctrl);
+	struct vblank_work *cur_work = container_of(work,
+					struct vblank_work, work);
+	struct msm_drm_private *priv = cur_work->priv;
 	struct msm_kms *kms = priv->kms;
-	struct vblank_event *vbl_ev, *tmp;
-	unsigned long flags;
-	LIST_HEAD(tmp_head);
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		list_add_tail(&vbl_ev->node, &tmp_head);
-	}
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
-	list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
-		if (vbl_ev->enable)
-			kms->funcs->enable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
-		else
-			kms->funcs->disable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
+	if (cur_work->enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
 
-		kfree(vbl_ev);
-	}
+	kfree(cur_work);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
-	unsigned long flags;
+	struct vblank_work *cur_work;
 
-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
-		return -ENOMEM;
+	if (!priv || crtc_id >= priv->num_crtcs)
+		return -EINVAL;
 
-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
+	if (!cur_work)
+		return -ENOMEM;
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	kthread_init_work(&cur_work->work, vblank_ctrl_worker);
+	cur_work->crtc_id = crtc_id;
+	cur_work->enable = enable;
+	cur_work->priv = priv;
 
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-			&vbl_ctrl->work);
+	kthread_queue_work(&priv->disp_thread[crtc_id].worker, &cur_work->work);
 
 	return 0;
 }
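The kernel warning this change avoids comes from the sanity check that kthread_queue_work() performs before inserting a work item. In kernels of this era it looks roughly like the following (abridged from kernel/kthread.c; exact text varies by version):

	/* abridged; a work already tied to a different worker trips the WARN */
	static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
						     struct kthread_work *work)
	{
		lockdep_assert_held(&worker->lock);
		WARN_ON_ONCE(!list_empty(&work->node));
		/* Do not use a work with >1 worker, see kthread_queue_work() */
		WARN_ON_ONCE(work->worker && work->worker != worker);
	}

Because the old code embedded one kthread_work in the global msm_vblank_ctrl, the second CRTC to toggle vblank would queue that same work to a different display thread worker and trip the second WARN_ON_ONCE.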
@@ -299,20 +283,8 @@ static int msm_drm_uninit(struct device *dev)
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_gpu *gpu = priv->gpu;
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev, *tmp;
 	int i;
 
-	/* We must cancel and cleanup any pending vblank enable/disable
-	 * work before drm_irq_uninstall() to avoid work re-enabling an
-	 * irq after uninstall has disabled it.
-	 */
-	kthread_flush_work(&vbl_ctrl->work);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		kfree(vbl_ev);
-	}
-
 	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->disp_thread[i].thread) {
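With the shared work item gone, msm_drm_uninit() no longer needs to flush a global work or drain an event list before drm_irq_uninstall(): every outstanding vblank_work now lives on one per-CRTC display thread worker and frees itself when it runs, so pending enable/disable requests are dealt with when those threads are torn down in the cleanup loop above. A sketch of that style of teardown, assuming disp_thread exposes the worker and thread fields the surrounding context suggests (illustrative only, not the exact driver code):

	/* Run out each per-CRTC worker, then stop its thread. */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread) {
			kthread_flush_worker(&priv->disp_thread[i].worker);
			kthread_stop(priv->disp_thread[i].thread);
			priv->disp_thread[i].thread = NULL;
		}
	}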
@@ -556,9 +528,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	INIT_LIST_HEAD(&priv->client_event_list);
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	ret = sde_power_resource_init(pdev, &priv->phandle);
 	if (ret) {
+0 −8
@@ -204,12 +204,6 @@ enum msm_mdp_conn_property {
 	CONNECTOR_PROP_COUNT
 };
 
-struct msm_vblank_ctrl {
-	struct kthread_work work;
-	struct list_head event_list;
-	spinlock_t lock;
-};
-
 #define MAX_H_TILES_PER_DISPLAY 2
 
 /**
@@ -642,8 +636,6 @@ struct msm_drm_private {
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
-	struct msm_vblank_ctrl vblank_ctrl;
-
 	/* task holding struct_mutex.. currently only used in submit path
 	 * to detect and reject faults from copy_from_user() for submit
 	 * ioctl.