Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 23b72221 authored by Jayant Shekhar
Browse files

drm/msm/sde: Avoid queuing the same work to multiple workers



During vblank enable, the same work item is queued to different display
thread workers based on the CRTC. This results in kernel warnings,
because a single work item cannot be queued to more than one worker.
Fix this by allocating a separate work item per request.

Change-Id: I39995bbbdf754d2cfb2a8e50d3354eccf77f7132
Signed-off-by: Jayant Shekhar <jshekhar@codeaurora.org>
parent 38454009
Loading
Loading
Loading
Loading
+22 −53
Original line number Original line Diff line number Diff line
@@ -232,62 +232,46 @@ u32 msm_readl(const void __iomem *addr)
	return val;
	return val;
}
}


struct vblank_event {
struct vblank_work {
	struct list_head node;
	struct kthread_work work;
	int crtc_id;
	int crtc_id;
	bool enable;
	bool enable;
	struct msm_drm_private *priv;
};
};


static void vblank_ctrl_worker(struct kthread_work *work)
static void vblank_ctrl_worker(struct kthread_work *work)
{
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
	struct vblank_work *cur_work = container_of(work,
						struct msm_vblank_ctrl, work);
					struct vblank_work, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
	struct msm_drm_private *priv = cur_work->priv;
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;
	LIST_HEAD(tmp_head);


	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	if (cur_work->enable)
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		kms->funcs->enable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
		list_del(&vbl_ev->node);
		list_add_tail(&vbl_ev->node, &tmp_head);
	}
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);
	else
	else
			kms->funcs->disable_vblank(kms,
		kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
						priv->crtcs[vbl_ev->crtc_id]);


		kfree(vbl_ev);
	kfree(cur_work);
	}
}
}


static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
					int crtc_id, bool enable)
{
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_work *cur_work;
	struct vblank_event *vbl_ev;
	unsigned long flags;


	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!priv || crtc_id >= priv->num_crtcs)
	if (!vbl_ev)
		return -EINVAL;
		return -ENOMEM;


	vbl_ev->crtc_id = crtc_id;
	cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
	vbl_ev->enable = enable;
	if (!cur_work)
		return -ENOMEM;


	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	kthread_init_work(&cur_work->work, vblank_ctrl_worker);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	cur_work->crtc_id = crtc_id;
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
	cur_work->enable = enable;
	cur_work->priv = priv;


	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
	kthread_queue_work(&priv->disp_thread[crtc_id].worker, &cur_work->work);
			&vbl_ctrl->work);


	return 0;
	return 0;
}
}
@@ -299,20 +283,8 @@ static int msm_drm_uninit(struct device *dev)
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;
	int i;
	int i;


	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	kthread_flush_work(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	/* clean up display commit/event worker threads */
	/* clean up display commit/event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread) {
		if (priv->disp_thread[i].thread) {
@@ -556,9 +528,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)


	INIT_LIST_HEAD(&priv->client_event_list);
	INIT_LIST_HEAD(&priv->client_event_list);
	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);


	ret = sde_power_resource_init(pdev, &priv->phandle);
	ret = sde_power_resource_init(pdev, &priv->phandle);
	if (ret) {
	if (ret) {
+0 −8
Original line number Original line Diff line number Diff line
@@ -204,12 +204,6 @@ enum msm_mdp_conn_property {
	CONNECTOR_PROP_COUNT
	CONNECTOR_PROP_COUNT
};
};


struct msm_vblank_ctrl {
	struct kthread_work work;
	struct list_head event_list;
	spinlock_t lock;
};

#define MAX_H_TILES_PER_DISPLAY 2
#define MAX_H_TILES_PER_DISPLAY 2


/**
/**
@@ -642,8 +636,6 @@ struct msm_drm_private {
	struct notifier_block vmap_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;
	struct shrinker shrinker;


	struct msm_vblank_ctrl vblank_ctrl;

	/* task holding struct_mutex.. currently only used in submit path
	/* task holding struct_mutex.. currently only used in submit path
	 * to detect and reject faults from copy_from_user() for submit
	 * to detect and reject faults from copy_from_user() for submit
	 * ioctl.
	 * ioctl.