Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c2841e62 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: sde: Convert workq to kernel thread in SDE rotator"

parents b52caad5 6ade5823
Loading
Loading
Loading
Loading
+77 −42
Original line number Diff line number Diff line
@@ -984,6 +984,7 @@ static int sde_rotator_init_queue(struct sde_rot_mgr *mgr)
{
	int i, size, ret = 0;
	char name[32];
	struct sched_param param = { .sched_priority = 5 };

	size = sizeof(struct sde_rot_queue) * mgr->queue_count;
	mgr->commitq = devm_kzalloc(mgr->device, size, GFP_KERNEL);
@@ -994,11 +995,21 @@ static int sde_rotator_init_queue(struct sde_rot_mgr *mgr)
		snprintf(name, sizeof(name), "rot_commitq_%d_%d",
				mgr->device->id, i);
		SDEROT_DBG("work queue name=%s\n", name);
		mgr->commitq[i].rot_work_queue =
			alloc_ordered_workqueue("%s",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
		if (!mgr->commitq[i].rot_work_queue) {
		init_kthread_worker(&mgr->commitq[i].rot_kw);
		mgr->commitq[i].rot_thread = kthread_run(kthread_worker_fn,
				&mgr->commitq[i].rot_kw, name);
		if (IS_ERR(mgr->commitq[i].rot_thread)) {
			ret = -EPERM;
			mgr->commitq[i].rot_thread = NULL;
			break;
		}

		ret = sched_setscheduler(mgr->commitq[i].rot_thread,
			SCHED_FIFO, &param);
		if (ret) {
			SDEROT_ERR(
				"failed to set kthread priority for commitq %d\n",
				ret);
			break;
		}

@@ -1015,10 +1026,21 @@ static int sde_rotator_init_queue(struct sde_rot_mgr *mgr)
		snprintf(name, sizeof(name), "rot_doneq_%d_%d",
				mgr->device->id, i);
		SDEROT_DBG("work queue name=%s\n", name);
		mgr->doneq[i].rot_work_queue = alloc_ordered_workqueue("%s",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
		if (!mgr->doneq[i].rot_work_queue) {
		init_kthread_worker(&mgr->doneq[i].rot_kw);
		mgr->doneq[i].rot_thread = kthread_run(kthread_worker_fn,
				&mgr->doneq[i].rot_kw, name);
		if (IS_ERR(mgr->doneq[i].rot_thread)) {
			ret = -EPERM;
			mgr->doneq[i].rot_thread = NULL;
			break;
		}

		ret = sched_setscheduler(mgr->doneq[i].rot_thread,
			SCHED_FIFO, &param);
		if (ret) {
			SDEROT_ERR(
				"failed to set kthread priority for doneq %d\n",
				ret);
			break;
		}

@@ -1034,18 +1056,20 @@ static void sde_rotator_deinit_queue(struct sde_rot_mgr *mgr)

	if (mgr->commitq) {
		for (i = 0; i < mgr->queue_count; i++) {
			if (mgr->commitq[i].rot_work_queue)
				destroy_workqueue(
					mgr->commitq[i].rot_work_queue);
			if (mgr->commitq[i].rot_thread) {
				flush_kthread_worker(&mgr->commitq[i].rot_kw);
				kthread_stop(mgr->commitq[i].rot_thread);
			}
		}
		devm_kfree(mgr->device, mgr->commitq);
		mgr->commitq = NULL;
	}
	if (mgr->doneq) {
		for (i = 0; i < mgr->queue_count; i++) {
			if (mgr->doneq[i].rot_work_queue)
				destroy_workqueue(
					mgr->doneq[i].rot_work_queue);
			if (mgr->doneq[i].rot_thread) {
				flush_kthread_worker(&mgr->doneq[i].rot_kw);
				kthread_stop(mgr->doneq[i].rot_thread);
			}
		}
		devm_kfree(mgr->device, mgr->doneq);
		mgr->doneq = NULL;
@@ -1166,7 +1190,7 @@ void sde_rotator_queue_request(struct sde_rot_mgr *mgr,

		if (entry->item.ts)
			entry->item.ts[SDE_ROTATOR_TS_QUEUE] = ktime_get();
		queue_work(queue->rot_work_queue, &entry->commit_work);
		queue_kthread_work(&queue->rot_kw, &entry->commit_work);
	}
}

@@ -1377,12 +1401,13 @@ static void sde_rotator_release_entry(struct sde_rot_mgr *mgr,
 *
 * Note this asynchronous handler is protected by hal lock.
 */
static void sde_rotator_commit_handler(struct work_struct *work)
static void sde_rotator_commit_handler(struct kthread_work *work)
{
	struct sde_rot_entry *entry;
	struct sde_rot_entry_container *request;
	struct sde_rot_hw_resource *hw;
	struct sde_rot_mgr *mgr;
	struct sched_param param = { .sched_priority = 5 };
	int ret;

	entry = container_of(work, struct sde_rot_entry, commit_work);
@@ -1393,6 +1418,12 @@ static void sde_rotator_commit_handler(struct work_struct *work)
		return;
	}

	ret = sched_setscheduler(entry->fenceq->rot_thread, SCHED_FIFO, &param);
	if (ret) {
		SDEROT_WARN("Fail to set kthread priority for fenceq: %d\n",
				ret);
	}

	mgr = entry->private->mgr;

	SDEROT_EVTLOG(
@@ -1466,7 +1497,7 @@ static void sde_rotator_commit_handler(struct work_struct *work)
	if (entry->item.ts)
		entry->item.ts[SDE_ROTATOR_TS_FLUSH] = ktime_get();

	queue_work(entry->doneq->rot_work_queue, &entry->done_work);
	queue_kthread_work(&entry->doneq->rot_kw, &entry->done_work);
	sde_rot_mgr_unlock(mgr);
	return;
error:
@@ -1478,8 +1509,8 @@ get_hw_res_err:
	sde_rotator_release_entry(mgr, entry);
	atomic_dec(&request->pending_count);
	atomic_inc(&request->failed_count);
	if (request->retireq && request->retire_work)
		queue_work(request->retireq, request->retire_work);
	if (request->retire_kw && request->retire_work)
		queue_kthread_work(request->retire_kw, request->retire_work);
	sde_rot_mgr_unlock(mgr);
}

@@ -1493,7 +1524,7 @@ get_hw_res_err:
 *
 * Note this asynchronous handler is protected by hal lock.
 */
static void sde_rotator_done_handler(struct work_struct *work)
static void sde_rotator_done_handler(struct kthread_work *work)
{
	struct sde_rot_entry *entry;
	struct sde_rot_entry_container *request;
@@ -1551,8 +1582,8 @@ static void sde_rotator_done_handler(struct work_struct *work)
	ATRACE_INT("sde_rot_done", 1);
	sde_rotator_release_entry(mgr, entry);
	atomic_dec(&request->pending_count);
	if (request->retireq && request->retire_work)
		queue_work(request->retireq, request->retire_work);
	if (request->retire_kw && request->retire_work)
		queue_kthread_work(request->retire_kw, request->retire_work);
	if (entry->item.ts)
		entry->item.ts[SDE_ROTATOR_TS_RETIRE] = ktime_get();
	sde_rot_mgr_unlock(mgr);
@@ -1918,8 +1949,10 @@ static int sde_rotator_add_request(struct sde_rot_mgr *mgr,

		entry->request = req;

		INIT_WORK(&entry->commit_work, sde_rotator_commit_handler);
		INIT_WORK(&entry->done_work, sde_rotator_done_handler);
		init_kthread_work(&entry->commit_work,
				sde_rotator_commit_handler);
		init_kthread_work(&entry->done_work,
				sde_rotator_done_handler);
		SDEROT_DBG("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
			"dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
			item->src_rect.x, item->src_rect.y,
@@ -1957,17 +1990,18 @@ static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr,
	struct sde_rot_entry *entry;
	int i;

	if (atomic_read(&req->pending_count)) {
		/*
		 * To avoid signal the rotation entry output fence in the wrong
	 * order, all the entries in the same request needs to be canceled
	 * first, before signaling the output fence.
		 * order, all the entries in the same request needs to be
		 * canceled first, before signaling the output fence.
		 */
		SDEROT_DBG("cancel work start\n");
		sde_rot_mgr_unlock(mgr);
		for (i = req->count - 1; i >= 0; i--) {
			entry = req->entries + i;
		cancel_work_sync(&entry->commit_work);
		cancel_work_sync(&entry->done_work);
			flush_kthread_worker(&entry->commitq->rot_kw);
			flush_kthread_worker(&entry->doneq->rot_kw);
		}
		sde_rot_mgr_lock(mgr);
		SDEROT_DBG("cancel work done\n");
@@ -1976,6 +2010,7 @@ static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr,
			sde_rotator_signal_output(entry);
			sde_rotator_release_entry(mgr, entry);
		}
	}

	list_del_init(&req->list);
	devm_kfree(&mgr->pdev->dev, req);
@@ -1999,7 +2034,7 @@ static void sde_rotator_free_completed_request(struct sde_rot_mgr *mgr,

	list_for_each_entry_safe(req, req_next, &private->req_list, list) {
		if ((atomic_read(&req->pending_count) == 0) &&
				(!req->retire_work && !req->retireq)) {
				(!req->retire_work && !req->retire_kw)) {
			list_del_init(&req->list);
			devm_kfree(&mgr->pdev->dev, req);
		}
+7 −5
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/pm_runtime.h>
#include <linux/kthread.h>

#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
@@ -184,7 +185,8 @@ struct sde_rot_hw_resource {
};

/*
 * struct sde_rot_queue - per-queue execution context for the SDE rotator
 *
 * NOTE(review): this span comes from a rendered Gerrit diff whose +/- markers
 * were stripped, so BOTH sides of the hunk appear as consecutive members:
 * @rot_work_queue is the pre-patch (workqueue-based) member being REMOVED,
 * while @rot_kw/@rot_thread are the post-patch (kthread-worker) replacements
 * being ADDED. In the actual post-patch header only rot_kw and rot_thread
 * exist — TODO confirm against the real sde_rotator_core.h.
 */
struct sde_rot_queue {
	struct workqueue_struct *rot_work_queue;
	/* kthread worker drained via flush_kthread_worker() and fed with
	 * queue_kthread_work() by the commit/done paths in this diff */
	struct kthread_worker rot_kw;
	/* task created by kthread_run(kthread_worker_fn, ...); stopped with
	 * kthread_stop() in sde_rotator_deinit_queue(); NULL on create failure */
	struct task_struct *rot_thread;
	struct sde_rot_timeline *timeline;
	struct sde_rot_hw_resource *hw;
};
@@ -195,8 +197,8 @@ struct sde_rot_entry_container {
	u32 count;
	atomic_t pending_count;
	atomic_t failed_count;
	struct workqueue_struct *retireq;
	struct work_struct *retire_work;
	struct kthread_worker *retire_kw;
	struct kthread_work *retire_work;
	struct sde_rot_entry *entries;
};

@@ -205,8 +207,8 @@ struct sde_rot_file_private;

struct sde_rot_entry {
	struct sde_rotation_item item;
	struct work_struct commit_work;
	struct work_struct done_work;
	struct kthread_work commit_work;
	struct kthread_work done_work;
	struct sde_rot_queue *commitq;
	struct sde_rot_queue *fenceq;
	struct sde_rot_queue *doneq;
+25 −27
Original line number Diff line number Diff line
@@ -53,8 +53,8 @@
#define SDE_ROTATOR_DEGREE_180		180
#define SDE_ROTATOR_DEGREE_90		90

static void sde_rotator_submit_handler(struct work_struct *work);
static void sde_rotator_retire_handler(struct work_struct *work);
static void sde_rotator_submit_handler(struct kthread_work *work);
static void sde_rotator_retire_handler(struct kthread_work *work);
#ifdef CONFIG_COMPAT
static long sde_rotator_compat_ioctl32(struct file *file,
	unsigned int cmd, unsigned long arg);
@@ -466,8 +466,8 @@ static void sde_rotator_stop_streaming(struct vb2_queue *q)
		sde_rotator_cancel_all_requests(rot_dev->mgr, ctx->private);
		sde_rot_mgr_unlock(rot_dev->mgr);
		mutex_unlock(q->lock);
		cancel_work_sync(&ctx->submit_work);
		cancel_work_sync(&ctx->retire_work);
		flush_kthread_work(&ctx->submit_work);
		flush_kthread_work(&ctx->retire_work);
		mutex_lock(q->lock);
	}

@@ -765,8 +765,6 @@ static ssize_t sde_rotator_ctx_show(struct kobject *kobj,
			ctx->format_cap.fmt.pix.sizeimage);
	SPRINT("abort_pending=%d\n", ctx->abort_pending);
	SPRINT("command_pending=%d\n", atomic_read(&ctx->command_pending));
	SPRINT("submit_work=%d\n", work_busy(&ctx->submit_work));
	SPRINT("retire_work=%d\n", work_busy(&ctx->retire_work));
	SPRINT("sequence=%u\n",
		sde_rotator_get_timeline_commit_ts(ctx->work_queue.timeline));
	SPRINT("timestamp=%u\n",
@@ -923,8 +921,8 @@ static int sde_rotator_open(struct file *file)
	ctx->crop_out.width = 640;
	ctx->crop_out.height = 480;
	init_waitqueue_head(&ctx->wait_queue);
	INIT_WORK(&ctx->submit_work, sde_rotator_submit_handler);
	INIT_WORK(&ctx->retire_work, sde_rotator_retire_handler);
	init_kthread_work(&ctx->submit_work, sde_rotator_submit_handler);
	init_kthread_work(&ctx->retire_work, sde_rotator_retire_handler);

	v4l2_fh_init(&ctx->fh, video);
	file->private_data = &ctx->fh;
@@ -954,14 +952,16 @@ static int sde_rotator_open(struct file *file)

	snprintf(name, sizeof(name), "rot_fenceq_%d_%d", rot_dev->dev->id,
			ctx->session_id);
	ctx->work_queue.rot_work_queue = alloc_ordered_workqueue("%s",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
	if (!ctx->work_queue.rot_work_queue) {
		SDEDEV_ERR(ctx->rot_dev->dev, "fail allocate workqueue\n");
	init_kthread_worker(&ctx->work_queue.rot_kw);
	ctx->work_queue.rot_thread = kthread_run(kthread_worker_fn,
			&ctx->work_queue.rot_kw, name);
	if (IS_ERR(ctx->work_queue.rot_thread)) {
		SDEDEV_ERR(ctx->rot_dev->dev, "fail allocate kthread\n");
		ret = -EPERM;
		ctx->work_queue.rot_thread = NULL;
		goto error_alloc_workqueue;
	}
	SDEDEV_DBG(ctx->rot_dev->dev, "work queue name=%s\n", name);
	SDEDEV_DBG(ctx->rot_dev->dev, "kthread name=%s\n", name);

	snprintf(name, sizeof(name), "%d_%d", rot_dev->dev->id,
			ctx->session_id);
@@ -1010,7 +1010,8 @@ error_ctrl_handler:
error_open_session:
	sde_rot_mgr_unlock(rot_dev->mgr);
	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
	destroy_workqueue(ctx->work_queue.rot_work_queue);
	flush_kthread_worker(&ctx->work_queue.rot_kw);
	kthread_stop(ctx->work_queue.rot_thread);
error_alloc_workqueue:
	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
error_create_sysfs:
@@ -1045,20 +1046,17 @@ static int sde_rotator_release(struct file *file)
	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	mutex_unlock(&rot_dev->lock);
	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d w:%x\n",
			session_id, work_busy(&ctx->submit_work));
	cancel_work_sync(&ctx->submit_work);
	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n", session_id);
	flush_kthread_worker(&ctx->work_queue.rot_kw);
	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
	sde_rot_mgr_lock(rot_dev->mgr);
	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
	sde_rot_mgr_unlock(rot_dev->mgr);
	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d w:%x\n",
			session_id, work_busy(&ctx->retire_work));
	cancel_work_sync(&ctx->retire_work);
	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
	mutex_lock(&rot_dev->lock);
	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
	destroy_workqueue(ctx->work_queue.rot_work_queue);
	kthread_stop(ctx->work_queue.rot_thread);
	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
	kobject_put(&ctx->kobj);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
@@ -2023,7 +2021,7 @@ static const struct v4l2_ioctl_ops sde_rotator_ioctl_ops = {
 *
 * This function is scheduled in work queue context.
 */
static void sde_rotator_retire_handler(struct work_struct *work)
static void sde_rotator_retire_handler(struct kthread_work *work)
{
	struct vb2_v4l2_buffer *src_buf;
	struct vb2_v4l2_buffer *dst_buf;
@@ -2209,7 +2207,7 @@ static int sde_rotator_process_buffers(struct sde_rotator_ctx *ctx,
		goto error_init_request;
	}

	req->retireq = ctx->work_queue.rot_work_queue;
	req->retire_kw = &ctx->work_queue.rot_kw;
	req->retire_work = &ctx->retire_work;

	ret = sde_rotator_handle_request_common(
@@ -2238,7 +2236,7 @@ error_null_buffer:
 *
 * This function is scheduled in work queue context.
 */
static void sde_rotator_submit_handler(struct work_struct *work)
static void sde_rotator_submit_handler(struct kthread_work *work)
{
	struct sde_rotator_ctx *ctx;
	struct sde_rotator_device *rot_dev;
@@ -2325,7 +2323,7 @@ static void sde_rotator_device_run(void *priv)

			/* disconnect request (will be freed by core layer) */
			sde_rot_mgr_lock(rot_dev->mgr);
			ctx->request->retireq = NULL;
			ctx->request->retire_kw = NULL;
			ctx->request->retire_work = NULL;
			ctx->request = NULL;
			sde_rot_mgr_unlock(rot_dev->mgr);
@@ -2364,7 +2362,7 @@ static void sde_rotator_device_run(void *priv)

			/* disconnect request (will be freed by core layer) */
			sde_rot_mgr_lock(rot_dev->mgr);
			ctx->request->retireq = NULL;
			ctx->request->retire_kw = NULL;
			ctx->request->retire_work = NULL;
			ctx->request = ERR_PTR(-EIO);
			sde_rot_mgr_unlock(rot_dev->mgr);
@@ -2471,7 +2469,7 @@ static int sde_rotator_job_ready(void *priv)
				v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx),
				atomic_read(&ctx->command_pending));
		atomic_inc(&ctx->command_pending);
		queue_work(ctx->work_queue.rot_work_queue, &ctx->submit_work);
		queue_kthread_work(&ctx->work_queue.rot_kw, &ctx->submit_work);
	} else if (!atomic_read(&ctx->request->pending_count)) {
		/* if pending request completed, forward to device run state */
		SDEDEV_DBG(rot_dev->dev,
+4 −3
Original line number Diff line number Diff line
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,7 @@
#include <linux/iommu.h>
#include <linux/dma-buf.h>
#include <linux/msm-bus.h>
#include <linux/kthread.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ctrls.h>
@@ -131,8 +132,8 @@ struct sde_rotator_ctx {
	struct sde_rotator_vbinfo *vbinfo_cap;
	struct sde_rotator_vbinfo *vbinfo_out;
	wait_queue_head_t wait_queue;
	struct work_struct submit_work;
	struct work_struct retire_work;
	struct kthread_work submit_work;
	struct kthread_work retire_work;
	struct sde_rot_queue work_queue;
	struct sde_rot_entry_container *request;
	struct sde_rot_file_private *private;