drivers/gpu/msm/adreno_dispatch.c  +3 −3

@@ -2120,7 +2120,7 @@ static void _dispatcher_power_down(struct adreno_device *adreno_dev)
 	mutex_unlock(&device->mutex);
 }
 
-static void adreno_dispatcher_work(struct work_struct *work)
+static void adreno_dispatcher_work(struct kthread_work *work)
 {
 	struct adreno_dispatcher *dispatcher =
 		container_of(work, struct adreno_dispatcher, work);
@@ -2180,7 +2180,7 @@ void adreno_dispatcher_schedule(struct kgsl_device *device)
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
 
-	kgsl_schedule_work(&dispatcher->work);
+	queue_kthread_work(&kgsl_driver.worker, &dispatcher->work);
 }
 
 /**
@@ -2476,7 +2476,7 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
 	setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
 		(unsigned long) adreno_dev);
 
-	INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+	init_kthread_work(&dispatcher->work, adreno_dispatcher_work);
 
 	init_completion(&dispatcher->idle_gate);
 	complete_all(&dispatcher->idle_gate);
drivers/gpu/msm/adreno_dispatch.h  +1 −1

@@ -91,7 +91,7 @@ struct adreno_dispatcher {
 	atomic_t fault;
 	struct plist_head pending;
 	spinlock_t plist_lock;
-	struct work_struct work;
+	struct kthread_work work;
 	struct kobject kobj;
 	struct completion idle_gate;
 	unsigned int disp_preempt_fair_sched;
drivers/gpu/msm/kgsl.c  +14 −0

@@ -4060,6 +4060,8 @@ static void kgsl_core_exit(void)
 static int __init kgsl_core_init(void)
 {
 	int result = 0;
+	struct sched_param param = { .sched_priority = 2 };
+
 	/* alloc major and minor device numbers */
 	result = alloc_chrdev_region(&kgsl_driver.major, 0,
 		KGSL_DEVICE_MAX, "kgsl");
@@ -4125,6 +4127,18 @@ static int __init kgsl_core_init(void)
 	kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
 		WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 
+	init_kthread_worker(&kgsl_driver.worker);
+
+	kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
+		&kgsl_driver.worker, "kgsl_worker_thread");
+
+	if (IS_ERR(kgsl_driver.worker_thread)) {
+		pr_err("unable to start kgsl thread\n");
+		goto err;
+	}
+
+	sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);
+
 	kgsl_events_init();
 
 	result = kgsl_cmdbatch_init();
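The kgsl.c hunk above is the standard bring-up pattern for a dedicated kthread_worker: initialize the worker, start a thread running kthread_worker_fn(), and raise that thread to SCHED_FIFO so queued GPU work is not starved by ordinary SCHED_NORMAL tasks. A minimal self-contained sketch of that pattern follows, using the same pre-4.9 API spellings as this diff (later kernels rename them to kthread_init_worker() and friends); the demo_* names are illustrative, not part of the driver.

/*
 * Sketch: a dedicated kthread_worker promoted to SCHED_FIFO.
 * Uses the older kthread API names matching this diff; all demo_*
 * identifiers are hypothetical.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct kthread_worker demo_worker;
static struct task_struct *demo_thread;

static int __init demo_init(void)
{
	struct sched_param param = { .sched_priority = 2 };

	init_kthread_worker(&demo_worker);

	/* kthread_worker_fn() loops, draining work queued on the worker */
	demo_thread = kthread_run(kthread_worker_fn, &demo_worker,
				  "demo_worker_thread");
	if (IS_ERR(demo_thread))
		return PTR_ERR(demo_thread);

	/* real-time priority: queued work preempts SCHED_NORMAL tasks */
	sched_setscheduler(demo_thread, SCHED_FIFO, &param);
	return 0;
}

static void __exit demo_exit(void)
{
	/* drain pending work before stopping the thread */
	flush_kthread_worker(&demo_worker);
	kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");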
drivers/gpu/msm/kgsl.h  +4 −1

@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/dma-attrs.h>
 #include <linux/uaccess.h>
+#include <linux/kthread.h>
 #include <asm/cacheflush.h>
 
 /* The number of memstore arrays limits the number of contexts allowed.
@@ -132,6 +133,8 @@ struct kgsl_driver {
 	unsigned int full_cache_threshold;
 	struct workqueue_struct *workqueue;
 	struct workqueue_struct *mem_workqueue;
+	struct kthread_worker worker;
+	struct task_struct *worker_thread;
 };
 
 extern struct kgsl_driver kgsl_driver;
@@ -275,7 +278,7 @@ struct kgsl_event {
 	void *priv;
 	struct list_head node;
 	unsigned int created;
-	struct work_struct work;
+	struct kthread_work work;
 	int result;
 	struct kgsl_event_group *group;
 };
drivers/gpu/msm/kgsl_events.c  +4 −4

@@ -32,7 +32,7 @@ static inline void signal_event(struct kgsl_device *device,
 {
 	list_del(&event->node);
 	event->result = result;
-	queue_work(device->events_wq, &event->work);
+	queue_kthread_work(&kgsl_driver.worker, &event->work);
 }
 
 /**
@@ -42,7 +42,7 @@ static inline void signal_event(struct kgsl_device *device,
  * Each event callback has its own work struct and is run on a event specific
  * workqeuue. This is the worker that queues up the event callback function.
  */
-static void _kgsl_event_worker(struct work_struct *work)
+static void _kgsl_event_worker(struct kthread_work *work)
 {
 	struct kgsl_event *event = container_of(work, struct kgsl_event, work);
 	int id = KGSL_CONTEXT_ID(event->context);
@@ -282,7 +282,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
 	event->created = jiffies;
 	event->group = group;
 
-	INIT_WORK(&event->work, _kgsl_event_worker);
+	init_kthread_work(&event->work, _kgsl_event_worker);
 
 	trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);
@@ -297,7 +297,7 @@ int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
 	if (timestamp_cmp(retired, timestamp) >= 0) {
 		event->result = KGSL_EVENT_RETIRED;
-		queue_work(device->events_wq, &event->work);
+		queue_kthread_work(&kgsl_driver.worker, &event->work);
 		spin_unlock(&group->lock);
 		return 0;
 	}
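Every remaining hunk makes the same three-part substitution: struct work_struct becomes struct kthread_work, INIT_WORK() becomes init_kthread_work(), and queue_work() on a per-device workqueue becomes queue_kthread_work() on the shared kgsl_driver.worker. A hedged sketch of that work-item side follows; the demo_* names are hypothetical, and it assumes a worker like the one in the previous sketch is already running.

/*
 * Sketch: converting a deferred-work item from the workqueue API to
 * the kthread_worker API. The container_of() idiom in the handler is
 * unchanged; only the work type and the queueing call differ.
 */
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/printk.h>

struct demo_event {
	int result;
	struct kthread_work work;	/* was: struct work_struct work */
};

/* handler now receives a kthread_work pointer instead of a work_struct */
static void demo_event_worker(struct kthread_work *work)
{
	struct demo_event *event = container_of(work, struct demo_event, work);

	pr_info("event retired, result %d\n", event->result);
}

static void demo_event_setup(struct demo_event *event)
{
	/* was: INIT_WORK(&event->work, demo_event_worker) */
	init_kthread_work(&event->work, demo_event_worker);
}

static void demo_signal_event(struct kthread_worker *worker,
			      struct demo_event *event, int result)
{
	event->result = result;
	/* was: queue_work(wq, &event->work) */
	queue_kthread_work(worker, &event->work);
}

The tradeoff the diff implies is serialization: callbacks that previously could run concurrently on a workqueue now run in queue order on a single real-time thread, exchanging concurrency for predictable, high-priority latency.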