drivers/gpu/msm/kgsl_drawobj.c  +19 −18

@@ -214,8 +214,13 @@ static void drawobj_sync_func(struct kgsl_device *device,
 	trace_syncpoint_timestamp_expire(event->syncobj,
 		event->context, event->timestamp);
 
-	drawobj_sync_expire(device, event);
-	kgsl_context_put(event->context);
+	/*
+	 * Put down the context ref count only if
+	 * this thread successfully clears the pending bit mask.
+	 */
+	if (drawobj_sync_expire(device, event))
+		kgsl_context_put(event->context);
+
 	kgsl_drawobj_put(&event->syncobj->base);
 }
 
@@ -245,24 +250,11 @@ static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
 static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
 {
 	struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
-	unsigned long pending = 0;
 	unsigned int i;
 
 	/* Zap the canary timer */
 	del_timer_sync(&syncobj->timer);
 
-	/*
-	 * Copy off the pending list and clear each pending event atomically -
-	 * this will render any subsequent asynchronous callback harmless.
-	 * This marks each event for deletion. If any pending fence callbacks
-	 * run between now and the actual cancel, the associated structures
-	 * are kfreed only in the cancel call.
-	 */
-	for_each_set_bit(i, &syncobj->pending, KGSL_MAX_SYNCPOINTS) {
-		if (test_and_clear_bit(i, &syncobj->pending))
-			__set_bit(i, &pending);
-	}
-
 	/*
 	 * Clear all pending events - this will render any subsequent async
 	 * callbacks harmless
@@ -270,8 +262,12 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
 	for (i = 0; i < syncobj->numsyncs; i++) {
 		struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
 
-		/* Don't do anything if the event has already expired */
-		if (!test_bit(i, &pending))
+		/*
+		 * Don't do anything if the event has already expired.
+		 * If this thread clears the pending bit mask then it is
+		 * responsible for doing context put.
+		 */
+		if (!test_and_clear_bit(i, &syncobj->pending))
 			continue;
 
 		switch (event->type) {
@@ -279,6 +275,11 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
 			kgsl_cancel_event(drawobj->device,
 				&event->context->events, event->timestamp,
 				drawobj_sync_func, event);
+			/*
+			 * Do context put here to make sure the context is alive
+			 * till this thread cancels kgsl event.
+			 */
+			kgsl_context_put(event->context);
 			break;
 		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
 			kgsl_sync_fence_async_cancel(event->handle);
@@ -291,7 +292,7 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
 	 * If we cancelled an event, there's a good chance that the context is
 	 * on a dispatcher queue, so schedule to get it removed.
 	 */
-	if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
+	if (!bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS) &&
 		drawobj->device->ftbl->drawctxt_sched)
 		drawobj->device->ftbl->drawctxt_sched(drawobj->device,
 			drawobj->context);
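Note: the patch comments describe a single ownership rule: whichever thread atomically clears a syncpoint's pending bit (via test_and_clear_bit()) is the one responsible for kgsl_context_put(), so the reference is dropped exactly once even when the expiry callback races with drawobj_destroy_sync(). The snippet below is a minimal standalone sketch of that rule using plain C11 atomics rather than the kernel bitops; the names claim_syncpoint, sync_expire, destroy_sync, and context_put are hypothetical stand-ins, not kernel APIs.

/*
 * Sketch only (userspace, not kernel code): emulate the
 * "whoever clears the pending bit does the context put" rule.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct context { atomic_int refcount; };

static atomic_ulong pending;               /* stands in for syncobj->pending */

static void context_put(struct context *ctx)
{
	/* Drop one reference; free when the last one goes away. */
	if (atomic_fetch_sub(&ctx->refcount, 1) == 1)
		printf("context freed\n");
}

/* Emulates test_and_clear_bit(): true only for the thread that clears it. */
static bool claim_syncpoint(unsigned int bit)
{
	unsigned long mask = 1UL << bit;

	return atomic_fetch_and(&pending, ~mask) & mask;
}

/* Expiry callback path: put the context only if this thread cleared the bit. */
static void sync_expire(struct context *ctx, unsigned int bit)
{
	if (claim_syncpoint(bit))
		context_put(ctx);
}

/* Destroy path: same rule, so a racing callback cannot cause a double put. */
static void destroy_sync(struct context *ctx, unsigned int bit)
{
	if (claim_syncpoint(bit))
		context_put(ctx);
}

int main(void)
{
	struct context ctx = { .refcount = 1 };

	atomic_store(&pending, 1UL << 0);
	sync_expire(&ctx, 0);      /* wins the bit, drops the reference */
	destroy_sync(&ctx, 0);     /* loses the race, does nothing */
	return 0;
}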