drivers/gpu/msm/adreno.h  +14 −6

@@ -1291,6 +1291,12 @@ adreno_get_rptr(struct adreno_ringbuffer *rb)
 	return rb->rptr;
 }
 
+static inline bool adreno_is_preemption_enabled(
+				struct adreno_device *adreno_dev)
+{
+	return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
+}
+
 /**
  * adreno_ctx_get_rb() - Return the ringbuffer that a context should
  * use based on priority
@@ -1308,6 +1314,14 @@ static inline struct adreno_ringbuffer *adreno_ctx_get_rb(
 	context = &(drawctxt->base);
 
+	/*
+	 * If preemption is disabled then everybody needs to go on the same
+	 * ringbuffer
+	 */
+	if (!adreno_is_preemption_enabled(adreno_dev))
+		return &(adreno_dev->ringbuffers[0]);
+
 	/*
 	 * Math to convert the priority field in context structure to an RB ID.
 	 * Divide up the context priority based on number of ringbuffer levels.
@@ -1360,12 +1374,6 @@ void adreno_writereg64(struct adreno_device *adreno_dev,
 unsigned int adreno_iommu_set_apriv(struct adreno_device *adreno_dev,
 				unsigned int *cmds, int set);
 
-static inline bool adreno_is_preemption_enabled(
-				struct adreno_device *adreno_dev)
-{
-	return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
-}
-
 static inline bool adreno_soft_fault_detect(struct adreno_device *adreno_dev)
 {
 	return adreno_dev->fast_hang_detect &&
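Below is a minimal stand-alone sketch (not driver code) of the ringbuffer selection behaviour these adreno.h hunks introduce: with preemption disabled every context is forced onto ringbuffer 0, otherwise the context priority selects a ringbuffer. The structure names, the ringbuffer count, the priority range, and the priority-division formula are illustrative assumptions; only the early return for the disabled case mirrors the change above.

/*
 * Stand-alone model of the ringbuffer selection touched by this hunk.
 * The types, counts and the priority-division formula are simplified
 * stand-ins, not the driver's real definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGBUFFERS       4    /* hypothetical ringbuffer count */
#define CONTEXT_PRIORITY_MAX  16   /* hypothetical priority levels, 0..15 */

struct model_rb { int id; };

struct model_device {
	bool preemption_enabled;
	struct model_rb ringbuffers[NUM_RINGBUFFERS];
};

static struct model_rb *model_ctx_get_rb(struct model_device *dev, int priority)
{
	/* With preemption off, every context shares ringbuffer 0. */
	if (!dev->preemption_enabled)
		return &dev->ringbuffers[0];

	/* Assumed mapping: split the priority range evenly across ringbuffers. */
	return &dev->ringbuffers[priority / (CONTEXT_PRIORITY_MAX / NUM_RINGBUFFERS)];
}

int main(void)
{
	struct model_device dev = { .preemption_enabled = false };
	int i;

	for (i = 0; i < NUM_RINGBUFFERS; i++)
		dev.ringbuffers[i].id = i;

	printf("preemption off, prio 12 -> rb %d\n",
	       model_ctx_get_rb(&dev, 12)->id);   /* always rb 0 */

	dev.preemption_enabled = true;
	printf("preemption on,  prio 12 -> rb %d\n",
	       model_ctx_get_rb(&dev, 12)->id);   /* rb 3 with this mapping */
	return 0;
}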
drivers/gpu/msm/adreno_drawctxt.c  +32 −37

@@ -217,6 +217,24 @@ done:
 	return ret;
 }
 
+static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt,
+		struct kgsl_cmdbatch **list)
+{
+	int count = 0;
+
+	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+		struct kgsl_cmdbatch *cmdbatch =
+			drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+			ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+		list[count++] = cmdbatch;
+	}
+
+	return count;
+}
+
 /**
  * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
  * @device: Pointer to the KGSL device structure for the GPU
@@ -229,8 +247,8 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
 		struct kgsl_context *context)
 {
 	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
-	struct kgsl_cmdbatch *inv_cmdbatchs[ADRENO_CONTEXT_CMDQUEUE_SIZE];
-	int i, cmd_num = 0;
+	struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+	int i, count;
 
 	trace_adreno_drawctxt_invalidate(drawctxt);
@@ -250,24 +268,15 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
 			drawctxt->timestamp);
 
 	/* Get rid of commands still waiting in the queue */
-	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
-		struct kgsl_cmdbatch *cmdbatch =
-			drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
-		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
-			ADRENO_CONTEXT_CMDQUEUE_SIZE;
+	count = drawctxt_detach_cmdbatches(drawctxt, list);
+	spin_unlock(&drawctxt->lock);
 
+	for (i = 0; i < count; i++) {
 		kgsl_cancel_events_timestamp(device, &context->events,
-			cmdbatch->timestamp);
-
-		inv_cmdbatchs[cmd_num++] = cmdbatch;
+			list[i]->timestamp);
+		kgsl_cmdbatch_destroy(list[i]);
 	}
 
-	spin_unlock(&drawctxt->lock);
-
-	for (; cmd_num > 0; cmd_num--)
-		kgsl_cmdbatch_destroy(inv_cmdbatchs[i]);
-
 	/* Make sure all pending events are processed or cancelled */
 	kgsl_flush_event_group(device, &context->events);
@@ -428,7 +437,8 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
 	struct adreno_device *adreno_dev;
 	struct adreno_context *drawctxt;
 	struct adreno_ringbuffer *rb;
-	int ret;
+	int ret, count, i;
+	struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
 
 	if (context == NULL)
 		return;
@@ -453,34 +463,19 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
 	mutex_unlock(&device->mutex);
 
 	spin_lock(&drawctxt->lock);
-	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
-		struct kgsl_cmdbatch *cmdbatch =
-			drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
-		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
-			ADRENO_CONTEXT_CMDQUEUE_SIZE;
+	count = drawctxt_detach_cmdbatches(drawctxt, list);
+	spin_unlock(&drawctxt->lock);
 
+	for (i = 0; i < count; i++) {
 		/*
 		 * If the context is detached while we are waiting for
 		 * the next command in GFT SKIP CMD, print the context
 		 * detached status here.
 		 */
-		adreno_fault_skipcmd_detached(device, drawctxt, cmdbatch);
-
-		/*
-		 * Don't hold the drawctxt mutex while the cmdbatch is being
-		 * destroyed because the cmdbatch destroy takes the device
-		 * mutex and the world falls in on itself
-		 */
-
-		kgsl_cmdbatch_destroy(cmdbatch);
-		spin_lock(&drawctxt->lock);
+		adreno_fault_skipcmd_detached(device, drawctxt, list[i]);
+		kgsl_cmdbatch_destroy(list[i]);
 	}
-	spin_unlock(&drawctxt->lock);
 
 	/*
 	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
 	 * which holds the device mutex.
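The comment removed from adreno_drawctxt_detach() states the motivation for the new helper: kgsl_cmdbatch_destroy() can take the device mutex, so it must not run while the drawctxt spinlock is held. drawctxt_detach_cmdbatches() lets both callers empty the command queue in one pass under the lock and defer the destruction until after the unlock, instead of repeatedly dropping and re-taking the lock per cmdbatch. A minimal user-space model of that pattern follows; the types, the pthread mutex standing in for the spinlock, and the destroy placeholder are assumptions for illustration only.

/*
 * Simplified model of the pattern drawctxt_detach_cmdbatches() enables:
 * drain the per-context queue into a local array while holding the context
 * lock, then release the lock before doing the (potentially blocking)
 * destroy work.  Names and types are illustrative, not the driver's.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CMDQUEUE_SIZE 128

struct cmdbatch { unsigned int timestamp; };

struct drawctxt {
	pthread_mutex_t lock;                    /* stands in for the spinlock */
	struct cmdbatch *cmdqueue[CMDQUEUE_SIZE];
	int cmdqueue_head, cmdqueue_tail;
};

/* Caller must hold ctxt->lock; mirrors the helper added in the diff. */
static int detach_cmdbatches(struct drawctxt *ctxt, struct cmdbatch **list)
{
	int count = 0;

	while (ctxt->cmdqueue_head != ctxt->cmdqueue_tail) {
		list[count++] = ctxt->cmdqueue[ctxt->cmdqueue_head];
		ctxt->cmdqueue_head = (ctxt->cmdqueue_head + 1) % CMDQUEUE_SIZE;
	}
	return count;
}

/* Placeholder for cleanup that may sleep or take other locks. */
static void destroy_cmdbatch(struct cmdbatch *batch)
{
	printf("destroying batch with timestamp %u\n", batch->timestamp);
	free(batch);
}

static void invalidate(struct drawctxt *ctxt)
{
	struct cmdbatch *list[CMDQUEUE_SIZE];
	int i, count;

	pthread_mutex_lock(&ctxt->lock);
	count = detach_cmdbatches(ctxt, list);   /* queue emptied under the lock */
	pthread_mutex_unlock(&ctxt->lock);

	for (i = 0; i < count; i++)              /* heavy cleanup done lock-free */
		destroy_cmdbatch(list[i]);
}

int main(void)
{
	struct drawctxt ctxt = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	for (i = 0; i < 3; i++) {
		struct cmdbatch *batch = malloc(sizeof(*batch));
		batch->timestamp = 100 + i;
		ctxt.cmdqueue[ctxt.cmdqueue_tail] = batch;
		ctxt.cmdqueue_tail = (ctxt.cmdqueue_tail + 1) % CMDQUEUE_SIZE;
	}
	invalidate(&ctxt);
	return 0;
}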