Loading drivers/gpu/msm/adreno_a6xx_gmu.c +32 −0 Original line number Diff line number Diff line Loading @@ -1462,6 +1462,37 @@ static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, } } static int a6xx_gmu_wait_for_active_transition( struct adreno_device *adreno_dev) { unsigned int reg, num_retries; struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct gmu_device *gmu = KGSL_GMU_DEVICE(device); if (!gmu_core_isenabled(device)) return 0; gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg); for (num_retries = 0; reg != GPU_HW_ACTIVE && num_retries < 100; num_retries++) { /* Wait for small time before trying again */ udelay(5); gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg); } if (reg == GPU_HW_ACTIVE) return 0; dev_err(&gmu->pdev->dev, "GMU failed to move to ACTIVE state, Current state: 0x%x\n", reg); return -ETIMEDOUT; } struct gmu_dev_ops adreno_a6xx_gmudev = { .load_firmware = a6xx_gmu_load_firmware, .oob_set = a6xx_gmu_oob_set, Loading @@ -1477,6 +1508,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .ifpc_store = a6xx_gmu_ifpc_store, .ifpc_show = a6xx_gmu_ifpc_show, .snapshot = a6xx_gmu_snapshot, .wait_for_active_transition = a6xx_gmu_wait_for_active_transition, .gmu2host_intr_mask = HFI_IRQ_MASK, .gmu_ao_intr_mask = GMU_AO_INT_MASK, }; drivers/gpu/msm/adreno_a6xx_preempt.c +21 −0 Original line number Diff line number Diff line Loading @@ -241,6 +241,7 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) unsigned int contextidr, cntl; unsigned long flags; struct adreno_preemption *preempt = &adreno_dev->preempt; struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); cntl = (((preempt->preempt_level << 6) & 0xC0) | ((preempt->skipsaverestore << 9) & 0x200) | Loading Loading @@ -360,6 +361,26 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) upper_32_bits(gpuaddr), FENCE_STATUS_WRITEDROPPED1_MASK); /* * Above fence writes will make sure GMU comes out of * IFPC state if it was in IFPC state but it doesn't * guarantee that GMU FW actually moved to ACTIVE state * i.e. wake-up from IFPC is complete. * Wait for GMU to move to ACTIVE state before triggering * preemption. This is required to make sure CP doesn't * interrupt GMU during wake-up from IFPC. */ if (GMU_DEV_OP_VALID(gmu_dev_ops, wait_for_active_transition)) { if (gmu_dev_ops->wait_for_active_transition(adreno_dev)) { adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); adreno_dispatcher_schedule(device); return; } } adreno_dev->next_rb = next; /* Start the timer to detect a stuck preemption */ Loading drivers/gpu/msm/kgsl_gmu_core.h +1 −0 Original line number Diff line number Diff line Loading @@ -149,6 +149,7 @@ struct gmu_dev_ops { unsigned int val); unsigned int (*ifpc_show)(struct adreno_device *adreno_dev); void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *); int (*wait_for_active_transition)(struct adreno_device *adreno_dev); const unsigned int gmu2host_intr_mask; const unsigned int gmu_ao_intr_mask; }; Loading Loading
drivers/gpu/msm/adreno_a6xx_gmu.c +32 −0 Original line number Diff line number Diff line Loading @@ -1462,6 +1462,37 @@ static void a6xx_gmu_snapshot(struct adreno_device *adreno_dev, } } static int a6xx_gmu_wait_for_active_transition( struct adreno_device *adreno_dev) { unsigned int reg, num_retries; struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct gmu_device *gmu = KGSL_GMU_DEVICE(device); if (!gmu_core_isenabled(device)) return 0; gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, ®); for (num_retries = 0; reg != GPU_HW_ACTIVE && num_retries < 100; num_retries++) { /* Wait for small time before trying again */ udelay(5); gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, ®); } if (reg == GPU_HW_ACTIVE) return 0; dev_err(&gmu->pdev->dev, "GMU failed to move to ACTIVE state, Current state: 0x%x\n", reg); return -ETIMEDOUT; } struct gmu_dev_ops adreno_a6xx_gmudev = { .load_firmware = a6xx_gmu_load_firmware, .oob_set = a6xx_gmu_oob_set, Loading @@ -1477,6 +1508,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .ifpc_store = a6xx_gmu_ifpc_store, .ifpc_show = a6xx_gmu_ifpc_show, .snapshot = a6xx_gmu_snapshot, .wait_for_active_transition = a6xx_gmu_wait_for_active_transition, .gmu2host_intr_mask = HFI_IRQ_MASK, .gmu_ao_intr_mask = GMU_AO_INT_MASK, };
drivers/gpu/msm/adreno_a6xx_preempt.c +21 −0 Original line number Diff line number Diff line Loading @@ -241,6 +241,7 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) unsigned int contextidr, cntl; unsigned long flags; struct adreno_preemption *preempt = &adreno_dev->preempt; struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device); cntl = (((preempt->preempt_level << 6) & 0xC0) | ((preempt->skipsaverestore << 9) & 0x200) | Loading Loading @@ -360,6 +361,26 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) upper_32_bits(gpuaddr), FENCE_STATUS_WRITEDROPPED1_MASK); /* * Above fence writes will make sure GMU comes out of * IFPC state if it was in IFPC state but it doesn't * guarantee that GMU FW actually moved to ACTIVE state * i.e. wake-up from IFPC is complete. * Wait for GMU to move to ACTIVE state before triggering * preemption. This is required to make sure CP doesn't * interrupt GMU during wake-up from IFPC. */ if (GMU_DEV_OP_VALID(gmu_dev_ops, wait_for_active_transition)) { if (gmu_dev_ops->wait_for_active_transition(adreno_dev)) { adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT); adreno_dispatcher_schedule(device); return; } } adreno_dev->next_rb = next; /* Start the timer to detect a stuck preemption */ Loading
drivers/gpu/msm/kgsl_gmu_core.h +1 −0 Original line number Diff line number Diff line Loading @@ -149,6 +149,7 @@ struct gmu_dev_ops { unsigned int val); unsigned int (*ifpc_show)(struct adreno_device *adreno_dev); void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *); /* Block until GMU FW reports ACTIVE; 0 on success, -ETIMEDOUT on timeout */ int (*wait_for_active_transition)(struct adreno_device *adreno_dev); const unsigned int gmu2host_intr_mask; const unsigned int gmu_ao_intr_mask; }; Loading