drivers/gpu/msm/a6xx_reg.h (+3 −1)

@@ -820,7 +820,9 @@
 #define A6XX_GBIF_QSB_SIDE3             0x3c06
 #define A6XX_GBIF_HALT                  0x3c45
 #define A6XX_GBIF_HALT_ACK              0x3c46
-#define A6XX_GBIF_HALT_MASK             0x2
+#define A6XX_GBIF_CLIENT_HALT_MASK      BIT(0)
+#define A6XX_GBIF_ARB_HALT_MASK         BIT(1)
 #define A6XX_GBIF_PERF_PWR_CNT_EN       0x3cc0
 #define A6XX_GBIF_PERF_CNT_SEL          0x3cc2
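A note on the mask split above: the retired A6XX_GBIF_HALT_MASK (0x2) is exactly BIT(1), i.e. it only ever named the arbiter halt bit, so the patch introduces a named client-halt bit rather than changing any register encoding. A minimal standalone sketch verifying that arithmetic, assuming the kernel's BIT() semantics (1u << n):

#include <assert.h>

#define BIT(n) (1u << (n))

#define A6XX_GBIF_CLIENT_HALT_MASK	BIT(0)	/* 0x1: halt new client requests */
#define A6XX_GBIF_ARB_HALT_MASK		BIT(1)	/* 0x2: halt the AXI arbiter */

int main(void)
{
	/* The old single mask (0x2) covered only the arbiter bit. */
	assert(A6XX_GBIF_ARB_HALT_MASK == 0x2);
	/* Asserting both bits together would be 0x3. */
	assert((A6XX_GBIF_CLIENT_HALT_MASK | A6XX_GBIF_ARB_HALT_MASK) == 0x3);
	return 0;
}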
drivers/gpu/msm/adreno.c (+42 −2)

@@ -1415,6 +1415,43 @@ static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
 	adreno_fault_detect_start(adreno_dev);
 }

+/**
+ * adreno_clear_pending_transactions() - Clear transactions in GBIF/VBIF pipe
+ * @device: Pointer to the device whose GBIF/VBIF pipe is to be cleared
+ */
+int adreno_clear_pending_transactions(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	int ret = 0;
+
+	if (adreno_has_gbif(adreno_dev)) {
+		/* Halt new client requests */
+		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT,
+				gpudev->gbif_client_halt_mask);
+		ret = adreno_wait_for_halt_ack(device,
+				ADRENO_REG_GBIF_HALT_ACK,
+				gpudev->gbif_client_halt_mask);
+
+		/* Halt all AXI requests */
+		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT,
+				gpudev->gbif_arb_halt_mask);
+		ret = adreno_wait_for_halt_ack(device,
+				ADRENO_REG_GBIF_HALT_ACK,
+				gpudev->gbif_arb_halt_mask);
+	} else {
+		unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
+
+		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
+				mask);
+		ret = adreno_wait_for_halt_ack(device,
+				ADRENO_REG_VBIF_XIN_HALT_CTRL1, mask);
+		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
+	}
+
+	return ret;
+}
+
 static int adreno_init(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

@@ -2011,7 +2048,10 @@ static int adreno_stop(struct kgsl_device *device)
 		error = -EINVAL;
 	}

-	adreno_vbif_clear_pending_transactions(device);
+	adreno_clear_pending_transactions(device);
+
+	/* The halt is not cleared in the above function if we have GBIF */
+	adreno_deassert_gbif_halt(adreno_dev);

 	kgsl_mmu_stop(&device->mmu);

@@ -2060,7 +2100,7 @@ int adreno_reset(struct kgsl_device *device, int fault)
 	/* Try soft reset first */
 	if (adreno_try_soft_reset(device, fault)) {
 		/* Make sure VBIF is cleared before resetting */
-		ret = adreno_vbif_clear_pending_transactions(device);
+		ret = adreno_clear_pending_transactions(device);

 		if (ret == 0) {
 			ret = adreno_soft_reset(device);
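To make the new sequence in adreno_clear_pending_transactions() easy to follow outside the driver, here is a minimal userspace model of the two-stage halt handshake. The register variables and helpers (gbif_halt, gbif_halt_ack, halt_write, wait_for_halt_ack) are invented stand-ins for the real MMIO accessors, and unlike the patch, which issues both halt writes back to back, this sketch returns on the first timeout:

#include <stdio.h>
#include <errno.h>

#define BIT(n)			(1u << (n))
#define GBIF_CLIENT_HALT_MASK	BIT(0)
#define GBIF_ARB_HALT_MASK	BIT(1)

/* Fake registers standing in for A6XX_GBIF_HALT / A6XX_GBIF_HALT_ACK. */
static unsigned int gbif_halt, gbif_halt_ack;

static void halt_write(unsigned int mask)
{
	gbif_halt = mask;
	gbif_halt_ack = mask;	/* this model acks instantly; real HW may not */
}

static int wait_for_halt_ack(unsigned int mask)
{
	int retries = 100;

	/* Poll the ack register until the requested bits latch or we give up. */
	while (retries--)
		if ((gbif_halt_ack & mask) == mask)
			return 0;
	return -ETIMEDOUT;
}

static int clear_pending_transactions(void)
{
	int ret;

	/* Stage 1: block new client requests from entering the GBIF. */
	halt_write(GBIF_CLIENT_HALT_MASK);
	ret = wait_for_halt_ack(GBIF_CLIENT_HALT_MASK);
	if (ret)
		return ret;

	/* Stage 2: halt the arbiter so in-flight AXI requests drain. */
	halt_write(GBIF_ARB_HALT_MASK);
	return wait_for_halt_ack(GBIF_ARB_HALT_MASK);
}

int main(void)
{
	int ret = clear_pending_transactions();

	gbif_halt = 0;	/* deassert afterwards, as adreno_deassert_gbif_halt() does */
	printf("halt sequence %s\n", ret ? "timed out" : "acked");
	return ret ? 1 : 0;
}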
drivers/gpu/msm/adreno.h (+10 −41)

@@ -905,6 +905,8 @@ struct adreno_gpudev {
 	struct adreno_irq *irq;
 	int num_prio_levels;
 	unsigned int vbif_xin_halt_ctrl0_mask;
+	unsigned int gbif_client_halt_mask;
+	unsigned int gbif_arb_halt_mask;
 	/* GPU specific function hooks */
 	void (*irq_trace)(struct adreno_device *, unsigned int status);
 	void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);

@@ -1898,11 +1900,11 @@ static inline bool adreno_has_gbif(struct adreno_device *adreno_dev)
 }

 /**
- * adreno_wait_for_vbif_halt_ack() - wait for VBIF acknowledgment
+ * adreno_wait_for_halt_ack() - wait for GBIF/VBIF acknowledgment
  * for given HALT request.
  * @ack_reg: register offset to wait for acknowledge
  */
-static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
+static inline int adreno_wait_for_halt_ack(struct kgsl_device *device,
 	int ack_reg, unsigned int mask)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

@@ -1919,7 +1921,7 @@ static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
 			break;
 		if (time_after(jiffies, wait_for_vbif)) {
 			KGSL_DRV_ERR(device,
-				"Wait limit reached for VBIF XIN Halt\n");
+				"Wait limit reached for GBIF/VBIF Halt\n");
 			ret = -ETIMEDOUT;
 			break;
 		}

@@ -1928,48 +1930,15 @@ static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
 	return ret;
 }

-/**
- * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
- * @device: Pointer to the device whose VBIF pipe is to be cleared
- */
-static inline int adreno_vbif_clear_pending_transactions(
-	struct kgsl_device *device)
+static inline void adreno_deassert_gbif_halt(struct adreno_device *adreno_dev)
 {
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
-	int ret = 0;
-
-	if (adreno_has_gbif(adreno_dev)) {
-		/*
-		 * Halt GBIF GX first and then CX part.
-		 * Need to release CX Halt explicitly in case of SW_RESET.
-		 * GX Halt release will be taken care by SW_RESET internally.
-		 */
-		if (gpudev->gx_is_on(adreno_dev)) {
-			adreno_writereg(adreno_dev, ADRENO_REG_RBBM_GPR0_CNTL,
-					GBIF_HALT_REQUEST);
-			ret = adreno_wait_for_vbif_halt_ack(device,
-					ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
-					VBIF_RESET_ACK_MASK);
-			if (ret)
-				return ret;
-		}
-
-		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
-		ret = adreno_wait_for_vbif_halt_ack(device,
-				ADRENO_REG_GBIF_HALT_ACK, mask);
-	} else {
-		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
-			mask);
-		ret = adreno_wait_for_vbif_halt_ack(device,
-				ADRENO_REG_VBIF_XIN_HALT_CTRL1, mask);
-		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
-	}
-
-	return ret;
+	if (adreno_has_gbif(adreno_dev))
+		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, 0x0);
 }

 int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
 	enum adreno_regs offset, unsigned int val, unsigned int fence_mask);
+
+int adreno_clear_pending_transactions(struct kgsl_device *device);
+
 #endif /*__ADRENO_H */
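The renamed adreno_wait_for_halt_ack() keeps the same jiffies-based deadline loop it had under the old name. Below is a hedged userspace analogue of that pattern, with clock() and a plain comparison standing in for jiffies/time_after(); read_ack_reg() is a made-up stand-in for the driver's register read:

#include <stdio.h>
#include <time.h>
#include <errno.h>

/* Made-up stand-in for the MMIO read; this "hardware" acks on the
 * second poll. */
static unsigned int read_ack_reg(void)
{
	static int polls;
	return ++polls > 1 ? 0x3 : 0x0;
}

/* Userspace analogue of the kernel loop: read, test mask, check deadline. */
static int wait_for_halt_ack(unsigned int mask, double timeout_sec)
{
	clock_t deadline = clock() + (clock_t)(timeout_sec * CLOCKS_PER_SEC);

	for (;;) {
		if ((read_ack_reg() & mask) == mask)
			return 0;
		if (clock() > deadline) {
			fprintf(stderr,
				"Wait limit reached for GBIF/VBIF Halt\n");
			return -ETIMEDOUT;
		}
	}
}

int main(void)
{
	return wait_for_halt_ack(0x3, 0.1) ? 1 : 0;
}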
drivers/gpu/msm/adreno_a6xx.c (+4 −10)

@@ -2272,8 +2272,6 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
 	return _load_gmu_firmware(device);
 }

-#define GBIF_CX_HALT_MASK BIT(1)
-
 static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

@@ -2313,12 +2311,8 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
 	if (!vbif_acked)
 		return -ETIMEDOUT;

-	/*
-	 * GBIF GX halt will be released automatically by sw_reset.
-	 * Release GBIF CX halt after sw_reset
-	 */
-	if (adreno_has_gbif(adreno_dev))
-		kgsl_regrmw(device, A6XX_GBIF_HALT, GBIF_CX_HALT_MASK, 0);
+	/* Clear GBIF client halt and CX arbiter halt */
+	adreno_deassert_gbif_halt(adreno_dev);

 	a6xx_sptprac_enable(adreno_dev);

@@ -3752,8 +3746,8 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 			KGSL_PERFCOUNTER_GROUP_VBIF_PWR].reg_count
 				= ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);

-		gpudev->vbif_xin_halt_ctrl0_mask = A6XX_GBIF_HALT_MASK;
+		gpudev->gbif_client_halt_mask = A6XX_GBIF_CLIENT_HALT_MASK;
+		gpudev->gbif_arb_halt_mask = A6XX_GBIF_ARB_HALT_MASK;
 	} else
 		gpudev->vbif_xin_halt_ctrl0_mask =
 					A6XX_VBIF_XIN_HALT_CTRL0_MASK;
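Taken together, the a6xx hooks now follow one ordering on GBIF parts: adreno_clear_pending_transactions() halts clients and then the arbiter before a stop or soft reset, and adreno_deassert_gbif_halt() writes 0 to A6XX_GBIF_HALT afterwards. This replaces the old asymmetric path, which halted the GX side through ADRENO_REG_RBBM_GPR0_CNTL when gx_is_on(), relied on SW_RESET to release the GX halt internally, and cleared only the CX bit via kgsl_regrmw().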