drivers/gpu/msm/adreno.c  +5 −1

@@ -1823,7 +1823,7 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 	status = kgsl_mmu_start(device);
 	if (status)
-		goto error_pwr_off;
+		goto error_boot_oob_clear;
 
 	status = adreno_ocmem_malloc(adreno_dev);
 	if (status) {

@@ -2036,6 +2036,10 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
 
+error_boot_oob_clear:
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gmu_core_dev_oob_clear(device, oob_boot_slumber);
+
 error_pwr_off:
 	/* set the state back to original state */
 	kgsl_pwrctrl_change_state(device, state);
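The adreno.c change follows the usual kernel goto-unwind idiom: cleanup labels are stacked in reverse order of setup, and jumping to a label also runs every label below it, so the new error_boot_oob_clear label clears the boot OOB and then falls through to error_pwr_off. A minimal, runnable userspace sketch of that ordering (the setup and cleanup names are hypothetical stand-ins, not KGSL APIs):

#include <stdio.h>

/* Hypothetical setup steps; each returns 0 on success. */
static int claim_boot_oob(void)  { puts("claim boot OOB"); return 0; }
static int start_mmu(void)       { puts("start mmu"); return -1; /* simulate failure */ }

/* Hypothetical cleanup steps, mirroring error_boot_oob_clear / error_pwr_off. */
static void clear_boot_oob(void) { puts("clear boot OOB"); }
static void power_off(void)      { puts("power off"); }

static int start(void)
{
	int ret;

	ret = claim_boot_oob();
	if (ret)
		goto err_pwr_off;

	ret = start_mmu();
	if (ret)
		goto err_oob_clear;	/* jumping to err_pwr_off here would leak the OOB */

	return 0;

	/* Labels fall through: err_oob_clear also runs err_pwr_off. */
err_oob_clear:
	clear_boot_oob();
err_pwr_off:
	power_off();
	return ret;
}

int main(void)
{
	return start() ? 1 : 0;
}

With start_mmu() failing, the sketch prints the OOB clear before the power off, which is the ordering the patch restores in _adreno_start() for the kgsl_mmu_start() failure path.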
drivers/gpu/msm/kgsl_gmu.c  +28 −12

@@ -26,6 +26,8 @@
 #define GMU_CONTEXT_KERNEL	1
 #define GMU_KERNEL_ENTRIES	16
 
+#define GMU_CM3_CFG_NONMASKINTR_SHIFT	9
+
 struct gmu_iommu_context {
 	const char *name;
 	struct device *dev;

@@ -881,6 +883,24 @@ static int gmu_rpmh_init(struct kgsl_device *device,
 	return rpmh_arc_votes_init(device, gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
 }
 
+static void send_nmi_to_gmu(struct adreno_device *adreno_dev)
+{
+	/* Mask so there's no interrupt caused by NMI */
+	adreno_write_gmureg(adreno_dev,
+			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
+
+	/* Make sure the interrupt is masked before causing it */
+	wmb();
+
+	adreno_write_gmureg(adreno_dev,
+		ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
+	adreno_write_gmureg(adreno_dev,
+		ADRENO_REG_GMU_CM3_CFG, (1 << GMU_CM3_CFG_NONMASKINTR_SHIFT));
+
+	/* Make sure the NMI is invoked before we proceed */
+	wmb();
+}
+
 static irqreturn_t gmu_irq_handler(int irq, void *data)
 {
 	struct kgsl_device *device = data;

@@ -902,6 +922,13 @@ static irqreturn_t gmu_irq_handler(int irq, void *data)
 				ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
 				(mask | GMU_INT_WDOG_BITE));
 
+		send_nmi_to_gmu(adreno_dev);
+
+		/*
+		 * There is sufficient delay for the GMU to have finished
+		 * handling the NMI before snapshot is taken, as the fault
+		 * worker is scheduled below.
+		 */
+
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU watchdog expired interrupt received\n");
 		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);

@@ -1472,19 +1499,8 @@ static void gmu_snapshot(struct kgsl_device *device)
 	struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 
-	/* Mask so there's no interrupt caused by NMI */
-	adreno_write_gmureg(adreno_dev,
-			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
-
-	/* Make sure the interrupt is masked before causing it */
-	wmb();
-
-	adreno_write_gmureg(adreno_dev,
-		ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
-	adreno_write_gmureg(adreno_dev,
-		ADRENO_REG_GMU_CM3_CFG, (1 << 9));
-
-	/* Wait for the NMI to be handled */
-	wmb();
+	send_nmi_to_gmu(adreno_dev);
+
+	/* Wait for the NMI to be handled */
 	udelay(100);
 
 	kgsl_device_snapshot(device, NULL, true);
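The reason send_nmi_to_gmu() masks first and barriers twice is spelled out in its comments: the mask write has to land before the NMI is raised, so that the interrupt caused by NMI handling on the GMU never reaches the host, and the trigger write has to be posted before the caller starts waiting. A rough userspace mock of that ordering, using an invented register array and __sync_synchronize() as a stand-in for wmb():

#include <stdint.h>
#include <stdio.h>

/* Invented register indices for the sketch; not the real GMU register map. */
enum { REG_GMU2HOST_INTR_MASK, REG_NMI_CONTROL_STATUS, REG_CM3_CFG, NUM_REGS };

#define CM3_CFG_NONMASKINTR_SHIFT 9

static volatile uint32_t regs[NUM_REGS];

static void reg_write(int reg, uint32_t val)
{
	regs[reg] = val;
	printf("write reg %d = 0x%08x\n", reg, (unsigned int)val);
}

/* Mirrors the write ordering of send_nmi_to_gmu(). */
static void send_nmi(void)
{
	/* 1. Mask GMU-to-host interrupts so NMI handling cannot interrupt the host. */
	reg_write(REG_GMU2HOST_INTR_MASK, 0xFFFFFFFF);

	/* 2. Ensure the mask is visible before the NMI is raised (wmb() in the driver). */
	__sync_synchronize();

	/* 3. Clear the NMI control/status register, then set the bit that raises the NMI. */
	reg_write(REG_NMI_CONTROL_STATUS, 0);
	reg_write(REG_CM3_CFG, 1u << CM3_CFG_NONMASKINTR_SHIFT);

	/* 4. Ensure the trigger is posted before the caller starts waiting on the GMU. */
	__sync_synchronize();
}

int main(void)
{
	send_nmi();
	return 0;
}

The two call sites then differ only in how they wait: gmu_snapshot() busy-waits with udelay(100) before taking the snapshot, while the watchdog interrupt path relies on the scheduling latency of the fault worker, as the added comment in gmu_irq_handler() notes.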