drivers/acpi/events/evgpe.c (+13 −4)

--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -501,6 +501,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
  * an interrupt handler.
  *
  ******************************************************************************/
+static void acpi_ev_asynch_enable_gpe(void *context);
 
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 {
@@ -576,22 +577,30 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 					    method_node)));
 		}
 	}
-
-	if ((local_gpe_event_info.flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+	/* Defer enabling of GPE until all notify handlers are done */
+	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, gpe_event_info);
+	return_VOID;
+}
+
+static void acpi_ev_asynch_enable_gpe(void *context)
+{
+	struct acpi_gpe_event_info *gpe_event_info = context;
+	acpi_status status;
+	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
 	    ACPI_GPE_LEVEL_TRIGGERED) {
 		/*
 		 * GPE is level-triggered, we clear the GPE status bit after
 		 * handling the event.
 		 */
-		status = acpi_hw_clear_gpe(&local_gpe_event_info);
+		status = acpi_hw_clear_gpe(gpe_event_info);
 		if (ACPI_FAILURE(status)) {
 			return_VOID;
 		}
 	}
 
 	/* Enable this GPE */
 
-	(void)acpi_hw_write_gpe_enable_reg(&local_gpe_event_info);
+	(void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
 	return_VOID;
 }
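For context only, not part of the patch: the evgpe.c hunk works because kacpi_notify_wq is created with create_singlethread_workqueue() in osl.c, so its work items run one at a time in submission order; queueing acpi_ev_asynch_enable_gpe behind the already-queued notify handlers is what guarantees the GPE is re-enabled only after they have all run. Below is a minimal, hypothetical sketch of that ordering trick using the generic workqueue API; every demo_* name is invented and merely stands in for the ACPI ones.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Stand-in for kacpi_notify_wq: single worker, items run in FIFO order. */
static struct workqueue_struct *demo_notify_wq;

static void demo_enable_step(struct work_struct *work)
{
	/* Runs only after every item queued before it has completed,
	 * i.e. after all pending "notify handler" items. */
	pr_info("demo: GPE would be re-enabled here\n");
}

static DECLARE_WORK(demo_enable_work, demo_enable_step);

static void demo_handle_event(struct work_struct *work)
{
	pr_info("demo: GPE method / notify handling runs here\n");

	/* Defer the enable step: because the queue is ordered, this item
	 * cannot run until the handler items ahead of it have finished. */
	queue_work(demo_notify_wq, &demo_enable_work);
}

static DECLARE_WORK(demo_event_work, demo_handle_event);

static int __init demo_init(void)
{
	demo_notify_wq = create_singlethread_workqueue("demo_notify");
	if (!demo_notify_wq)
		return -ENOMEM;

	queue_work(demo_notify_wq, &demo_event_work);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_notify_wq);	/* drains queued work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

destroy_workqueue() drains the queue before returning, so both stages have completed by the time the sketch module unloads.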
drivers/acpi/osl.c (+8 −34)

--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -665,25 +665,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
 	dpc->function(dpc->context);
 	kfree(dpc);
 
-	/* Yield cpu to notify thread */
-	cond_resched();
-
 	return;
 }
 
-static void acpi_os_execute_notify(struct work_struct *work)
-{
-	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
-
-	if (!dpc) {
-		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
-		return;
-	}
-
-	dpc->function(dpc->context);
-
-	kfree(dpc);
-
-	return;
-}
-
@@ -707,7 +688,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
-
+	struct workqueue_struct *queue;
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
 			  "Scheduling function [%p(%p)] for deferred execution.\n",
 			  function, context));
@@ -731,21 +712,14 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 	dpc->function = function;
 	dpc->context = context;
 
-	if (type == OSL_NOTIFY_HANDLER) {
-		INIT_WORK(&dpc->work, acpi_os_execute_notify);
-		if (!queue_work(kacpi_notify_wq, &dpc->work)) {
-			status = AE_ERROR;
-			kfree(dpc);
-		}
-	} else {
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-		if (!queue_work(kacpid_wq, &dpc->work)) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-					  "Call to queue_work() failed.\n"));
-			status = AE_ERROR;
-			kfree(dpc);
-		}
+	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+	queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
+	if (!queue_work(queue, &dpc->work)) {
+		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+				  "Call to queue_work() failed.\n"));
+		status = AE_ERROR;
+		kfree(dpc);
 	}
 
 	return_ACPI_STATUS(status);
 }
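Also for context, hypothetical rather than taken from the patch: after the osl.c change there is one work handler for both queues, the only per-type decision is which workqueue receives the item, and a rejected queue_work() means the handler will never run to free the carrier, so the caller frees it and reports an error. A sketch of that shape, assuming the two queues are created elsewhere (for example as in the previous sketch); all demo_* identifiers are invented.

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Stand-ins for kacpid_wq / kacpi_notify_wq, created elsewhere. */
extern struct workqueue_struct *demo_wq;
extern struct workqueue_struct *demo_notify_wq;

enum demo_exec_type { DEMO_GPE_HANDLER, DEMO_NOTIFY_HANDLER };

/* Carrier for one deferred call, in the spirit of struct acpi_os_dpc. */
struct demo_dpc {
	struct work_struct work;
	void (*function)(void *);
	void *context;
};

/* Single work handler for both queues: run the callback, free the carrier. */
static void demo_execute_deferred(struct work_struct *work)
{
	struct demo_dpc *dpc = container_of(work, struct demo_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

/* Pick the target queue from the caller's type; exactly one queue_work() call. */
static int demo_execute(enum demo_exec_type type,
			void (*function)(void *), void *context)
{
	struct workqueue_struct *queue;
	struct demo_dpc *dpc;

	dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);
	if (!dpc)
		return -ENOMEM;

	dpc->function = function;
	dpc->context = context;
	INIT_WORK(&dpc->work, demo_execute_deferred);

	queue = (type == DEMO_NOTIFY_HANDLER) ? demo_notify_wq : demo_wq;
	if (!queue_work(queue, &dpc->work)) {
		/* Not queued, so the handler will never free it: do it here. */
		kfree(dpc);
		return -EBUSY;
	}
	return 0;
}

queue_work() returns 0 only when the given work_struct is already pending; for a freshly allocated and initialized carrier that amounts to a "could not queue" failure, which is why acpi_os_execute() maps it to AE_ERROR and frees the dpc itself.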