kernel/irq/cpuhotplug.c (+6 −2)

@@ -115,6 +115,8 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		affinity = &available_cpus;
 
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		const struct cpumask *default_affinity;
+
 		/*
 		 * If the interrupt is managed, then shut it down and leave
 		 * the affinity untouched.
@@ -124,6 +126,8 @@ static bool migrate_one_irq(struct irq_desc *desc)
 			irq_shutdown(desc);
 			return false;
 		}
+		default_affinity = desc->affinity_hint ? : irq_default_affinity;
+
 		/*
 		 * The order of preference for selecting a fallback CPU is
 		 *
@@ -133,9 +137,9 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		 */
 		cpumask_andnot(&available_cpus, cpu_online_mask,
 			       cpu_isolated_mask);
-		if (cpumask_intersects(&available_cpus, irq_default_affinity))
+		if (cpumask_intersects(&available_cpus, default_affinity))
 			cpumask_and(&available_cpus, &available_cpus,
-				    irq_default_affinity);
+				    default_affinity);
 		else if (cpumask_empty(&available_cpus))
 			affinity = cpu_online_mask;
 
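For context, `desc->affinity_hint ? : irq_default_affinity` uses the GNU C `? :` shorthand: it evaluates to `affinity_hint` when that pointer is non-NULL and to `irq_default_affinity` otherwise, so the hotplug fallback path now honours a driver-supplied hint when one exists. Below is a minimal sketch of a driver publishing such a hint; `my_driver_setup_irq`, `my_irq` and `my_pref_mask` are made-up names, while `irq_set_affinity_hint()` is the real kernel API that fills in `desc->affinity_hint`.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

static struct cpumask my_pref_mask;	/* illustrative; must outlive the IRQ */

static int my_driver_setup_irq(unsigned int my_irq)
{
	cpumask_clear(&my_pref_mask);
	cpumask_set_cpu(2, &my_pref_mask);	/* prefer CPU2 ... */
	cpumask_set_cpu(3, &my_pref_mask);	/* ... and CPU3 */

	/*
	 * Records the mask pointer in desc->affinity_hint; with the
	 * patch above, migrate_one_irq() falls back to this hint
	 * (rather than irq_default_affinity) when every CPU in the
	 * current affinity mask goes offline.
	 */
	return irq_set_affinity_hint(my_irq, &my_pref_mask);
}

Because only the pointer is recorded, the mask needs static lifetime, and a driver should clear the hint with irq_set_affinity_hint(my_irq, NULL) before tearing the IRQ down.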
kernel/power/qos.c (+22 −5)

@@ -541,19 +541,29 @@ static void pm_qos_irq_release(struct kref *ref)
 }
 
 static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
-		const cpumask_t *mask)
+		const cpumask_t *unused_mask)
 {
 	unsigned long flags;
 	struct pm_qos_request *req = container_of(notify,
 			struct pm_qos_request, irq_notify);
 	struct pm_qos_constraints *c =
 		pm_qos_array[req->pm_qos_class]->constraints;
+	struct irq_desc *desc = irq_to_desc(req->irq);
+	struct cpumask *new_affinity =
+		irq_data_get_effective_affinity_mask(&desc->irq_data);
+	bool affinity_changed = false;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	cpumask_copy(&req->cpus_affine, mask);
+	if (!cpumask_equal(&req->cpus_affine, new_affinity)) {
+		cpumask_copy(&req->cpus_affine, new_affinity);
+		affinity_changed = true;
+	}
+
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
-	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio);
+	if (affinity_changed)
+		pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ,
+				req->node.prio);
 }
 #endif
 
@@ -598,9 +608,16 @@ void pm_qos_add_request(struct pm_qos_request *req,
 		if (!desc)
 			return;
 
-		mask = desc->irq_data.common->affinity;
+		/*
+		 * If the IRQ is not started, the effective affinity
+		 * won't be set. So fallback to the default affinity.
+		 */
+		mask = irq_data_get_effective_affinity_mask(
+				&desc->irq_data);
+		if (cpumask_empty(mask))
+			mask = irq_data_get_affinity_mask(
+					&desc->irq_data);
 
-		/* Get the current affinity */
 		cpumask_copy(&req->cpus_affine, mask);
 		req->irq_notify.irq = req->irq;
 		req->irq_notify.notify = pm_qos_irq_notify;
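The notifier now reads the IRQ's effective affinity straight from the descriptor (its mask argument becomes unused_mask) and skips pm_qos_update_target() when nothing changed, while pm_qos_add_request() falls back to the configured affinity for an IRQ that has not been started yet, since the effective mask is only populated at startup. A sketch of the caller side these paths serve, assuming the downstream IRQ-affine pm_qos extension this diff patches (the `type`/`irq` request fields and `PM_QOS_REQ_AFFINE_IRQ`); `my_req` and `my_driver_irq` are made-up names:

#include <linux/pm_qos.h>

static struct pm_qos_request my_req;	/* illustrative */

static void my_driver_add_latency_req(unsigned int my_driver_irq)
{
	my_req.type = PM_QOS_REQ_AFFINE_IRQ;
	my_req.irq = my_driver_irq;

	/*
	 * With the patch above, pm_qos_add_request() snapshots the IRQ's
	 * effective affinity (falling back to the configured affinity if
	 * the IRQ is not started yet) and registers pm_qos_irq_notify(),
	 * so the request tracks the IRQ as it migrates between CPUs.
	 */
	pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 100 /* usec */);
}

Gating pm_qos_update_target() on affinity_changed avoids re-evaluating the constraint on every affinity notification when the effective mask is in fact unchanged.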