Patch summary: create an RCU-specific workqueue `rcu_gp_wq` (with a WQ_MEM_RECLAIM
rescuer) and use it for Tree SRCU callback/grace-period work and for expedited RCU
grace periods, instead of `system_power_efficient_wq` / `schedule_work()`.

--- kernel/rcu/rcu.h (+1 -0)
@@ -474,6 +474,7 @@
 void show_rcu_gp_kthreads(void);
 void rcu_force_quiescent_state(void);
 void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
+extern struct workqueue_struct *rcu_gp_wq;
 #endif /* #else #ifdef CONFIG_TINY_RCU */

 #ifdef CONFIG_RCU_NOCB_CPU

--- kernel/rcu/srcutree.c (+3 -5)
@@ -465,8 +465,7 @@
  */
 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
 {
-	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
-				   &sdp->work, delay);
+	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
 }
@@ -664,8 +663,7 @@
 	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
 		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
 		srcu_gp_start(sp);
-		queue_delayed_work(system_power_efficient_wq, &sp->work,
-				   srcu_get_delay(sp));
+		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
 	}
 	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
@@ -1198,7 +1196,7 @@
 	raw_spin_unlock_irq_rcu_node(sp);
 	if (pushgp)
-		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
+		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
 }

--- kernel/rcu/tree.c (+6 -0)
@@ -4176,6 +4176,8 @@ static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
 	pr_cont("\n");
 }

+struct workqueue_struct *rcu_gp_wq;
+
 void __init rcu_init(void)
 {
 	int cpu;
@@ -4203,6 +4205,10 @@ void __init rcu_init(void)
 		if (IS_ENABLED(CONFIG_TREE_SRCU))
 			srcu_online_cpu(cpu);
 	}
+
+	/* Create workqueue for expedited GPs and for Tree SRCU. */
+	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
+	WARN_ON(!rcu_gp_wq);
 }

 #include "tree_exp.h"

--- kernel/rcu/tree_exp.h (+1 -1)
@@ -606,7 +606,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	rew.rew_rsp = rsp;
 	rew.rew_s = s;
 	INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
-	schedule_work(&rew.rew_work);
+	queue_work(rcu_gp_wq, &rew.rew_work);
 }

 /* Wait for expedited grace period to complete. */
kernel/rcu/rcu.h (+1 -0): declare the new workqueue for non-TINY_RCU builds.
@@ -474,6 +474,7 @@
 void show_rcu_gp_kthreads(void);
 void rcu_force_quiescent_state(void);
 void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
+extern struct workqueue_struct *rcu_gp_wq;
 #endif /* #else #ifdef CONFIG_TINY_RCU */

 #ifdef CONFIG_RCU_NOCB_CPU
kernel/rcu/srcutree.c (+3 -5): queue SRCU per-CPU and grace-period work on
rcu_gp_wq instead of system_power_efficient_wq.
@@ -465,8 +465,7 @@
  */
 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
 {
-	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
-				   &sdp->work, delay);
+	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
 }
@@ -664,8 +663,7 @@
 	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
 		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
 		srcu_gp_start(sp);
-		queue_delayed_work(system_power_efficient_wq, &sp->work,
-				   srcu_get_delay(sp));
+		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
 	}
 	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
 }
@@ -1198,7 +1196,7 @@
 	raw_spin_unlock_irq_rcu_node(sp);
 	if (pushgp)
-		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
+		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
 }
kernel/rcu/tree.c (+6 -0): define rcu_gp_wq and allocate it during rcu_init()
with WQ_MEM_RECLAIM so it has a rescuer thread.
@@ -4176,6 +4176,8 @@ static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
 	pr_cont("\n");
 }

+struct workqueue_struct *rcu_gp_wq;
+
 void __init rcu_init(void)
 {
 	int cpu;
@@ -4203,6 +4205,10 @@ void __init rcu_init(void)
 		if (IS_ENABLED(CONFIG_TREE_SRCU))
 			srcu_online_cpu(cpu);
 	}
+
+	/* Create workqueue for expedited GPs and for Tree SRCU. */
+	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
+	WARN_ON(!rcu_gp_wq);
 }

 #include "tree_exp.h"
kernel/rcu/tree_exp.h (+1 -1): run expedited-grace-period work on rcu_gp_wq
rather than the system workqueue.
@@ -606,7 +606,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	rew.rew_rsp = rsp;
 	rew.rew_s = s;
 	INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
-	schedule_work(&rew.rew_work);
+	queue_work(rcu_gp_wq, &rew.rew_work);
 }

 /* Wait for expedited grace period to complete. */