The rcu_report_exp_rdp() function is always invoked with its "wake"
argument set to "true", so this commit drops that parameter. The only
potential call site that would pass "false" is in the code driving the
expedited grace period, and that code uses rcu_report_exp_cpu_mult()
instead, which therefore retains its "wake" parameter.
Signed-off-by: Paul E. McKenney <[email protected]>
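
In other words, the wakeup decision moves out of the callers and into
rcu_report_exp_rdp() itself, which now unconditionally passes "true" to
rcu_report_exp_cpu_mult(). As a minimal before/after sketch of the
pattern (abridged from the diff below, with the surrounding context
elided):

	/* Before: every caller had to spell out wake == true. */
	static void rcu_report_exp_rdp(struct rcu_state *rsp,
				       struct rcu_data *rdp, bool wake)
	{
		WRITE_ONCE(rdp->deferred_qs, false);
		rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
	}

	/* After: the constant is supplied at the single point of use. */
	static void rcu_report_exp_rdp(struct rcu_state *rsp,
				       struct rcu_data *rdp)
	{
		WRITE_ONCE(rdp->deferred_qs, false);
		rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
	}

Callers correspondingly shrink from rcu_report_exp_rdp(rsp, rdp, true)
to rcu_report_exp_rdp(rsp, rdp).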
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
-static void rcu_report_exp_rdp(struct rcu_state *rsp,
-			       struct rcu_data *rdp, bool wake);
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);

 /* rcuc/rcub kthread realtime priority */

 	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
 		return;
 	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
-	rcu_report_exp_rdp(&rcu_sched_state,
-			   this_cpu_ptr(&rcu_sched_data), true);
+	rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
 }

 void rcu_softirq_qs(void)

 	/* QS for any half-done expedited RCU-sched GP. */
 	preempt_disable();
-	rcu_report_exp_rdp(&rcu_sched_state,
-			   this_cpu_ptr(rcu_sched_state.rda), true);
+	rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(rcu_sched_state.rda));
 	preempt_enable();
 	rcu_preempt_deferred_qs(current);
 	for_each_rcu_flavor(rsp)

 /*
  * Report expedited quiescent state for specified rcu_data (CPU).
  */
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
-			       bool wake)
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	WRITE_ONCE(rdp->deferred_qs, false);
-	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
+	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
 }

 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */

 		return;
 	if (rcu_is_cpu_rrupt_from_idle()) {
 		rcu_report_exp_rdp(&rcu_sched_state,
-				   this_cpu_ptr(&rcu_sched_data), true);
+				   this_cpu_ptr(&rcu_sched_data));
 		return;
 	}
 	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);

 	if (!t->rcu_read_lock_nesting) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 		    rcu_dynticks_curr_cpu_in_eqs()) {
-			rcu_report_exp_rdp(rsp, rdp, true);
+			rcu_report_exp_rdp(rsp, rdp);
 		} else {
 			rdp->deferred_qs = true;
 			resched_cpu(rdp->cpu);

 	 * still in a quiescent state in any case.)
 	 */
 	if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
-		rcu_report_exp_rdp(rdp->rsp, rdp, true);
+		rcu_report_exp_rdp(rdp->rsp, rdp);
 	else
 		WARN_ON_ONCE(rdp->deferred_qs);
 }

 	 */
 	rcu_preempt_qs();
 	if (rdp->deferred_qs)
-		rcu_report_exp_rdp(rcu_state_p, rdp, true);
+		rcu_report_exp_rdp(rcu_state_p, rdp);
 }

 /*
 	 * blocked-tasks list below.
 	 */
 	if (rdp->deferred_qs) {
-		rcu_report_exp_rdp(rcu_state_p, rdp, true);
+		rcu_report_exp_rdp(rcu_state_p, rdp);
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;