 	if (rq->nr_running >= 2)
 		goto need_kick;
+	rcu_read_lock();
 	for_each_domain(cpu, sd) {
 		struct sched_group *sg = sd->groups;
 		struct sched_group_power *sgp = sg->sgp;
 		int nr_busy = atomic_read(&sgp->nr_busy_cpus);
 		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
-			goto need_kick;
+			goto need_kick_unlock;
 		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
 		    && (cpumask_first_and(nohz.idle_cpus_mask,
 					  sched_domain_span(sd)) < cpu))
-			goto need_kick;
+			goto need_kick_unlock;
 		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
 			break;
 	}
+	rcu_read_unlock();
 	return 0;
+
+need_kick_unlock:
+	rcu_read_unlock();
 need_kick:
 	return 1;
 }
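
For reference, a sketch of how the tail of the function reads once the hunk is applied. The enclosing function is assumed to be nohz_kick_needed() in kernel/sched/fair.c (the hunk itself does not name it), with rq, cpu and sd supplied by the surrounding code. The substance of the change: for_each_domain() walks RCU-protected sched domain data, so the walk must sit inside an rcu_read_lock()/rcu_read_unlock() pair, and any exit taken from inside the loop must drop the lock before returning, hence the new need_kick_unlock label.

	/* Merged view, a sketch rather than the literal patch. */
	if (rq->nr_running >= 2)
		goto need_kick;

	rcu_read_lock();	/* for_each_domain() requires RCU protection */
	for_each_domain(cpu, sd) {
		struct sched_group *sg = sd->groups;
		struct sched_group_power *sgp = sg->sgp;
		int nr_busy = atomic_read(&sgp->nr_busy_cpus);

		/* Another busy CPU shares package resources: kick. */
		if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
			goto need_kick_unlock;

		/* Asym packing: a lower-numbered CPU in this domain is idle. */
		if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
		    && (cpumask_first_and(nohz.idle_cpus_mask,
					  sched_domain_span(sd)) < cpu))
			goto need_kick_unlock;

		if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
			break;
	}
	rcu_read_unlock();
	return 0;

need_kick_unlock:
	rcu_read_unlock();	/* jumped here from inside the locked region */
need_kick:
	return 1;		/* reached only with the lock not held */
}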