rcu: Fix RCU lockdep splat in set_task_cpu on fork path
author Peter Zijlstra <[email protected]>
Wed, 21 Apr 2010 20:02:07 +0000 (13:02 -0700)
committer Ingo Molnar <[email protected]>
Fri, 30 Apr 2010 10:03:17 +0000 (12:03 +0200)
Under CONFIG_PROVE_RCU, calling set_task_cpu() on the fork path triggers a
false-positive RCU lockdep splat: set_task_rq() ends up in task_group(),
which uses the rcu_dereference()-based task_subsys_state() without an RCU
read-side critical section in scope.

Add an RCU read-side critical section to set_task_rq() to suppress this
false positive.
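
For context, a simplified sketch of the chain that trips CONFIG_PROVE_RCU
(the shapes follow the 2.6.34-era sources but are abbreviated here, not the
verbatim kernel code):

	/* kernel/sched.c (simplified): task_group() resolves the task's
	 * cgroup-backed scheduling group via task_subsys_state(), an
	 * rcu_dereference() user. */
	static inline struct task_group *task_group(struct task_struct *p)
	{
		return container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
				    struct task_group, css);
	}

	/* include/linux/cgroup.h (simplified): under CONFIG_PROVE_RCU,
	 * rcu_dereference_check() complains unless rcu_read_lock() or one
	 * of the documented alternative locks is held. */
	static inline struct cgroup_subsys_state *
	task_subsys_state(struct task_struct *task, int subsys_id)
	{
		return rcu_dereference_check(task->cgroups->subsys[subsys_id],
					     rcu_read_lock_held() ||
					     cgroup_lock_is_held());
	}

On the fork path neither condition holds, so the check fires even though the
task_group cannot actually go away; the rcu_read_lock()/rcu_read_unlock()
pair added below documents the access and quiets the checker.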

Located-by: Eric Paris <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
LKML-Reference: <1271880131[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
kernel/sched.c

index de0bd26e520aad2076705806b9947836d1957049..3c2a54f70ffed7ca1e0954666e3d333e43d36b95 100644
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+       /*
+        * Strictly speaking this rcu_read_lock() is not needed since the
+        * task_group is tied to the cgroup, which in turn can never go away
+        * as long as there are tasks attached to it.
+        *
+        * However since task_group() uses task_subsys_state() which is an
+        * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+        */
+       rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
        p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
        p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
        p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+       rcu_read_unlock();
 }
 
 #else
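
For readability, here is set_task_rq() as it reads with the patch applied,
reassembled from the hunks above (the lines elided between the two hunks are
assumed to be the usual #endif / #ifdef CONFIG_RT_GROUP_SCHED guards):

	static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
	{
		/*
		 * Strictly speaking this rcu_read_lock() is not needed since the
		 * task_group is tied to the cgroup, which in turn can never go away
		 * as long as there are tasks attached to it.
		 *
		 * However since task_group() uses task_subsys_state() which is an
		 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
		 */
		rcu_read_lock();
	#ifdef CONFIG_FAIR_GROUP_SCHED
		p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
		p->se.parent = task_group(p)->se[cpu];
	#endif

	#ifdef CONFIG_RT_GROUP_SCHED
		p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
		p->rt.parent = task_group(p)->rt_se[cpu];
	#endif
		rcu_read_unlock();
	}

The single critical section covers both config blocks, and rcu_read_lock()
is essentially free here (a preempt-count operation, or a compiler barrier
on !CONFIG_PREEMPT builds), so documenting the access costs nothing.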