--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -123,8 +123,17 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk)                        \
+   .sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
 #ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk)                 \
+# define INIT_PERF_EVENTS(tsk)                     \
    .perf_event_mutex =                         \
         __MUTEX_INITIALIZER(tsk.perf_event_mutex),     \
    .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
@@ -161,6 +170,7 @@ extern struct cred init_cred;
    },                              \
    .tasks      = LIST_HEAD_INIT(tsk.tasks),            \
    INIT_PUSHABLE_TASKS(tsk)                    \
+   INIT_CGROUP_SCHED(tsk)                      \
    .ptraced    = LIST_HEAD_INIT(tsk.ptraced),          \
    .ptrace_entry   = LIST_HEAD_INIT(tsk.ptrace_entry),     \
    .real_parent    = &tsk,                     \
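Note on the hunk above: INIT_CGROUP_SCHED uses a common kernel idiom, a macro that expands to a designated-initializer fragment (trailing comma included) under one config option and to nothing otherwise, so the same INIT_TASK initializer list compiles either way. Below is a minimal standalone sketch of that idiom; every name in it (HAVE_GROUPS, struct task, root_group) is invented for illustration, and only the pattern matches the patch.

    /*
     * Sketch of the conditional-initializer idiom used by
     * INIT_CGROUP_SCHED; all names here are invented.
     */
    #include <stdio.h>

    #define HAVE_GROUPS 1    /* stands in for CONFIG_CGROUP_SCHED */

    struct group { int id; };
    static struct group root_group = { .id = 0 };

    struct task {
        int pid;
    #if HAVE_GROUPS
        struct group *group;
    #endif
        int prio;
    };

    #if HAVE_GROUPS
    /* Expands to one initializer entry, trailing comma included. */
    # define INIT_GROUP(t)  .group = &root_group,
    #else
    /* Expands to nothing when the field does not exist. */
    # define INIT_GROUP(t)
    #endif

    /* The fragment splices between the neighbouring entries. */
    static struct task init_task = {
        .pid  = 0,
        INIT_GROUP(init_task)
        .prio = 120,
    };

    int main(void)
    {
    #if HAVE_GROUPS
        printf("init_task group id: %d\n", init_task.group->id);
    #endif
        return 0;
    }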
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1244,6 +1244,9 @@ struct task_struct {
    const struct sched_class *sched_class;
    struct sched_entity se;
    struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+   struct task_group *sched_task_group;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
    /* list of struct preempt_notifier: */
@@ -2723,7 +2726,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 
 extern int task_can_switch_user(struct user_struct *up,
                    struct task_struct *tsk);
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1096,7 +1096,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
     * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
     *
     * sched_move_task() holds both and thus holding either pins the cgroup,
-    * see set_task_rq().
+    * see task_group().
     *
     * Furthermore, all task_rq users should acquire both locks, see
     * task_rq_lock().
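The comment corrected above states the patch's central locking rule: the writer (sched_move_task()) takes both ->pi_lock and rq->lock, so a reader holding either one of them excludes the writer and sees a stable value. A minimal userspace sketch of that two-lock-write / one-lock-read pattern follows, with pthread mutexes standing in for pi_lock and rq->lock; all names are invented.

    #include <pthread.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;  /* ~pi_lock  */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;  /* ~rq->lock */
    static int shared_group;

    /* Writer: takes BOTH locks, like sched_move_task(). */
    void writer_move(int new_group)
    {
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        shared_group = new_group;
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
    }

    /* Readers: EITHER lock suffices, because the writer holds both. */
    int read_via_a(void)
    {
        pthread_mutex_lock(&lock_a);
        int g = shared_group;
        pthread_mutex_unlock(&lock_a);
        return g;
    }

    int read_via_b(void)
    {
        pthread_mutex_lock(&lock_b);
        int g = shared_group;
        pthread_mutex_unlock(&lock_b);
        return g;
    }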
@@ -7596,6 +7596,7 @@ void sched_destroy_group(struct task_group *tg)
  */
 void sched_move_task(struct task_struct *tsk)
 {
+   struct task_group *tg;
    int on_rq, running;
    unsigned long flags;
    struct rq *rq;
@@ -7610,6 +7611,12 @@ void sched_move_task(struct task_struct *tsk)
    if (unlikely(running))
        tsk->sched_class->put_prev_task(rq, tsk);
 
+   tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+               lockdep_is_held(&tsk->sighand->siglock)),
+             struct task_group, css);
+   tg = autogroup_task_group(tsk, tg);
+   tsk->sched_task_group = tg;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
    if (tsk->sched_class->task_move_group)
        tsk->sched_class->task_move_group(tsk, on_rq);
 
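The shape of the sched_move_task() change above, reduced to its essentials: look up the group the cgroup core currently reports, let autogroup override it, then publish the result into the task's own copy before the scheduler-class callbacks run. In this sketch, lookup_cgroup_group() and apply_autogroup() are hypothetical stand-ins for the container_of(task_subsys_state_check(...)) lookup and autogroup_task_group(), and the struct is pared down to the one field the patch adds.

    struct task_group;

    struct task_struct {
        struct task_group *sched_task_group;   /* the copy the patch adds */
        /* ... */
    };

    /* Hypothetical stand-ins for the cgroup/autogroup lookups. */
    struct task_group *lookup_cgroup_group(struct task_struct *tsk);
    struct task_group *apply_autogroup(struct task_struct *tsk,
                                       struct task_group *tg);

    void move_task_sketch(struct task_struct *tsk)
    {
        struct task_group *tg;

        tg = lookup_cgroup_group(tsk);  /* what the cgroup core says now   */
        tg = apply_autogroup(tsk, tg);  /* autogroup may substitute its tg */
        tsk->sched_task_group = tg;     /* publish the stable copy         */
    }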
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -538,22 +538,19 @@ extern int group_balance_cpu(struct sched_group *sg);
 /*
  * Return the group to which this task belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
+ * We cannot use task_subsys_state() and friends because the cgroup
+ * subsystem changes that value before the cgroup_subsys::attach() method
+ * is called, therefore we cannot pin it and might observe the wrong value.
+ *
+ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
+ * core changes this before calling sched_move_task().
+ *
+ * Instead we use a 'copy' which is updated from sched_move_task() while
+ * holding both task_struct::pi_lock and rq::lock.
 */
 static inline struct task_group *task_group(struct task_struct *p)
 {
-   struct task_group *tg;
-   struct cgroup_subsys_state *css;
-
-   css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-           lockdep_is_held(&p->pi_lock) ||
-           lockdep_is_held(&task_rq(p)->lock));
-   tg = container_of(css, struct task_group, css);
-
-   return autogroup_task_group(p, tg);
+   return p->sched_task_group;
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
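Taken together, the race this patch closes can be pictured as two pointers per task: the view owned by the cgroup core, which attach-time code updates before cgroup_subsys::attach() (and hence before sched_move_task()) runs, and the scheduler's own copy, which only sched_move_task() writes while holding both pi_lock and rq->lock. A schematic contrast, with all names invented; this is not kernel code.

    struct task_group;

    struct task_sketch {
        struct task_group *cgroup_view;       /* flips early, mid-move   */
        struct task_group *sched_task_group;  /* flips late, under locks */
    };

    /* Pre-patch shape: may return the NEW group while the scheduler
     * is still running the task in the OLD one. */
    struct task_group *task_group_racy(struct task_sketch *p)
    {
        return p->cgroup_view;
    }

    /* Post-patch shape: always matches what the scheduler actually
     * switched the task to. */
    struct task_group *task_group_stable(struct task_sketch *p)
    {
        return p->sched_task_group;
    }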