Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -123,8 +123,17 @@ extern struct group_info init_groups;
 extern struct cred init_cred;
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk) \
+	.sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
 #ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
+# define INIT_PERF_EVENTS(tsk) \
 	.perf_event_mutex = \
 		__MUTEX_INITIALIZER(tsk.perf_event_mutex), \
 	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
@@ -161,6 +170,7 @@ extern struct cred init_cred;
 	}, \
 	.tasks = LIST_HEAD_INIT(tsk.tasks), \
 	INIT_PUSHABLE_TASKS(tsk) \
+	INIT_CGROUP_SCHED(tsk) \
 	.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
 	.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
 	.real_parent = &tsk, \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1244,6 +1244,9 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+	struct task_group *sched_task_group;
+#endif
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -2723,7 +2726,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
 extern long sched_group_rt_period(struct task_group *tg);
 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
 extern int task_can_switch_user(struct user_struct *up,
 				struct task_struct *tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1096,7 +1096,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
  *
  * sched_move_task() holds both and thus holding either pins the cgroup,
- * see set_task_rq().
+ * see task_group().
  *
  * Furthermore, all task_rq users should acquire both locks, see
  * task_rq_lock().
@@ -7596,6 +7596,7 @@ void sched_destroy_group(struct task_group *tg)
  */
 void sched_move_task(struct task_struct *tsk)
 {
+	struct task_group *tg;
 	int on_rq, running;
 	unsigned long flags;
 	struct rq *rq;
@@ -7610,6 +7611,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
+	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+				lockdep_is_held(&tsk->sighand->siglock)),
+			  struct task_group, css);
+	tg = autogroup_task_group(tsk, tg);
+	tsk->sched_task_group = tg;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
 		tsk->sched_class->task_move_group(tsk, on_rq);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -538,22 +538,19 @@ extern int group_balance_cpu(struct sched_group *sg);
 /*
  * Return the group to which this tasks belongs.
  *
- * We use task_subsys_state_check() and extend the RCU verification with
- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
- * task it moves into the cgroup. Therefore by holding either of those locks,
- * we pin the task to the current cgroup.
+ * We cannot use task_subsys_state() and friends because the cgroup
+ * subsystem changes that value before the cgroup_subsys::attach() method
+ * is called, therefore we cannot pin it and might observe the wrong value.
+ *
+ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
+ * core changes this before calling sched_move_task().
+ *
+ * Instead we use a 'copy' which is updated from sched_move_task() while
+ * holding both task_struct::pi_lock and rq::lock.
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
-	struct task_group *tg;
-	struct cgroup_subsys_state *css;
-
-	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-			lockdep_is_held(&p->pi_lock) ||
-			lockdep_is_held(&task_rq(p)->lock));
-	tg = container_of(css, struct task_group, css);
-
-	return autogroup_task_group(p, tg);
+	return p->sched_task_group;
 }
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement