--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -37,0 +38 @@
+#include <linux/wait_bit.h>
@@ -1706,8 +1707,2 @@ void sched_ttwu_pending(void)
-	while (llist) {
-		int wake_flags = 0;
-
-		p = llist_entry(llist, struct task_struct, wake_entry);
-		llist = llist_next(llist);
-
-		ttwu_do_activate(rq, p, wake_flags);
-	}
+	llist_for_each_entry_safe(p, t, llist, wake_entry)
+		ttwu_do_activate(rq, p, 0);
@@ -6194,3 +6188,0 @@ int sched_cpu_deactivate(unsigned int cpu)
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
@@ -6199,4 +6191 @@ int sched_cpu_deactivate(unsigned int cpu)
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
+	synchronize_rcu_mult(call_rcu, call_rcu_sched);
@@ -6298 +6286,0 @@ void __init sched_init_smp(void)
-	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
@@ -6402 +6389,0 @@ void __init sched_init_smp(void)
-	sched_clock_init_late();
@@ -6409 +6395,0 @@ void __init sched_init_smp(void)
-	sched_clock_init_late();
@@ -6445,13 +6430,0 @@ static struct kmem_cache *task_group_cache __read_mostly;
-#define WAIT_TABLE_BITS 8
-#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
-static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
-
-wait_queue_head_t *bit_waitqueue(void *word, int bit)
-{
-	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
-	unsigned long val = (unsigned long)word << shift | bit;
-
-	return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
-}
-EXPORT_SYMBOL(bit_waitqueue);
-
@@ -6468,2 +6441 @@ void __init sched_init(void)
-	for (i = 0; i < WAIT_TABLE_SIZE; i++)
-		init_waitqueue_head(bit_wait_table + i);
+	wait_bit_init();
@@ -504 +502,5 @@ struct sched_group_capacity {
-	unsigned long cpumask[0];	/* iteration mask */
+#ifdef CONFIG_SCHED_DEBUG
+	int id;
+#endif
+
+	unsigned long cpumask[0];	/* balance mask */
@@ -531,2 +533 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
+ * See build_balance_mask().
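
For context on the sched_ttwu_pending() hunk above: the open-coded llist walk is replaced by llist_for_each_entry_safe(), which caches the next pointer before the loop body runs. That matters because ttwu_do_activate() hands the task off, and a woken task may immediately reuse its wake_entry node, so the list cannot be walked with a plain next-pointer chase. Below is a minimal userspace sketch of that pattern; the real macro lives in include/linux/llist.h, and the names here (llnode, demo_task) are illustrative, not the kernel's.

/*
 * Userspace sketch of the llist_for_each_entry_safe() pattern: cache the
 * next entry in 'n' before running the body, so the body may free or
 * reuse the current node.
 */
#include <stddef.h>
#include <stdio.h>

struct llnode {
	struct llnode *next;
};

struct demo_task {
	int pid;
	struct llnode wake_entry;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in for the kernel macro from include/linux/llist.h. */
#define llist_for_each_entry_safe(pos, n, head, member)			\
	for (pos = (head) ? container_of((head), typeof(*pos), member)	\
			  : NULL;					\
	     pos && (n = pos->member.next				\
			? container_of(pos->member.next,		\
				       typeof(*pos), member)		\
			: NULL, 1);					\
	     pos = n)

int main(void)
{
	struct demo_task a = { .pid = 1 }, b = { .pid = 2 }, c = { .pid = 3 };
	struct demo_task *p, *t;

	/* Build a -> b -> c by hand (the kernel uses llist_add() here). */
	a.wake_entry.next = &b.wake_entry;
	b.wake_entry.next = &c.wake_entry;
	c.wake_entry.next = NULL;

	llist_for_each_entry_safe(p, t, &a.wake_entry, wake_entry) {
		p->wake_entry.next = NULL;	/* node reused, as after wakeup */
		printf("waking pid %d\n", p->pid);
	}
	return 0;
}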
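
Likewise, the bit_waitqueue() body deleted above is not lost: upstream moved the wait-bit table and bit_waitqueue() into kernel/sched/wait_bit.c (hence the added #include <linux/wait_bit.h> and the wait_bit_init() call in sched_init()), so MuQSS no longer carries its own copy. The sketch below reproduces the hashing that code performed, assuming a 64-bit long; hash_long() is re-implemented here from the multiplicative constant in include/linux/hash.h, so treat it as an illustration rather than the kernel source.

/*
 * How the removed bit_waitqueue() picked one of 256 shared wait queues:
 * fold (word address, bit number) into a single value, then hash it.
 */
#include <stdio.h>

#define WAIT_TABLE_BITS	8
#define WAIT_TABLE_SIZE	(1 << WAIT_TABLE_BITS)
#define GOLDEN_RATIO_64	0x61C8864680B583EBull	/* from include/linux/hash.h */

/* 64-bit multiplicative hash: keep the top 'bits' bits of the product. */
static unsigned long hash_long(unsigned long val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

/* Mirror of the removed bit_waitqueue(), returning the table index. */
static unsigned long bit_wait_table_index(void *word, int bit)
{
	const int shift = sizeof(long) == 4 ? 5 : 6;	/* log2(BITS_PER_LONG) */
	unsigned long val = (unsigned long)word << shift | bit;

	return hash_long(val, WAIT_TABLE_BITS);
}

int main(void)
{
	unsigned long flags = 0;

	/* Nearby (word, bit) pairs should scatter across the table. */
	for (int bit = 0; bit < 4; bit++)
		printf("bit %d -> queue %lu of %d\n", bit,
		       bit_wait_table_index(&flags, bit), WAIT_TABLE_SIZE);
	return 0;
}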