arter97

Untitled

Mar 24th, 2019
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9528c55a30674..eb4c89cda3900 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3827,7 +3827,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
     * put back on, and if we advance min_vruntime, we'll be placed back
     * further than we started -- ie. we'll be penalized.
     */
-   if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+   if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
        update_min_vruntime(cfs_rq);
 }
 
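The hunk above inverts the min_vruntime test: update_min_vruntime() should run on every dequeue except when the entity is only being saved for a later re-enqueue (DEQUEUE_SAVE without DEQUEUE_MOVE), and the old == comparison had that the wrong way around. A small userspace sketch of the fixed condition, using stand-in flag values rather than the kernel's headers:

#include <stdio.h>

/* Stand-in flag values for illustration only; the kernel defines its own. */
#define DEQUEUE_SAVE 0x02
#define DEQUEUE_MOVE 0x04

/* The fixed test from the hunk above: update min_vruntime unless the entity
 * is merely being saved for a later re-enqueue. */
static int updates_min_vruntime(int flags)
{
    return (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE;
}

int main(void)
{
    int cases[] = { 0, DEQUEUE_SAVE, DEQUEUE_MOVE, DEQUEUE_SAVE | DEQUEUE_MOVE };

    for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
        printf("flags=0x%x -> updates min_vruntime: %d\n",
               cases[i], updates_min_vruntime(cases[i]));
    return 0; /* only flags == DEQUEUE_SAVE prints 0 */
}

Only the save-without-move combination skips the update; every other case, including a plain dequeue with neither flag set, still advances min_vruntime.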
@@ -4304,9 +4304,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
    /*
     * Add to the _head_ of the list, so that an already-started
-    * distribute_cfs_runtime will not see us
+    * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
+    * not running add to the tail so that later runqueues don't get starved.
     */
-   list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+   if (cfs_b->distribute_running)
+       list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+   else
+       list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 
    /*
     * If we're the first throttled task, make sure the bandwidth
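The throttle_cfs_rq() hunk above encodes a simple rule: while distribute_cfs_runtime() is walking the throttled list, new entries go at the head so the in-progress walk does not pick them up; when no walk is running they go at the tail, preserving FIFO order so runqueues that throttled earlier get runtime first. A minimal userspace sketch of that insertion rule, with a plain singly linked list standing in for the kernel's RCU list and names mirroring the patch:

#include <stdio.h>
#include <stdbool.h>

struct node { int id; struct node *next; };

static struct node *head;       /* stand-in for cfs_b->throttled_cfs_rq */
static bool distribute_running; /* mirrors the new cfs_b->distribute_running */

static void add_head(struct node *n) { n->next = head; head = n; }

static void add_tail(struct node *n)
{
    struct node **pp = &head;
    while (*pp)
        pp = &(*pp)->next;
    n->next = NULL;
    *pp = n;
}

static void throttle(struct node *n)
{
    if (distribute_running)
        add_head(n); /* an in-progress walk must not pick us up */
    else
        add_tail(n); /* keep FIFO order so older entries aren't starved */
}

int main(void)
{
    struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

    throttle(&a);             /* no walk running: tail */
    throttle(&b);             /* tail -> order 1, 2 */
    distribute_running = true;
    throttle(&c);             /* walk "running": head -> order 3, 1, 2 */
    distribute_running = false;

    for (struct node *n = head; n; n = n->next)
        printf("rq %d\n", n->id); /* prints 3, then 1, then 2 */
    return 0;
}

In the kernel the head insertion additionally relies on RCU list semantics: a reader that has already started iterating past the list head will not see a node added in front of it.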
@@ -4453,14 +4457,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
     * in us over-using our runtime if it is all used during this loop, but
     * only by limited amounts in that extreme case.
     */
-   while (throttled && cfs_b->runtime > 0) {
+   while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
        runtime = cfs_b->runtime;
+       cfs_b->distribute_running = 1;
        raw_spin_unlock(&cfs_b->lock);
        /* we can't nest cfs_b->lock while distributing bandwidth */
        runtime = distribute_cfs_runtime(cfs_b, runtime,
                         runtime_expires);
        raw_spin_lock(&cfs_b->lock);
 
+       cfs_b->distribute_running = 0;
        throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 
        cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4571,6 +4577,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
    /* confirm we're still not at a refresh boundary */
    raw_spin_lock(&cfs_b->lock);
+   if (cfs_b->distribute_running) {
+       raw_spin_unlock(&cfs_b->lock);
+       return;
+   }
+
    if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
        raw_spin_unlock(&cfs_b->lock);
        return;
@@ -4580,6 +4591,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
        runtime = cfs_b->runtime;
 
    expires = cfs_b->runtime_expires;
+   if (runtime)
+       cfs_b->distribute_running = 1;
+
    raw_spin_unlock(&cfs_b->lock);
 
    if (!runtime)
@@ -4590,6 +4604,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
    raw_spin_lock(&cfs_b->lock);
    if (expires == cfs_b->runtime_expires)
        cfs_b->runtime -= min(runtime, cfs_b->runtime);
+   cfs_b->distribute_running = 0;
    raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -4698,6 +4713,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
    cfs_b->period_timer.function = sched_cfs_period_timer;
    hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    cfs_b->slack_timer.function = sched_cfs_slack_timer;
+   cfs_b->distribute_running = 0;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
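Taken together, the do_sched_cfs_period_timer(), do_sched_cfs_slack_timer() and init_cfs_bandwidth() hunks add a distribute_running flag that serializes the two distribution paths: whichever path wins sets the flag under cfs_b->lock before dropping it for the long distribute_cfs_runtime() call and clears it after retaking the lock, while the other path backs off if it sees the flag set. A rough userspace analogue of that handshake, with a pthread mutex standing in for raw_spin_lock and a stub for the distribution itself:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* ~ cfs_b->lock */
static bool distribute_running;                          /* ~ the new flag */

static void distribute(void) /* placeholder for distribute_cfs_runtime() */
{
    puts("distributing runtime to throttled runqueues");
}

static void period_timer(void) /* ~ do_sched_cfs_period_timer() */
{
    pthread_mutex_lock(&lock);
    if (!distribute_running) {
        distribute_running = true;
        pthread_mutex_unlock(&lock); /* can't hold the lock while distributing */
        distribute();
        pthread_mutex_lock(&lock);
        distribute_running = false;
    }
    pthread_mutex_unlock(&lock);
}

static void slack_timer(void) /* ~ do_sched_cfs_slack_timer() */
{
    pthread_mutex_lock(&lock);
    if (distribute_running) { /* someone else is distributing: bail out */
        pthread_mutex_unlock(&lock);
        return;
    }
    distribute_running = true;
    pthread_mutex_unlock(&lock);
    distribute();
    pthread_mutex_lock(&lock);
    distribute_running = false;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    period_timer();
    slack_timer();
    return 0;
}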
@@ -7304,10 +7320,10 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
        !(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)))
        target_cpu = most_spare_cap_cpu;
 
-   if (target_cpu == -1 && cpu_isolated(prev_cpu) &&
-           isolated_candidate != -1) {
-       target_cpu = isolated_candidate;
+   if (cpu_isolated(prev_cpu)) {
        fbt_env->avoid_prev_cpu = true;
+       if (target_cpu == -1 && isolated_candidate != -1)
+           target_cpu = isolated_candidate;
    }
 
    /*
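The last hunk changes find_best_target() so that avoid_prev_cpu is set whenever prev_cpu is isolated, not only when the isolated_candidate fallback actually gets used; the fallback itself still applies only if no target CPU was chosen. A small sketch of the new control flow, using userspace stand-ins rather than the real EAS helpers:

#include <stdbool.h>
#include <stdio.h>

struct fbt_env { bool avoid_prev_cpu; };

static void handle_isolated_prev(bool prev_isolated, int isolated_candidate,
                                 int *target_cpu, struct fbt_env *fbt_env)
{
    if (prev_isolated) {
        fbt_env->avoid_prev_cpu = true;       /* always avoid an isolated prev */
        if (*target_cpu == -1 && isolated_candidate != -1)
            *target_cpu = isolated_candidate; /* fall back only if nothing chosen */
    }
}

int main(void)
{
    struct fbt_env env = { .avoid_prev_cpu = false };
    int target = 4; /* a target CPU was already found */

    handle_isolated_prev(true, 2, &target, &env);
    printf("target=%d avoid_prev_cpu=%d\n", target, env.avoid_prev_cpu);
    /* prints target=4 avoid_prev_cpu=1; the old ordering would have left
     * avoid_prev_cpu unset because target_cpu was not -1 */
    return 0;
}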