diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9528c55a30674..eb4c89cda3900 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3827,7 +3827,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * put back on, and if we advance min_vruntime, we'll be placed back
 	 * further than we started -- ie. we'll be penalized.
 	 */
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
 		update_min_vruntime(cfs_rq);
 }
 
@@ -4304,9 +4304,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	/*
 	 * Add to the _head_ of the list, so that an already-started
-	 * distribute_cfs_runtime will not see us
+	 * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
+	 * not running add to the tail so that later runqueues don't get starved.
 	 */
-	list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	if (cfs_b->distribute_running)
+		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+	else
+		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
 
 	/*
 	 * If we're the first throttled task, make sure the bandwidth
@@ -4453,14 +4457,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 	 * in us over-using our runtime if it is all used during this loop, but
 	 * only by limited amounts in that extreme case.
 	 */
-	while (throttled && cfs_b->runtime > 0) {
+	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
+		cfs_b->distribute_running = 1;
 		raw_spin_unlock(&cfs_b->lock);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		runtime = distribute_cfs_runtime(cfs_b, runtime,
 						 runtime_expires);
 		raw_spin_lock(&cfs_b->lock);
 
+		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4571,6 +4577,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock(&cfs_b->lock);
+	if (cfs_b->distribute_running) {
+		raw_spin_unlock(&cfs_b->lock);
+		return;
+	}
+
 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
 		raw_spin_unlock(&cfs_b->lock);
 		return;
@@ -4580,6 +4591,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 		runtime = cfs_b->runtime;
 
 	expires = cfs_b->runtime_expires;
+	if (runtime)
+		cfs_b->distribute_running = 1;
+
 	raw_spin_unlock(&cfs_b->lock);
 
 	if (!runtime)
@@ -4590,6 +4604,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	raw_spin_lock(&cfs_b->lock);
 	if (expires == cfs_b->runtime_expires)
 		cfs_b->runtime -= min(runtime, cfs_b->runtime);
+	cfs_b->distribute_running = 0;
 	raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -4698,6 +4713,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	cfs_b->period_timer.function = sched_cfs_period_timer;
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
+	cfs_b->distribute_running = 0;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
@@ -7304,10 +7320,10 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 	    !(p->state == TASK_RUNNING && !idle_cpu(most_spare_cap_cpu)))
 		target_cpu = most_spare_cap_cpu;
 
-	if (target_cpu == -1 && cpu_isolated(prev_cpu) &&
-	    isolated_candidate != -1) {
-		target_cpu = isolated_candidate;
+	if (cpu_isolated(prev_cpu)) {
 		fbt_env->avoid_prev_cpu = true;
+		if (target_cpu == -1 && isolated_candidate != -1)
+			target_cpu = isolated_candidate;
 	}
 
 	/*
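For context, the core of the bandwidth change above is the new distribute_running flag on struct cfs_bandwidth: the period/slack timers set it under cfs_b->lock before dropping the lock to call distribute_cfs_runtime(), a concurrent slack-timer run bails out while it is set, and throttle_cfs_rq() uses it to decide whether a newly throttled cfs_rq goes to the head of throttled_cfs_rq (so an already-started distribution walk does not see it) or to the tail (plain FIFO, so later runqueues are not starved). Below is a minimal userspace C sketch of that pattern only, not kernel code: struct bandwidth, struct entry, throttle() and distribute() are illustrative stand-ins that use a pthread mutex and a plain singly linked list in place of cfs_b->lock and the RCU-protected throttled list.

/* Illustrative sketch only: names and types are stand-ins, not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	int id;
	struct entry *next;
};

struct bandwidth {
	pthread_mutex_t lock;		/* stands in for cfs_b->lock */
	bool distribute_running;	/* the flag added by the patch */
	struct entry *head;		/* stands in for throttled_cfs_rq */
};

static void add_head(struct bandwidth *b, struct entry *e)
{
	e->next = b->head;
	b->head = e;
}

static void add_tail(struct bandwidth *b, struct entry *e)
{
	struct entry **p = &b->head;

	while (*p)
		p = &(*p)->next;
	e->next = NULL;
	*p = e;
}

/* throttle path: head while a distribution pass is in flight, tail otherwise */
static void throttle(struct bandwidth *b, struct entry *e)
{
	pthread_mutex_lock(&b->lock);
	if (b->distribute_running)
		add_head(b, e);	/* hide from the already-started walker */
	else
		add_tail(b, e);	/* FIFO, so later entries are not starved */
	pthread_mutex_unlock(&b->lock);
}

/* timer path: claim the flag, drop the lock while "distributing",
 * and let a concurrent caller bail out instead of racing */
static void distribute(struct bandwidth *b)
{
	pthread_mutex_lock(&b->lock);
	if (b->distribute_running) {
		pthread_mutex_unlock(&b->lock);
		return;
	}
	b->distribute_running = true;
	pthread_mutex_unlock(&b->lock);

	/* ...walk the list and hand out runtime without holding the lock... */

	pthread_mutex_lock(&b->lock);
	b->distribute_running = false;
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct bandwidth b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.distribute_running = false,
		.head = NULL,
	};
	struct entry e[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct entry *p;
	int i;

	for (i = 0; i < 3; i++)
		throttle(&b, &e[i]);	/* no pass in flight: 0, 1, 2 stay in order */
	distribute(&b);

	for (p = b.head; p; p = p->next)
		printf("entry %d\n", p->id);
	return 0;
}

The head/tail choice mirrors the comment in the patch: head insertion deliberately hides a newly throttled runqueue from a distribution pass that already started, while tail insertion preserves throttle order when no pass is running, which is what prevents the starvation the patch describes.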