Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- [eamon@eamon-archpc starlte-endurance]$ diff -uNr --no-dereference --color=always kernel/sched/cpufreq_schedutil.c kernel/sched/cpufreq_blu_schedutil.c
- --- kernel/sched/cpufreq_schedutil.c 2018-12-14 10:57:31.879667167 +1100
- +++ kernel/sched/cpufreq_blu_schedutil.c 2018-12-14 17:15:00.921755191 +1100
- @@ -4,6 +4,9 @@
- * Copyright (C) 2016, Intel Corporation
- * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- + * Copyright (C) 2018, eng.stk
- + * changes for blu_schedutil: eng.stk <eng.stk@sapo.pt>
- + *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- @@ -12,11 +15,8 @@
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
- #include <linux/cpufreq.h>
- +#include <linux/kthread.h>
- #include <linux/slab.h>
- -#include <linux/of.h>
- -#include <linux/cpu_pm.h>
- -
- -#include <trace/events/power.h>
- #include "sched.h"
- #include "tune.h"
- @@ -29,6 +29,7 @@
- #define cpufreq_enable_fast_switch(x)
- #define cpufreq_disable_fast_switch(x)
- #define LATENCY_MULTIPLIER (1000)
- +#define SUGOV_KTHREAD_PRIORITY 50
- struct sugov_tunables {
- struct gov_attr_set attr_set;
- @@ -52,8 +53,10 @@
- /* The next fields are only needed if fast switch cannot be used. */
- struct irq_work irq_work;
- - struct work_struct work;
- + struct kthread_work work;
- struct mutex work_lock;
- + struct kthread_worker worker;
- + struct task_struct *thread;
- bool work_in_progress;
- bool need_freq_update;
- @@ -80,24 +83,6 @@
- static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
- -/******************* exynos specific function *******************/
- -#define DEFAULT_EXPIRED_TIME 70
- -struct sugov_exynos {
- - /* for slack timer */
- - unsigned long min;
- - int enabled;
- - bool started;
- - int expired_time;
- - struct timer_list timer;
- -
- - /* pm_qos_class */
- - int qos_min_class;
- -};
- -static DEFINE_PER_CPU(struct sugov_exynos, sugov_exynos);
- -static void sugov_stop_slack(int cpu);
- -static void sugov_start_slack(int cpu);
- -static void sugov_update_min(struct cpufreq_policy *policy);
- -
- /************************ Governor internals ***********************/
- static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
- @@ -141,34 +126,10 @@
- return false;
- }
- -static int sugov_select_scaling_cpu(void)
- -{
- - int cpu;
- - cpumask_t mask;
- -
- - cpumask_clear(&mask);
- - cpumask_and(&mask, cpu_coregroup_mask(0), cpu_online_mask);
- -
- - /* Idle core of the boot cluster is selected to scaling cpu */
- - for_each_cpu(cpu, &mask)
- - if (idle_cpu(cpu))
- - return cpu;
- -
- - /* if panic_cpu is not Little core, mask will be empty */
- - if (unlikely(!cpumask_weight(&mask))) {
- - cpu = atomic_read(&panic_cpu);
- - if (cpu != PANIC_CPU_INVALID)
- - return cpu;
- - }
- -
- - return cpumask_weight(&mask) - 1;
- -}
- -
- static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
- unsigned int next_freq)
- {
- struct cpufreq_policy *policy = sg_policy->policy;
- - int cpu;
- if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
- /* Reset cached freq as next_freq isn't changed */
- @@ -188,29 +149,15 @@
- return;
- policy->cur = next_freq;
- - trace_cpu_frequency(next_freq, smp_processor_id());
- } else {
- - cpu = sugov_select_scaling_cpu();
- - if (cpu < 0)
- - return;
- -
- sg_policy->work_in_progress = true;
- - irq_work_queue_on(&sg_policy->irq_work, cpu);
- + irq_work_queue(&sg_policy->irq_work);
- }
- }
- -#ifdef CONFIG_FREQVAR_TUNE
- -unsigned int freqvar_tipping_point(int cpu, unsigned int freq);
- -#else
- -static inline unsigned int freqvar_tipping_point(int cpu, unsigned int freq)
- -{
- - return freq + (freq >> 2);
- -}
- -#endif
- -
- /**
- * get_next_freq - Compute a new frequency for a given cpufreq policy.
- - * @sg_policy: schedutil policy object to compute the new frequency for.
- + * @sg_policy: blu_schedutil policy object to compute the new frequency for.
- * @util: Current CPU utilization.
- * @max: CPU capacity.
- *
- @@ -235,9 +182,9 @@
- {
- struct cpufreq_policy *policy = sg_policy->policy;
- unsigned int freq = arch_scale_freq_invariant() ?
- - policy->max : policy->cur;
- + policy->cpuinfo.max_freq : policy->cur;
- - freq = freqvar_tipping_point(policy->cpu, freq) * util / max;
- + freq = (freq + (freq >> 2)) * util / max;
- if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
- return sg_policy->next_freq;
- @@ -254,27 +201,25 @@
- #endif
- }
- -extern unsigned int sched_rt_remove_ratio_for_freq;
- -
- static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)
- {
- int cpu = smp_processor_id();
- - unsigned long max_cap;
- - unsigned long rt_avg = cpu_rq(cpu)->rt.avg.util_avg;
- + struct rq *rq = cpu_rq(cpu);
- + unsigned long max_cap, rt;
- + s64 delta;
- max_cap = arch_scale_cpu_capacity(NULL, cpu);
- + sched_avg_update(rq);
- + delta = time - rq->age_stamp;
- + if (unlikely(delta < 0))
- + delta = 0;
- + rt = div64_u64(rq->rt_avg, sched_avg_period() + delta);
- + rt = (rt * max_cap) >> SCHED_CAPACITY_SHIFT;
- +
- *util = boosted_cpu_util(cpu);
- -
- - if (sched_feat(UTIL_EST)) {
- - *util = max_t(unsigned long, *util,
- - READ_ONCE(cpu_rq(cpu)->cfs.avg.util_est.enqueued));
- - }
- -
- - if (sched_rt_remove_ratio_for_freq)
- - *util -= ((rt_avg * sched_rt_remove_ratio_for_freq) / 100);
- if (likely(use_pelt()))
- - *util = min(*util, max_cap);
- + *util = min((*util + rt), max_cap);
- *max = max_cap;
- }
- @@ -291,9 +236,6 @@
- if (delta_ns > TICK_NSEC)
- sg_cpu->iowait_boost = 0;
- }
- -
- - /* HACK: block iowait boost to avoid unnecessary setting max frequency */
- - sg_cpu->iowait_boost = 0;
- }
- static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
- @@ -370,7 +312,7 @@
- unsigned long util = 0, max = 1;
- unsigned int j;
- - for_each_cpu_and(j, policy->related_cpus, cpu_online_mask) {
- + for_each_cpu(j, policy->cpus) {
- struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
- unsigned long j_util, j_max;
- s64 delta_ns;
- @@ -434,7 +376,7 @@
- raw_spin_unlock(&sg_policy->update_lock);
- }
- -static void sugov_work(struct work_struct *work)
- +static void sugov_work(struct kthread_work *work)
- {
- struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
- @@ -451,55 +393,21 @@
- struct sugov_policy *sg_policy;
- sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
- - schedule_work_on(smp_processor_id(), &sg_policy->work);
- -}
- -
- -/************************ Governor externals ***********************/
- -static void update_min_rate_limit_us(struct sugov_policy *sg_policy);
- -void sugov_update_rate_limit_us(struct cpufreq_policy *policy,
- - int up_rate_limit_ms, int down_rate_limit_ms)
- -{
- - struct sugov_policy *sg_policy;
- - struct sugov_tunables *tunables;
- -
- - sg_policy = policy->governor_data;
- - if (!sg_policy)
- - return;
- -
- - tunables = sg_policy->tunables;
- - if (!tunables)
- - return;
- -
- - tunables->up_rate_limit_us = (unsigned int)(up_rate_limit_ms * USEC_PER_MSEC);
- - tunables->down_rate_limit_us = (unsigned int)(down_rate_limit_ms * USEC_PER_MSEC);
- -
- - sg_policy->up_rate_delay_ns = up_rate_limit_ms * NSEC_PER_MSEC;
- - sg_policy->down_rate_delay_ns = down_rate_limit_ms * NSEC_PER_MSEC;
- -
- - update_min_rate_limit_us(sg_policy);
- -}
- -int sugov_sysfs_add_attr(struct cpufreq_policy *policy, const struct attribute *attr)
- -{
- - struct sugov_policy *sg_policy;
- - struct sugov_tunables *tunables;
- -
- - sg_policy = policy->governor_data;
- - if (!sg_policy)
- - return -ENODEV;
- -
- - tunables = sg_policy->tunables;
- - if (!tunables)
- - return -ENODEV;
- -
- - return sysfs_create_file(&tunables->attr_set.kobj, attr);
- -}
- -
- -struct cpufreq_policy *sugov_get_attr_policy(struct gov_attr_set *attr_set)
- -{
- - struct sugov_policy *sg_policy = list_first_entry(&attr_set->policy_list,
- - typeof(*sg_policy), tunables_hook);
- - return sg_policy->policy;
- + /*
- + * For RT and deadline tasks, the blu_schedutil governor shoots the
- + * frequency to maximum. Special care must be taken to ensure that this
- + * kthread doesn't result in the same behavior.
- + *
- + * This is (mostly) guaranteed by the work_in_progress flag. The flag is
- + * updated only at the end of the sugov_work() function and before that
- + * the blu_schedutil governor rejects all other frequency scaling requests.
- + *
- + * There is a very rare case though, where the RT thread yields right
- + * after the work_in_progress flag is cleared. The effects of that are
- + * neglected for now.
- + */
- + kthread_queue_work(&sg_policy->worker, &sg_policy->work);
- }
- /************************** sysfs interface ************************/
- @@ -592,7 +500,7 @@
- /********************** cpufreq governor interface *********************/
- -static struct cpufreq_governor schedutil_gov;
- +static struct cpufreq_governor blu_schedutil_gov;
- static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
- {
- @@ -603,19 +511,64 @@
- return NULL;
- sg_policy->policy = policy;
- - init_irq_work(&sg_policy->irq_work, sugov_irq_work);
- - INIT_WORK(&sg_policy->work, sugov_work);
- - mutex_init(&sg_policy->work_lock);
- raw_spin_lock_init(&sg_policy->update_lock);
- return sg_policy;
- }
- static void sugov_policy_free(struct sugov_policy *sg_policy)
- {
- - mutex_destroy(&sg_policy->work_lock);
- kfree(sg_policy);
- }
- +static int sugov_kthread_create(struct sugov_policy *sg_policy)
- +{
- + struct task_struct *thread;
- + struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
- + struct cpufreq_policy *policy = sg_policy->policy;
- + int ret;
- +
- + /* kthread only required for slow path */
- + if (policy->fast_switch_enabled)
- + return 0;
- +
- + kthread_init_work(&sg_policy->work, sugov_work);
- + kthread_init_worker(&sg_policy->worker);
- + thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
- + "sugov:%d",
- + cpumask_first(policy->related_cpus));
- + if (IS_ERR(thread)) {
- + pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
- + return PTR_ERR(thread);
- + }
- +
- + ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
- + if (ret) {
- + kthread_stop(thread);
- + pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
- + return ret;
- + }
- +
- + sg_policy->thread = thread;
- + kthread_bind_mask(thread, policy->related_cpus);
- + init_irq_work(&sg_policy->irq_work, sugov_irq_work);
- + mutex_init(&sg_policy->work_lock);
- +
- + wake_up_process(thread);
- +
- + return 0;
- +}
- +
- +static void sugov_kthread_stop(struct sugov_policy *sg_policy)
- +{
- + /* kthread only required for slow path */
- + if (sg_policy->policy->fast_switch_enabled)
- + return;
- +
- + kthread_flush_worker(&sg_policy->worker);
- + kthread_stop(sg_policy->thread);
- + mutex_destroy(&sg_policy->work_lock);
- +}
- +
- static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
- {
- struct sugov_tunables *tunables;
- @@ -641,6 +594,7 @@
- {
- struct sugov_policy *sg_policy;
- struct sugov_tunables *tunables;
- + unsigned int lat;
- int ret = 0;
- /* State should be equivalent to EXIT */
- @@ -655,12 +609,16 @@
- goto disable_fast_switch;
- }
- + ret = sugov_kthread_create(sg_policy);
- + if (ret)
- + goto free_sg_policy;
- +
- mutex_lock(&global_tunables_lock);
- if (global_tunables) {
- if (WARN_ON(have_governor_per_policy())) {
- ret = -EINVAL;
- - goto free_sg_policy;
- + goto stop_kthread;
- }
- policy->governor_data = sg_policy;
- sg_policy->tunables = global_tunables;
- @@ -672,22 +630,15 @@
- tunables = sugov_tunables_alloc(sg_policy);
- if (!tunables) {
- ret = -ENOMEM;
- - goto free_sg_policy;
- + goto stop_kthread;
- }
- - if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
- - tunables->up_rate_limit_us = policy->up_transition_delay_us;
- - tunables->down_rate_limit_us = policy->down_transition_delay_us;
- - } else {
- - unsigned int lat;
- -
- - tunables->up_rate_limit_us = UP_LATENCY_MULTIPLIER;
- - tunables->down_rate_limit_us = DOWN_LATENCY_MULTIPLIER;
- - lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- - if (lat) {
- - tunables->up_rate_limit_us *= lat;
- - tunables->down_rate_limit_us *= lat;
- - }
- + tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
- + tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
- + lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- + if (lat) {
- + tunables->up_rate_limit_us *= lat;
- + tunables->down_rate_limit_us *= lat;
- }
- policy->governor_data = sg_policy;
- @@ -695,7 +646,7 @@
- ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
- get_governor_parent_kobj(policy), "%s",
- - schedutil_gov.name);
- + blu_schedutil_gov.name);
- if (ret)
- goto fail;
- @@ -707,6 +658,9 @@
- policy->governor_data = NULL;
- sugov_tunables_free(tunables);
- + stop_kthread:
- + sugov_kthread_stop(sg_policy);
- +
- free_sg_policy:
- mutex_unlock(&global_tunables_lock);
- @@ -732,43 +686,13 @@
- if (!count)
- sugov_tunables_free(tunables);
- - sugov_policy_free(sg_policy);
- mutex_unlock(&global_tunables_lock);
- -}
- -
- -#ifdef CONFIG_EXYNOS_HOTPLUG_GOVERNOR
- -int sugov_fast_start(struct cpufreq_policy *policy, unsigned int cpu)
- -{
- - struct sugov_policy *sg_policy;
- - struct sugov_cpu *sg_cpu;
- -
- - down_write(&policy->rwsem);
- - cpumask_set_cpu(cpu, policy->cpus);
- - sg_policy = policy->governor_data;
- - sg_cpu = &per_cpu(sugov_cpu, cpu);
- -
- - memset(sg_cpu, 0, sizeof(*sg_cpu));
- - sg_cpu->sg_policy = sg_policy;
- - sg_cpu->util = 0;
- - sg_cpu->max = 0;
- - sg_cpu->flags = 0;
- - sg_cpu->last_update = 0;
- - sg_cpu->iowait_boost = 0;
- - sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
- - cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- - sugov_update_shared);
- -
- - up_write(&policy->rwsem);
- + sugov_kthread_stop(sg_policy);
- + sugov_policy_free(sg_policy);
- - return 1;
- -}
- -#else
- -int sugov_fast_start(struct cpufreq_policy *policy, unsigned int cpu)
- -{
- - return 0;
- + cpufreq_disable_fast_switch(policy);
- }
- -#endif
- static int sugov_start(struct cpufreq_policy *policy)
- {
- @@ -791,15 +715,13 @@
- memset(sg_cpu, 0, sizeof(*sg_cpu));
- sg_cpu->sg_policy = sg_policy;
- - sg_cpu->flags = 0;
- - sugov_start_slack(cpu);
- + sg_cpu->flags = SCHED_CPUFREQ_DL;
- sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
- cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
- policy_is_shared(policy) ?
- sugov_update_shared :
- sugov_update_single);
- }
- -
- return 0;
- }
- @@ -808,43 +730,32 @@
- struct sugov_policy *sg_policy = policy->governor_data;
- unsigned int cpu;
- - for_each_cpu(cpu, policy->cpus) {
- - sugov_stop_slack(cpu);
- + for_each_cpu(cpu, policy->cpus)
- cpufreq_remove_update_util_hook(cpu);
- - }
- synchronize_sched();
- - irq_work_sync(&sg_policy->irq_work);
- - cancel_work_sync(&sg_policy->work);
- + if (!policy->fast_switch_enabled) {
- + irq_work_sync(&sg_policy->irq_work);
- + kthread_cancel_work_sync(&sg_policy->work);
- + }
- }
- static void sugov_limits(struct cpufreq_policy *policy)
- {
- struct sugov_policy *sg_policy = policy->governor_data;
- - mutex_lock(&global_tunables_lock);
- -
- - if (!sg_policy) {
- - mutex_unlock(&global_tunables_lock);
- - return;
- - }
- -
- if (!policy->fast_switch_enabled) {
- mutex_lock(&sg_policy->work_lock);
- cpufreq_policy_apply_limits(policy);
- mutex_unlock(&sg_policy->work_lock);
- }
- - sugov_update_min(policy);
- -
- sg_policy->need_freq_update = true;
- -
- - mutex_unlock(&global_tunables_lock);
- }
- -static struct cpufreq_governor schedutil_gov = {
- - .name = "schedutil",
- +static struct cpufreq_governor blu_schedutil_gov = {
- + .name = "blu_schedutil",
- .owner = THIS_MODULE,
- .init = sugov_init,
- .exit = sugov_exit,
- @@ -853,244 +764,15 @@
- .limits = sugov_limits,
- };
- -#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
- +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_BLU_SCHEDUTIL
- struct cpufreq_governor *cpufreq_default_governor(void)
- {
- - return &schedutil_gov;
- + return &blu_schedutil_gov;
- }
- #endif
- -static void sugov_update_min(struct cpufreq_policy *policy)
- -{
- - int cpu, max_cap;
- - struct sugov_exynos *sg_exynos;
- - int min_cap;
- -
- - max_cap = arch_scale_cpu_capacity(NULL, policy->cpu);
- -
- - /* min_cap is minimum value making higher frequency than policy->min */
- - min_cap = max_cap * policy->min / policy->max;
- - min_cap = (min_cap * 4 / 5) + 1;
- -
- - for_each_cpu(cpu, policy->cpus) {
- - sg_exynos = &per_cpu(sugov_exynos, cpu);
- - sg_exynos->min = min_cap;
- - }
- -}
- -
- -static void sugov_nop_timer(unsigned long data)
- -{
- - /*
- - * The purpose of slack-timer is to wake up the CPU from IDLE, in order
- - * to decrease its frequency if it is not set to minimum already.
- - *
- - * This is important for platforms where CPU with higher frequencies
- - * consume higher power even at IDLE.
- - */
- - trace_sugov_slack_func(smp_processor_id());
- -}
- -
- -static void sugov_start_slack(int cpu)
- -{
- - struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- -
- - if (!sg_exynos->enabled)
- - return;
- -
- - sg_exynos->min = ULONG_MAX;
- - sg_exynos->started = true;
- -}
- -
- -static void sugov_stop_slack(int cpu)
- -{
- - struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- -
- - sg_exynos->started = false;
- - if (timer_pending(&sg_exynos->timer))
- - del_timer_sync(&sg_exynos->timer);
- -}
- -
- -static s64 get_next_event_time_ms(void)
- -{
- - return ktime_to_us(tick_nohz_get_sleep_length());
- -}
- -
- -static int sugov_need_slack_timer(unsigned int cpu)
- -{
- - struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
- - struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- -
- - if (schedtune_cpu_boost(cpu))
- - return 0;
- -
- - if (sg_cpu->util > sg_exynos->min &&
- - get_next_event_time_ms() > sg_exynos->expired_time)
- - return 1;
- -
- - return 0;
- -}
- -
- -static int sugov_pm_notifier(struct notifier_block *self,
- - unsigned long action, void *v)
- -{
- - unsigned int cpu = raw_smp_processor_id();
- - struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- - struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
- - struct timer_list *timer = &sg_exynos->timer;
- -
- - if (!sg_exynos->started)
- - return NOTIFY_OK;
- -
- - switch (action) {
- - case CPU_PM_ENTER_PREPARE:
- - if (timer_pending(timer))
- - del_timer_sync(timer);
- -
- - if (sugov_need_slack_timer(cpu)) {
- - timer->expires = jiffies + msecs_to_jiffies(sg_exynos->expired_time);
- - add_timer_on(timer, cpu);
- - trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, 1);
- - }
- - break;
- -
- - case CPU_PM_ENTER:
- - if (timer_pending(timer) && !sugov_need_slack_timer(cpu)) {
- - del_timer_sync(timer);
- - trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, -1);
- - }
- - break;
- -
- - case CPU_PM_EXIT_POST:
- - if (timer_pending(timer) && (time_after(timer->expires, jiffies))) {
- - del_timer_sync(timer);
- - trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, -1);
- - }
- - break;
- - }
- -
- - return NOTIFY_OK;
- -}
- -
- -static struct notifier_block sugov_pm_nb = {
- - .notifier_call = sugov_pm_notifier,
- -};
- -
- -static int find_cpu_pm_qos_class(int pm_qos_class)
- -{
- - int cpu;
- -
- - for_each_possible_cpu(cpu) {
- - struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- -
- - if ((sg_exynos->qos_min_class == pm_qos_class) &&
- - cpumask_test_cpu(cpu, cpu_active_mask))
- - return cpu;
- - }
- -
- - pr_err("cannot find cpu of PM QoS class\n");
- - return -EINVAL;
- -}
- -
- -static int sugov_pm_qos_callback(struct notifier_block *nb,
- - unsigned long val, void *v)
- -{
- - struct sugov_cpu *sg_cpu;
- - struct cpufreq_policy *policy;
- - int pm_qos_class = *((int *)v);
- - unsigned int next_freq;
- - int cpu;
- -
- - cpu = find_cpu_pm_qos_class(pm_qos_class);
- - if (cpu < 0)
- - return NOTIFY_BAD;
- -
- - sg_cpu = &per_cpu(sugov_cpu, cpu);
- - if (!sg_cpu || !sg_cpu->sg_policy || !sg_cpu->sg_policy->policy)
- - return NOTIFY_BAD;
- -
- - next_freq = sg_cpu->sg_policy->next_freq;
- -
- - policy = cpufreq_cpu_get(cpu);
- - if (!policy)
- - return NOTIFY_BAD;
- -
- - if (val >= policy->cur) {
- - cpufreq_cpu_put(policy);
- - return NOTIFY_BAD;
- - }
- -
- - __cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
- -
- - cpufreq_cpu_put(policy);
- -
- - return NOTIFY_OK;
- -}
- -
- -static struct notifier_block sugov_min_qos_notifier = {
- - .notifier_call = sugov_pm_qos_callback,
- - .priority = INT_MIN,
- -};
- -
- -static int __init sugov_parse_dt(struct device_node *dn, int cpu)
- -{
- - struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- -
- - /* parsing slack info */
- - if (of_property_read_u32(dn, "enabled", &sg_exynos->enabled))
- - return -EINVAL;
- - if (sg_exynos->enabled)
- - if (of_property_read_u32(dn, "expired_time", &sg_exynos->expired_time))
- - sg_exynos->expired_time = DEFAULT_EXPIRED_TIME;
- -
- - /* parsing pm_qos_class info */
- - if (of_property_read_u32(dn, "qos_min_class", &sg_exynos->qos_min_class))
- - return -EINVAL;
- -
- - return 0;
- -}
- -
- -static void __init sugov_exynos_init(void)
- -{
- - int cpu, ret;
- - struct device_node *dn = NULL;
- - const char *buf;
- -
- - while ((dn = of_find_node_by_type(dn, "schedutil-domain"))) {
- - struct cpumask shared_mask;
- - /* Get shared cpus */
- - ret = of_property_read_string(dn, "shared-cpus", &buf);
- - if (ret)
- - goto exit;
- -
- - cpulist_parse(buf, &shared_mask);
- - for_each_cpu(cpu, &shared_mask)
- - if (sugov_parse_dt(dn, cpu))
- - goto exit;
- - }
- -
- - for_each_possible_cpu(cpu) {
- - struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
- -
- - if (!sg_exynos->enabled)
- - continue;
- -
- - /* Initialize slack-timer */
- - init_timer_pinned(&sg_exynos->timer);
- - sg_exynos->timer.function = sugov_nop_timer;
- - }
- -
- - pm_qos_add_notifier(PM_QOS_CLUSTER0_FREQ_MIN, &sugov_min_qos_notifier);
- - pm_qos_add_notifier(PM_QOS_CLUSTER1_FREQ_MIN, &sugov_min_qos_notifier);
- - cpu_pm_register_notifier(&sugov_pm_nb);
- -
- - return;
- -exit:
- - pr_info("%s: failed to initialized slack_timer, pm_qos handler\n", __func__);
- -}
- static int __init sugov_register(void)
- {
- - sugov_exynos_init();
- -
- - return cpufreq_register_governor(&schedutil_gov);
- + return cpufreq_register_governor(&blu_schedutil_gov);
- }
- fs_initcall(sugov_register);
- [eamon@eamon-archpc starlte-endurance]$
Add Comment
Please, Sign In to add comment