diff --git a/drivers/cpufreq/cpufreq_zeneractive.c b/drivers/cpufreq/cpufreq_zeneractive.c
index 00f11ff..730b733 100644
--- a/drivers/cpufreq/cpufreq_zeneractive.c
+++ b/drivers/cpufreq/cpufreq_zeneractive.c
@@ -30,6 +30,8 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/earlysuspend.h>
+
 #include <asm/cputime.h>
 
 #define CREATE_TRACE_POINTS
@@ -48,13 +50,13 @@ struct cpufreq_zeneractive_cpuinfo {
 	 * Measurement for how long cur_load has been
 	 * above and below unplug_load[cpu].
	 */
-	unsigned long total_below_unplug_time[3];
-	unsigned long total_above_unplug_time[3];
+	unsigned long total_below_unplug_time;
+	unsigned long total_above_unplug_time;
 	/*
 	 * Last time we were there checking unplug_time
 	 */
-	u64 last_time_below_unplug_time[3];
-	u64 last_time_above_unplug_time[3];
+	u64 last_time_below_unplug_time;
+	u64 last_time_above_unplug_time;
 	struct cpufreq_policy *policy;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int target_freq;
@@ -367,6 +369,7 @@ static void cpufreq_zeneractive_idle_end(void)
 static int cpufreq_zeneractive_hotplug_task(void *data)
 {
 	u64 now;
+	cpumask_t tmp_mask;
 	unsigned int cpu;
 	unsigned long flags;
 	struct cpufreq_zeneractive_cpuinfo *pcpu;
@@ -387,69 +390,90 @@ static int cpufreq_zeneractive_hotplug_task(void *data)
 		}
 
 		set_current_state(TASK_RUNNING);
+		tmp_mask = hotplug_cpumask;
 		cpumask_clear(&hotplug_cpumask);
 		spin_unlock_irqrestore(&hotplug_cpumask_lock, flags);
 
-		pcpu = &per_cpu(cpuinfo, smp_processor_id());
-		smp_rmb();
-
-		if (!pcpu->governor_enabled) {
-			continue;
-		}
-
 		get_cpu_idle_time_us(smp_processor_id(), &now);
 
-		/*
-		 * Calculate how long we've been above or below the unplug_load
-		 * per CPU, so we can see if it has exceeded the unplug or insert delay.
-		 */
-		for_each_cpu(cpu, cpu_possible_mask) {
-			if (cpu == 0 || cpu > 3) continue;
-			if (pcpu->cur_load <= unplug_load[cpu - 1]) {
-				/* Below: reset above counter */
-				pcpu->total_above_unplug_time[cpu - 1] = 0;
-				pcpu->last_time_above_unplug_time[cpu - 1] = 0;
-				if (!pcpu->last_time_below_unplug_time[cpu - 1])
-					pcpu->last_time_below_unplug_time[cpu - 1] = now;
-				pcpu->total_below_unplug_time[cpu - 1] +=
-					now - pcpu->last_time_below_unplug_time[cpu - 1];
-			}
-			if (pcpu->cur_load > unplug_load[cpu - 1]) {
-				/* Above: reset below counter */
-				pcpu->total_below_unplug_time[cpu - 1] = 0;
-				pcpu->last_time_below_unplug_time[cpu - 1] = 0;
-				if (!pcpu->last_time_above_unplug_time[cpu - 1])
-					pcpu->last_time_above_unplug_time[cpu - 1] = now;
-				pcpu->total_above_unplug_time[cpu - 1] +=
-					now - pcpu->last_time_above_unplug_time[cpu - 1];
-			}
-		}
+		for_each_cpu(cpu, &tmp_mask) {
+			unsigned int j, avg_load;
+			unsigned int total_load = 0;
 
-		/*
-		 * If CPU load is at or below the unplug load for CPU {#}
-		 * remove it, otherwise add it.
-		 */
+			pcpu = &per_cpu(cpuinfo, cpu);
+			smp_rmb();
 
-		/* Ensure it has been unplug_delay since our last attempt*/
-		for_each_online_cpu(cpu) {
-			if (cpu == 0 || cpu > 3)
+			if (!pcpu->governor_enabled)
 				continue;
 
-			/* Have we been below unplug load for unplug_delay? */
-			if (pcpu->total_below_unplug_time[cpu - 1] > unplug_delay) {
-				mutex_lock(&set_speed_lock);
-				cpu_down(cpu);
-				mutex_unlock(&set_speed_lock);
-			}
-		}
-		for_each_cpu_not(cpu, cpu_online_mask) {
-			if (cpu == 0 || cpu > 3)
-				continue;
-			/* Have we been above unplug_load for insert delay? */
-			if (pcpu->total_above_unplug_time[cpu - 1] > insert_delay) {
-				mutex_lock(&set_speed_lock);
-				cpu_up(cpu);
-				mutex_unlock(&set_speed_lock);
+			/*
+			 * Compute the average CPU load.
+			 * Use cpu_online_mask so the load is
+			 * averaged across all online CPUs.
+			 */
+			for_each_cpu(j, cpu_online_mask) {
+				struct cpufreq_zeneractive_cpuinfo *pjcpu =
+					&per_cpu(cpuinfo, j);
+
+				total_load += pjcpu->cur_load;
+			}
+			avg_load = total_load / num_online_cpus();
+
+			/*
+			 * Determine which CPUs to remove/insert.
+			 * Use cpu_possible_mask so both online
+			 * and offline CPUs are considered.
+			 */
+			for_each_possible_cpu(j) {
+				struct cpufreq_zeneractive_cpuinfo *pjcpu;
+
+				if (j == 0)
+					continue;
+				else if (j > 3)
+					break;
+
+				pjcpu = &per_cpu(cpuinfo, j);
+
+				/*
+				 * Hotplug hysteresis works as follows:
+				 * while avg_load <= unplug_load[j - 1],
+				 * reset the timers tracking how long we
+				 * have been ABOVE the threshold and
+				 * accumulate how long we have been below
+				 * it. The same logic, inverted, applies
+				 * while avg_load is above the threshold.
+				 */
+				if (avg_load <= unplug_load[j - 1]) {
+					/* Below: reset above counter */
+					pjcpu->total_above_unplug_time = 0;
+					pjcpu->last_time_above_unplug_time = 0;
+					if (!pjcpu->last_time_below_unplug_time)
+						pjcpu->last_time_below_unplug_time = now;
+					pjcpu->total_below_unplug_time +=
+						now - pjcpu->last_time_below_unplug_time;
+				}
+				if (avg_load > unplug_load[j - 1]) {
+					/* Above: reset below counter */
+					pjcpu->total_below_unplug_time = 0;
+					pjcpu->last_time_below_unplug_time = 0;
+					if (!pjcpu->last_time_above_unplug_time)
+						pjcpu->last_time_above_unplug_time = now;
+					pjcpu->total_above_unplug_time +=
+						now - pjcpu->last_time_above_unplug_time;
+				}
+
+				if (cpu_online(j) &&
+				    pjcpu->total_below_unplug_time > unplug_delay) {
+					mutex_lock(&set_speed_lock);
+					cpu_down(j);
+					mutex_unlock(&set_speed_lock);
+				}
+				if (cpu_is_offline(j) &&
+				    pjcpu->total_above_unplug_time > insert_delay) {
+					mutex_lock(&set_speed_lock);
+					cpu_up(j);
+					mutex_unlock(&set_speed_lock);
+				}
 			}
 		}
 	}
@@ -870,6 +894,30 @@ static struct attribute_group zeneractive_attr_group = {
 	.name = "zeneractive",
 };
 
+#ifdef CONFIG_EARLYSUSPEND
+/*
+ * Enable all CPUs when waking up the device
+ */
+static void zeneractive_late_resume(struct early_suspend *handler) {
+	unsigned int cpu;
+	struct cpufreq_zeneractive_cpuinfo *pcpu;
+
+	for_each_cpu_not(cpu, cpu_online_mask) {
+		pcpu = &per_cpu(cpuinfo, cpu);
+		if (!pcpu->governor_enabled)
+			continue;
+		mutex_lock(&set_speed_lock);
+		cpu_up(cpu);
+		mutex_unlock(&set_speed_lock);
+	}
+}
+
+static struct early_suspend zeneractive_early_suspend = {
+	.resume = zeneractive_late_resume,
+	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+#endif
+
 static int cpufreq_zeneractive_idle_notifier(struct notifier_block *nb,
 					     unsigned long val,
 					     void *data)
@@ -922,12 +970,8 @@ static int cpufreq_governor_zeneractive(struct cpufreq_policy *policy,
 			pcpu->hispeed_validate_time =
 				pcpu->target_set_time;
 			pcpu->governor_enabled = 1;
-			pcpu->total_below_unplug_time[0] = 0;
-			pcpu->total_below_unplug_time[1] = 0;
-			pcpu->total_below_unplug_time[2] = 0;
-			pcpu->last_time_below_unplug_time[0] = 0;
-			pcpu->last_time_below_unplug_time[1] = 0;
-			pcpu->last_time_below_unplug_time[2] = 0;
+			pcpu->total_below_unplug_time = 0;
+			pcpu->last_time_below_unplug_time = 0;
 			pcpu->cur_load = 0;
 			smp_wmb();
 			pcpu->cpu_timer.expires =
@@ -947,6 +991,9 @@ static int cpufreq_governor_zeneractive(struct cpufreq_policy *policy,
 		if (rc)
 			return rc;
 
+#ifdef CONFIG_EARLYSUSPEND
+		register_early_suspend(&zeneractive_early_suspend);
+#endif
 		idle_notifier_register(&cpufreq_zeneractive_idle_nb);
 		break;
 
@@ -965,6 +1012,9 @@ static int cpufreq_governor_zeneractive(struct cpufreq_policy *policy,
 		sysfs_remove_group(cpufreq_global_kobject,
 				&zeneractive_attr_group);
 
+#ifdef CONFIG_EARLYSUSPEND
+		unregister_early_suspend(&zeneractive_early_suspend);
+#endif
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
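
For a quick read of the policy this patch implements, below is a standalone
userspace sketch of the same average-load hysteresis. It is a toy model, not
kernel code: unplug_load[], unplug_delay and insert_delay mirror the
governor's tunables, while NR_HOTPLUG_CPUS, evaluate(), online[] and all the
sample values are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_HOTPLUG_CPUS 3	/* CPUs 1..3 are hotpluggable; CPU 0 stays up */

/* Illustrative tunables; the governor reads the real ones from sysfs. */
static unsigned int unplug_load[NR_HOTPLUG_CPUS] = { 30, 50, 70 };
static uint64_t unplug_delay = 3000;	/* usec below load before cpu_down() */
static uint64_t insert_delay = 1000;	/* usec above load before cpu_up() */

struct hysteresis {
	uint64_t total_below, total_above;	/* accumulated time counters */
	uint64_t last_below, last_above;	/* first sample past the threshold */
};

static struct hysteresis hys[NR_HOTPLUG_CPUS];
static bool online[NR_HOTPLUG_CPUS];

/* One pass of the hotplug task, evaluated at wall time 'now' (usec). */
static void evaluate(unsigned int avg_load, uint64_t now)
{
	for (int i = 0; i < NR_HOTPLUG_CPUS; i++) {
		struct hysteresis *h = &hys[i];

		if (avg_load <= unplug_load[i]) {
			/* Below threshold: reset the "above" tracking. */
			h->total_above = 0;
			h->last_above = 0;
			if (!h->last_below)
				h->last_below = now;
			/*
			 * As in the patch, last_below stays pinned at the
			 * first below-threshold sample, so each pass adds
			 * the full distance back to that crossing.
			 */
			h->total_below += now - h->last_below;
		} else {
			/* Above threshold: reset the "below" tracking. */
			h->total_below = 0;
			h->last_below = 0;
			if (!h->last_above)
				h->last_above = now;
			h->total_above += now - h->last_above;
		}

		if (online[i] && h->total_below > unplug_delay) {
			online[i] = false;	/* stands in for cpu_down(i + 1) */
			printf("cpu%d down at t=%llu\n", i + 1,
			       (unsigned long long)now);
		}
		if (!online[i] && h->total_above > insert_delay) {
			online[i] = true;	/* stands in for cpu_up(i + 1) */
			printf("cpu%d up at t=%llu\n", i + 1,
			       (unsigned long long)now);
		}
	}
}

int main(void)
{
	for (int i = 0; i < NR_HOTPLUG_CPUS; i++)
		online[i] = true;

	/*
	 * 5 ms of low average load, then high load: all three CPUs drop out
	 * once total_below exceeds unplug_delay, then return once
	 * total_above exceeds insert_delay.
	 */
	for (uint64_t t = 0; t <= 10000; t += 500)
		evaluate(t < 5000 ? 20 : 90, t);
	return 0;
}

Collapsing the per-CPU time arrays to scalars is what the rest of the patch
is about: every CPU's decision now keys off one system-wide average rather
than its own cur_load, while the staggered unplug_load[j - 1] thresholds
still decide how many cores that average can sustain.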