diff --git a/drivers/cpufreq/cpufreq_zeneractive.c b/drivers/cpufreq/cpufreq_zeneractive.c
index 00f11ff..730b733 100644
--- a/drivers/cpufreq/cpufreq_zeneractive.c
+++ b/drivers/cpufreq/cpufreq_zeneractive.c
@@ -30,6 +30,8 @@
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/earlysuspend.h>
+
 #include <asm/cputime.h>
 
 #define CREATE_TRACE_POINTS
@@ -48,13 +50,13 @@ struct cpufreq_zeneractive_cpuinfo {
     * Measurement for how long cur_load has been
     * above and below unplug_load[cpu].
     */
-   unsigned long total_below_unplug_time[3];
-   unsigned long total_above_unplug_time[3];
+   unsigned long total_below_unplug_time;
+   unsigned long total_above_unplug_time;
    /*
     * Last time we were there checking unplug_time
     */
-   u64 last_time_below_unplug_time[3];
-   u64 last_time_above_unplug_time[3];
+   u64 last_time_below_unplug_time;
+   u64 last_time_above_unplug_time;
    struct cpufreq_policy *policy;
    struct cpufreq_frequency_table *freq_table;
    unsigned int target_freq;
@@ -367,6 +369,7 @@ static void cpufreq_zeneractive_idle_end(void)
 static int cpufreq_zeneractive_hotplug_task(void *data)
 {
    u64 now;
+   cpumask_t tmp_mask;
    unsigned int cpu;
    unsigned long flags;
    struct cpufreq_zeneractive_cpuinfo *pcpu;
@@ -387,69 +390,90 @@ static int cpufreq_zeneractive_hotplug_task(void *data)
        }
 
        set_current_state(TASK_RUNNING);
+       tmp_mask = hotplug_cpumask;
        cpumask_clear(&hotplug_cpumask);
        spin_unlock_irqrestore(&hotplug_cpumask_lock, flags);
 
-       pcpu = &per_cpu(cpuinfo, smp_processor_id());
-       smp_rmb();
-
-       if (!pcpu->governor_enabled) {
-           continue;
-       }
-
        get_cpu_idle_time_us(smp_processor_id(), &now);
 
-       /*
-        * Calculate how long we've been above or below the unplug_load
-        * per CPU, so we can see if it has exceeded the unplug or insert delay.
-        */
-       for_each_cpu(cpu, cpu_possible_mask) {
-           if (cpu == 0 || cpu > 3) continue;
-           if (pcpu->cur_load <= unplug_load[cpu - 1]) {
-               /* Below: reset above counter */
-               pcpu->total_above_unplug_time[cpu - 1] = 0;
-               pcpu->last_time_above_unplug_time[cpu - 1] = 0;
-               if (!pcpu->last_time_below_unplug_time[cpu - 1])
-                   pcpu->last_time_below_unplug_time[cpu - 1] = now;
-               pcpu->total_below_unplug_time[cpu - 1] +=
-                   now - pcpu->last_time_below_unplug_time[cpu - 1];
-           }
-           if (pcpu->cur_load > unplug_load[cpu - 1]) {
-               /* Above: reset below counter */
-               pcpu->total_below_unplug_time[cpu - 1] = 0;
-               pcpu->last_time_below_unplug_time[cpu - 1] = 0;
-               if (!pcpu->last_time_above_unplug_time[cpu - 1])
-                   pcpu->last_time_above_unplug_time[cpu - 1] = now;
-               pcpu->total_above_unplug_time[cpu - 1] +=
-                   now - pcpu->last_time_above_unplug_time[cpu - 1];
-           }
-       }
+       for_each_cpu(cpu, &tmp_mask) {
+           unsigned int j, avg_load;
+           unsigned int total_load = 0;
 
-       /*
-        * If CPU load is at or below the unplug load for CPU {#}
-        * remove it, otherwise add it.
-        */
+           pcpu = &per_cpu(cpuinfo, cpu);
+           smp_rmb();
 
-        /* Ensure it has been unplug_delay since our last attempt*/
-       for_each_online_cpu(cpu) {
-           if (cpu == 0 || cpu > 3)
+           if (!pcpu->governor_enabled)
                continue;
-           /* Have we been below unplug load for unplug_delay? */
-           if (pcpu->total_below_unplug_time[cpu - 1] > unplug_delay) {
-               mutex_lock(&set_speed_lock);
-               cpu_down(cpu);
-               mutex_unlock(&set_speed_lock);
-           }
-       }
 
-       for_each_cpu_not(cpu, cpu_online_mask) {
-           if (cpu == 0 || cpu > 3)
-               continue;
-           /* Have we been above unplug_load for insert delay? */
-           if (pcpu->total_above_unplug_time[cpu - 1] > insert_delay) {
-               mutex_lock(&set_speed_lock);
-               cpu_up(cpu);
-               mutex_unlock(&set_speed_lock);
+           /*
+            * Compute average CPU load
+            * Use cpu_online_mask to get the load across
+            * all online CPUs.
+            */
+           for_each_cpu(j, cpu_online_mask) {
+               struct cpufreq_zeneractive_cpuinfo *pjcpu =
+                   &per_cpu(cpuinfo, j);
+
+               total_load += pjcpu->cur_load;
+           }
+           avg_load = total_load / num_online_cpus();
+
+           /*
+            * Determine which CPUs to remove/insert.
+            * Use cpu_possible_mask so we get online
+            * and offline CPUs.
+            */
+           for_each_possible_cpu(j) {
+               struct cpufreq_zeneractive_cpuinfo *pjcpu;
+
+               if (j == 0)
+                   continue;
+               else if (j > 3)
+                   break;
+
+               pjcpu = &per_cpu(cpuinfo, j);
+
+               /*
+                * The logic for hotplugging works as follows:
+                * if avg_load <= unplug_load[cpu], reset timers
+                * about how long we've been ABOVE it and
+                * figure out how long it has been since we've
+                * been below unplug_load.
+                * Logic works the same for last time we were above
+                * unplug_load.
+                */
+               if (avg_load <= unplug_load[j - 1]) {
+                   /* Below: reset above counter */
+                   pjcpu->total_above_unplug_time = 0;
+                   pjcpu->last_time_above_unplug_time = 0;
+                   if (!pjcpu->last_time_below_unplug_time)
+                       pjcpu->last_time_below_unplug_time = now;
+                   pjcpu->total_below_unplug_time +=
+                       now - pjcpu->last_time_below_unplug_time;
+               }
+               if (avg_load > unplug_load[j - 1]) {
+                   /* Above: reset below counter */
+                   pjcpu->total_below_unplug_time = 0;
+                   pjcpu->last_time_below_unplug_time = 0;
+                   if (!pjcpu->last_time_above_unplug_time)
+                       pjcpu->last_time_above_unplug_time = now;
+                   pjcpu->total_above_unplug_time +=
+                       now - pjcpu->last_time_above_unplug_time;
+               }
+
+               if (cpu_online(j) &&
+                   pjcpu->total_below_unplug_time > unplug_delay) {
+                   mutex_lock(&set_speed_lock);
+                   cpu_down(j);
+                   mutex_unlock(&set_speed_lock);
+               }
+               if (cpu_is_offline(j) &&
+                   pjcpu->total_above_unplug_time > insert_delay) {
+                   mutex_lock(&set_speed_lock);
+                   cpu_up(j);
+                   mutex_unlock(&set_speed_lock);
+               }
            }
        }
    }
@@ -870,6 +894,30 @@ static struct attribute_group zeneractive_attr_group = {
    .name = "zeneractive",
 };
 
+#ifdef CONFIG_EARLYSUSPEND
+/*
+ * Enable all CPUs when waking up the device
+ */
+static void zeneractive_late_resume(struct early_suspend *handler) {
+   unsigned int cpu;
+   struct cpufreq_zeneractive_cpuinfo *pcpu;
+
+   for_each_cpu_not(cpu, cpu_online_mask) {
+       pcpu = &per_cpu(cpuinfo, cpu);
+       if (!pcpu->governor_enabled)
+           continue;
+       mutex_lock(&set_speed_lock);
+       cpu_up(cpu);
+       mutex_unlock(&set_speed_lock);
+   }
+}
+
+static struct early_suspend zeneractive_early_suspend = {
+   .resume = zeneractive_late_resume,
+   .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+#endif
+
 static int cpufreq_zeneractive_idle_notifier(struct notifier_block *nb,
                        unsigned long val,
                        void *data)
@@ -922,12 +970,8 @@ static int cpufreq_governor_zeneractive(struct cpufreq_policy *policy,
            pcpu->hispeed_validate_time =
                pcpu->target_set_time;
            pcpu->governor_enabled = 1;
-           pcpu->total_below_unplug_time[0] = 0;
-           pcpu->total_below_unplug_time[1] = 0;
-           pcpu->total_below_unplug_time[2] = 0;
-           pcpu->last_time_below_unplug_time[0] = 0;
-           pcpu->last_time_below_unplug_time[1] = 0;
-           pcpu->last_time_below_unplug_time[2] = 0;
+           pcpu->total_below_unplug_time = 0;
+           pcpu->last_time_below_unplug_time = 0;
            pcpu->cur_load = 0;
            smp_wmb();
            pcpu->cpu_timer.expires =
@@ -947,6 +991,9 @@ static int cpufreq_governor_zeneractive(struct cpufreq_policy *policy,
        if (rc)
            return rc;
 
+#ifdef CONFIG_EARLYSUSPEND
+       register_early_suspend(&zeneractive_early_suspend);
+#endif
        idle_notifier_register(&cpufreq_zeneractive_idle_nb);
        break;
 
@@ -965,6 +1012,9 @@ static int cpufreq_governor_zeneractive(struct cpufreq_policy *policy,
        sysfs_remove_group(cpufreq_global_kobject,
                &zeneractive_attr_group);
 
+#ifdef CONFIG_EARLYSUSPEND
+       unregister_early_suspend(&zeneractive_early_suspend);
+#endif
        break;
 
    case CPUFREQ_GOV_LIMITS:
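
The core of this patch is the rework of cpufreq_zeneractive_hotplug_task(): the three-entry per-CPU timer arrays become a single above/below timer pair per CPU, and plug/unplug decisions are driven by the average load across all online CPUs rather than one CPU's own load. A secondary CPU is taken down only after the average has stayed at or below its unplug_load threshold for longer than unplug_delay, and brought back only after it has stayed above that threshold for longer than insert_delay. Below is a minimal userspace C sketch of that hysteresis; the thresholds, delays, and load trace are invented for illustration, and the zero-timestamp "unset" sentinel mirrors the patch.

/*
 * Userspace sketch of the patch's hotplug hysteresis.
 * Not kernel code: all names and values here are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4

static const unsigned int unplug_load[NCPUS - 1] = { 40, 60, 80 };
static const unsigned long unplug_delay = 3;   /* illustrative ticks */
static const unsigned long insert_delay = 2;

struct cpu_state {
	bool online;
	unsigned long total_below, total_above; /* accumulated hysteresis time */
	unsigned long last_below, last_above;   /* 0 means "unset", as in the patch */
};

static void hotplug_step(struct cpu_state *cpus, unsigned int avg_load,
			 unsigned long now)
{
	/* CPU 0 is never unplugged, matching the j == 0 check in the patch. */
	for (unsigned int j = 1; j < NCPUS; j++) {
		struct cpu_state *c = &cpus[j];

		if (avg_load <= unplug_load[j - 1]) {
			/* Below threshold: reset the "above" side, accumulate "below". */
			c->total_above = 0;
			c->last_above = 0;
			if (!c->last_below)
				c->last_below = now;
			c->total_below += now - c->last_below;
		} else {
			/* Above threshold: the mirror image. */
			c->total_below = 0;
			c->last_below = 0;
			if (!c->last_above)
				c->last_above = now;
			c->total_above += now - c->last_above;
		}

		if (c->online && c->total_below > unplug_delay) {
			c->online = false;              /* stands in for cpu_down(j) */
			printf("t=%lu: cpu_down(%u)\n", now, j);
		} else if (!c->online && c->total_above > insert_delay) {
			c->online = true;               /* stands in for cpu_up(j) */
			printf("t=%lu: cpu_up(%u)\n", now, j);
		}
	}
}

int main(void)
{
	struct cpu_state cpus[NCPUS] = { { false } };
	unsigned int loads[] = { 90, 90, 20, 20, 20, 20, 20, 95, 95, 95 };

	for (int i = 0; i < NCPUS; i++)
		cpus[i].online = true;
	/* Start at t=1 so a valid timestamp never collides with the 0 sentinel. */
	for (unsigned long t = 1; t <= sizeof(loads) / sizeof(loads[0]); t++)
		hotplug_step(cpus, loads[t - 1], t);
	return 0;
}

One thing the sketch makes visible: total_below/total_above add now minus the first sample's timestamp on every pass, not the delta since the previous pass, just as the kernel code does, so the counters grow superlinearly while a condition persists and the delay thresholds are crossed sooner than the raw wall-clock values would suggest.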
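
A second pattern worth highlighting: the added tmp_mask = hotplug_cpumask line snapshots the set of CPUs that requested hotplug work while hotplug_cpumask_lock is held, clears the shared mask, and releases the spinlock before the loop runs, since cpu_up()/cpu_down() may sleep and must not be called under a spinlock. Here is a minimal pthread analogue of that snapshot-then-clear pattern; the names (pending_mask, hotplug_worker) and the bit-31 quit flag are inventions for the sketch, not kernel API.

/*
 * Snapshot-then-clear under a lock, with the work done outside the lock.
 * POSIX threads stand in for the governor's kthread and spinlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static uint32_t pending_mask;          /* bit n set == "CPU" n needs service */
static pthread_mutex_t mask_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t mask_cond = PTHREAD_COND_INITIALIZER;

static void *hotplug_worker(void *unused)
{
	(void)unused;
	for (;;) {
		pthread_mutex_lock(&mask_lock);
		while (!pending_mask)
			pthread_cond_wait(&mask_cond, &mask_lock);
		uint32_t tmp_mask = pending_mask;  /* snapshot while locked */
		pending_mask = 0;                  /* clear the shared mask */
		pthread_mutex_unlock(&mask_lock);

		/* The slow work happens with the lock dropped. */
		for (unsigned int cpu = 0; cpu < 31; cpu++)
			if (tmp_mask & (1u << cpu))
				printf("servicing cpu %u\n", cpu);
		if (tmp_mask & (1u << 31))
			return NULL;               /* bit 31 is the quit flag here */
	}
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, hotplug_worker, NULL);
	pthread_mutex_lock(&mask_lock);
	pending_mask |= (1u << 1) | (1u << 2) | (1u << 31);
	pthread_cond_signal(&mask_cond);
	pthread_mutex_unlock(&mask_lock);
	pthread_join(tid, NULL);
	return 0;
}

Build with cc -pthread. Copying the mask keeps the lock's hold time tiny, and because the clear happens under the same lock as the snapshot, no request is lost: bits set after the lock is dropped simply wait for the next pass, which is why the patch can afford to service exactly the CPUs captured in tmp_mask.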