thewadegeek

AndroidGovernorPatch

Jul 6th, 2012
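
A unified diff against an Android kernel tree, adding three cpufreq governors under drivers/cpufreq/: interactiveX (the interactive governor with early-suspend/wake handling by imoseyon), Lionheart (a Conservative-based governor patched and tweaked by knzo), and OndemandX (ondemand with a suspend frequency cap). Assuming the paste is saved as AndroidGovernorPatch.diff (a hypothetical file name) at the root of the kernel tree, it would be applied with: patch -p1 < AndroidGovernorPatch.diff
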
  1. diff -uNr Vanilla/drivers/cpufreq/cpufreq_interactivex.c New/drivers/cpufreq/cpufreq_interactivex.c
  2. --- Vanilla/drivers/cpufreq/cpufreq_interactivex.c  1969-12-31 18:00:00.000000000 -0600
  3. +++ New/drivers/cpufreq/cpufreq_interactivex.c  2012-06-30 19:56:47.302115034 -0500
  4. @@ -0,0 +1,404 @@
  5. +/*
  6. + * drivers/cpufreq/cpufreq_interactivex.c
  7. + *
  8. + * Copyright (C) 2010 Google, Inc.
  9. + *
  10. + * This software is licensed under the terms of the GNU General Public
  11. + * License version 2, as published by the Free Software Foundation, and
  12. + * may be copied, distributed, and modified under those terms.
  13. + *
  14. + * This program is distributed in the hope that it will be useful,
  15. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17. + * GNU General Public License for more details.
  18. + *
  19. + * Author: Mike Chan (mike@android.com) - modified for suspend/wake by imoseyon
  20. + *
  21. + */
  22. +
  23. +#include <linux/cpu.h>
  24. +#include <linux/cpumask.h>
  25. +#include <linux/cpufreq.h>
  26. +#include <linux/mutex.h>
  27. +#include <linux/sched.h>
  28. +#include <linux/tick.h>
  29. +#include <linux/timer.h>
  30. +#include <linux/workqueue.h>
  31. +#include <linux/earlysuspend.h>
  32. +
  33. +#include <asm/cputime.h>
  34. +
  35. +static void (*pm_idle_old)(void);
  36. +static atomic_t active_count = ATOMIC_INIT(0);
  37. +
  38. +static DEFINE_PER_CPU(struct timer_list, cpu_timer);
  39. +
  40. +static DEFINE_PER_CPU(u64, time_in_idle);
  41. +static DEFINE_PER_CPU(u64, idle_exit_time);
  42. +
  43. +static struct cpufreq_policy *policy;
  44. +static unsigned int target_freq;
  45. +
  46. +/* Workqueues handle frequency scaling */
  47. +static struct workqueue_struct *up_wq;
  48. +static struct workqueue_struct *down_wq;
  49. +static struct work_struct freq_scale_work;
  50. +
  51. +static u64 freq_change_time;
  52. +static u64 freq_change_time_in_idle;
  53. +
  54. +static cpumask_t work_cpumask;
  55. +
  56. +static unsigned int suspended = 0;
  57. +static unsigned int enabled = 0;
  58. +
  59. +/*
  60. + * The minimum amount of time to spend at a frequency before we can ramp down,
  61. + * default is 50ms.
  62. + */
  63. +#define DEFAULT_MIN_SAMPLE_TIME 50000
  64. +static unsigned long min_sample_time;
  65. +
  66. +#define FREQ_THRESHOLD 1024000
  67. +static unsigned int freq_threshold;
  68. +
  69. +#define RESUME_SPEED 1024000
  70. +static unsigned int resume_speed;
  71. +
  72. +static int cpufreq_governor_interactivex(struct cpufreq_policy *policy,
  73. +       unsigned int event);
  74. +
  75. +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
  76. +static
  77. +#endif
  78. +struct cpufreq_governor cpufreq_gov_interactivex = {
  79. +   .name = "interactiveX",
  80. +   .governor = cpufreq_governor_interactivex,
  81. +#if defined(CONFIG_ARCH_MSM_SCORPION)
  82. +   .max_transition_latency = 8000000,
  83. +#else
  84. +   .max_transition_latency = 10000000,
  85. +#endif
  86. +   .owner = THIS_MODULE,
  87. +};
  88. +
  89. +static void cpufreq_interactivex_timer(unsigned long data)
  90. +{
  91. +   u64 delta_idle;
  92. +   u64 update_time;
  93. +   u64 *cpu_time_in_idle;
  94. +   u64 *cpu_idle_exit_time;
  95. +   struct timer_list *t;
  96. +
  97. +   u64 now_idle = get_cpu_idle_time_us(data,
  98. +                       &update_time);
  99. +
  100. +
  101. +   cpu_time_in_idle = &per_cpu(time_in_idle, data);
  102. +   cpu_idle_exit_time = &per_cpu(idle_exit_time, data);
  103. +
  104. +   if (update_time == *cpu_idle_exit_time)
  105. +       return;
  106. +
  107. +   delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);
  108. +
  109. +   /* Scale up if there were no idle cycles since coming out of idle */
  110. +   if (delta_idle == 0) {
  111. +       if (policy->cur == policy->max)
  112. +           return;
  113. +
  114. +       if (nr_running() < 1)
  115. +           return;
  116. +
  117. +       target_freq = policy->max;
  118. +
  119. +       cpumask_set_cpu(data, &work_cpumask);
  120. +       queue_work(up_wq, &freq_scale_work);
  121. +       return;
  122. +   }
  123. +
  124. +   /*
  125. +    * There is a window where the cpu utilization can go from low to high
  126. +    * between the timer expiring, delta_idle will be > 0 and the cpu will
  127. +    * be 100% busy, preventing idle from running, and this timer from
  128. +    * firing. So set up another timer to fire to check cpu utilization.
  129. +    * Do not setup the timer if there is no scheduled work.
  130. +    */
  131. +   t = &per_cpu(cpu_timer, data);
  132. +   if (!timer_pending(t) && nr_running() > 0) {
  133. +           *cpu_time_in_idle = get_cpu_idle_time_us(
  134. +                   data, cpu_idle_exit_time);
  135. +           mod_timer(t, jiffies + 2);
  136. +   }
  137. +
  138. +   if (policy->cur == policy->min)
  139. +       return;
  140. +
  141. +   /*
  142. +    * Do not scale down unless we have been at this frequency for the
  143. +    * minimum sample time.
  144. +    */
  145. +   if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
  146. +       return;
  147. +
  148. +   target_freq = policy->min;
  149. +   cpumask_set_cpu(data, &work_cpumask);
  150. +   queue_work(down_wq, &freq_scale_work);
  151. +}
  152. +
  153. +static void cpufreq_idle(void)
  154. +{
  155. +   struct timer_list *t;
  156. +   u64 *cpu_time_in_idle;
  157. +   u64 *cpu_idle_exit_time;
  158. +
  159. +   pm_idle_old();
  160. +
  161. +   if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
  162. +           return;
  163. +
  164. +   /* Timer to fire in 1-2 ticks, jiffie aligned. */
  165. +   t = &per_cpu(cpu_timer, smp_processor_id());
  166. +   cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
  167. +   cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());
  168. +
  169. +   if (timer_pending(t) == 0) {
  170. +       *cpu_time_in_idle = get_cpu_idle_time_us(
  171. +               smp_processor_id(), cpu_idle_exit_time);
  172. +       mod_timer(t, jiffies + 2);
  173. +   }
  174. +}
  175. +
  176. +/*
  177. + * Choose the cpu frequency based on the load. For now choose the minimum
  178. + * frequency that will satisfy the load, which is not always the lowest power.
  179. + */
  180. +static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu)
  181. +{
  182. +   unsigned int delta_time;
  183. +   unsigned int idle_time;
  184. +   unsigned int cpu_load;
  185. +   unsigned int newfreq;
  186. +   u64 current_wall_time;
  187. +   u64 current_idle_time;
  188. +
  189. +   current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);
  190. +
  191. +   idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
  192. +   delta_time = (unsigned int) current_wall_time - freq_change_time;
  193. +
  194. +   cpu_load = 100 * (delta_time - idle_time) / delta_time;
  195. +
  196. +   if (cpu_load > 98) newfreq = policy->max;
  197. +   else newfreq = policy->cur * cpu_load / 100;   
  198. +
  199. +   return newfreq;
  200. +}
  201. +
  202. +
  203. +/* We use the same work function to scale up and down */
  204. +static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
  205. +{
  206. +   unsigned int cpu;
  207. +   unsigned int newtarget;
  208. +   cpumask_t tmp_mask = work_cpumask;
  209. +   newtarget = freq_threshold;
  210. +
  211. +   for_each_cpu(cpu, tmp_mask) {
  212. +     if (!suspended) {
  213. +       if (target_freq == policy->max) {
  214. +           if (nr_running() == 1) {
  215. +               cpumask_clear_cpu(cpu, &work_cpumask);
  216. +               return;
  217. +           }
  218. +//         __cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
  219. +           __cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H);
  220. +       } else {
  221. +           target_freq = cpufreq_interactivex_calc_freq(cpu);
  222. +           __cpufreq_driver_target(policy, target_freq,
  223. +                           CPUFREQ_RELATION_L);
  224. +       }
  225. +     }
  226. +     freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
  227. +     cpumask_clear_cpu(cpu, &work_cpumask);
  228. +   }
  229. +
  230. +
  231. +}
  232. +
  233. +static ssize_t show_min_sample_time(struct kobject *kobj,
  234. +               struct attribute *attr, char *buf)
  235. +{
  236. +   return sprintf(buf, "%lu\n", min_sample_time);
  237. +}
  238. +
  239. +static ssize_t store_min_sample_time(struct kobject *kobj,
  240. +           struct attribute *attr, const char *buf, size_t count)
  241. +{
  242. +   return strict_strtoul(buf, 0, &min_sample_time);
  243. +}
  244. +
  245. +static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
  246. +       show_min_sample_time, store_min_sample_time);
  247. +
  248. +static struct attribute *interactivex_attributes[] = {
  249. +   &min_sample_time_attr.attr,
  250. +   NULL,
  251. +};
  252. +
  253. +static struct attribute_group interactivex_attr_group = {
  254. +   .attrs = interactivex_attributes,
  255. +   .name = "interactiveX",
  256. +};
  257. +
  258. +static void interactivex_suspend(int suspend)
  259. +{
  260. +   unsigned int max_speed;
  261. +
  262. +   max_speed = resume_speed;
  263. +
  264. +   if (!enabled) return;
  265. +        if (!suspend) { // resume at resume_speed:
  266. +       suspended = 0;
  267. +                __cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L);
  268. +                pr_info("[imoseyon] interactiveX awake at %d\n", policy->cur);
  269. +        } else {
  270. +       suspended = 1;
  271. +                __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
  272. +                pr_info("[imoseyon] interactiveX suspended at %d\n", policy->cur);
  273. +        }
  274. +}
  275. +
  276. +static void interactivex_early_suspend(struct early_suspend *handler) {
  277. +     interactivex_suspend(1);
  278. +}
  279. +
  280. +static void interactivex_late_resume(struct early_suspend *handler) {
  281. +     interactivex_suspend(0);
  282. +}
  283. +
  284. +static struct early_suspend interactivex_power_suspend = {
  285. +        .suspend = interactivex_early_suspend,
  286. +        .resume = interactivex_late_resume,
  287. +        .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
  288. +};
  289. +
  290. +static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
  291. +       unsigned int event)
  292. +{
  293. +   int rc;
  294. +   unsigned int min_freq = ~0;
  295. +   unsigned int max_freq = 0;
  296. +   unsigned int i;
  297. +   struct cpufreq_frequency_table *freq_table;
  298. +
  299. +   switch (event) {
  300. +   case CPUFREQ_GOV_START:
  301. +       if (!cpu_online(new_policy->cpu))
  302. +           return -EINVAL;
  303. +
  304. +       /*
  305. +        * Do not register the idle hook and create sysfs
  306. +        * entries if we have already done so.
  307. +        */
  308. +       if (atomic_inc_return(&active_count) > 1)
  309. +           return 0;
  310. +
  311. +       rc = sysfs_create_group(cpufreq_global_kobject,
  312. +               &interactivex_attr_group);
  313. +       if (rc)
  314. +           return rc;
  315. +
  316. +       pm_idle_old = pm_idle;
  317. +       pm_idle = cpufreq_idle;
  318. +       policy = new_policy;
  319. +       enabled = 1;
  320. +           register_early_suspend(&interactivex_power_suspend);
  321. +           pr_info("[imoseyon] interactiveX active\n");
  322. +       freq_table = cpufreq_frequency_get_table(new_policy->cpu);
  323. +       for (i = 0; (freq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
  324. +           unsigned int freq = freq_table[i].frequency;
  325. +           if (freq == CPUFREQ_ENTRY_INVALID) {
  326. +               continue;
  327. +           }
  328. +           if (freq < min_freq)   
  329. +               min_freq = freq;
  330. +           if (freq > max_freq)
  331. +               max_freq = freq;
  332. +       }
  333. +       resume_speed = freq_table[(i-1)/2].frequency > min_freq ? freq_table[(i-1)/2].frequency : max_freq;      // mid-range frequency if enough freq bins are available, else max
  334. +       freq_threshold = max_freq;
  335. +       break;
  336. +
  337. +   case CPUFREQ_GOV_STOP:
  338. +       if (atomic_dec_return(&active_count) > 1)
  339. +           return 0;
  340. +
  341. +       sysfs_remove_group(cpufreq_global_kobject,
  342. +               &interactivex_attr_group);
  343. +
  344. +       pm_idle = pm_idle_old;
  345. +       del_timer(&per_cpu(cpu_timer, new_policy->cpu));
  346. +       enabled = 0;
  347. +           unregister_early_suspend(&interactivex_power_suspend);
  348. +           pr_info("[imoseyon] interactiveX inactive\n");
  349. +           break;
  350. +
  351. +   case CPUFREQ_GOV_LIMITS:
  352. +       if (new_policy->max < new_policy->cur)
  353. +           __cpufreq_driver_target(new_policy,
  354. +                   new_policy->max, CPUFREQ_RELATION_H);
  355. +       else if (new_policy->min > new_policy->cur)
  356. +           __cpufreq_driver_target(new_policy,
  357. +                   new_policy->min, CPUFREQ_RELATION_L);
  358. +       break;
  359. +   }
  360. +   return 0;
  361. +}
  362. +
  363. +static int __init cpufreq_interactivex_init(void)
  364. +{
  365. +   unsigned int i;
  366. +   struct timer_list *t;
  367. +   min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
  368. +   resume_speed = RESUME_SPEED;
  369. +   freq_threshold = FREQ_THRESHOLD;
  370. +
  371. +   /* Initialize per-cpu timers */
  372. +   for_each_possible_cpu(i) {
  373. +       t = &per_cpu(cpu_timer, i);
  374. +       init_timer_deferrable(t);
  375. +       t->function = cpufreq_interactivex_timer;
  376. +       t->data = i;
  377. +   }
  378. +
  379. +   /* Scale up is high priority */
  380. +   up_wq = create_workqueue("kinteractive_up");
  381. +   down_wq = create_workqueue("kinteractive_down");
  382. +
  383. +   INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work);
  384. +
  385. +        pr_info("[imoseyon] interactiveX enter\n");
  386. +   return cpufreq_register_governor(&cpufreq_gov_interactivex);
  387. +}
  388. +
  389. +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
  390. +fs_initcall(cpufreq_interactivex_init);
  391. +#else
  392. +module_init(cpufreq_interactivex_init);
  393. +#endif
  394. +
  395. +static void __exit cpufreq_interactivex_exit(void)
  396. +{
  397. +        pr_info("[imoseyon] interactiveX exit\n");
  398. +   cpufreq_unregister_governor(&cpufreq_gov_interactivex);
  399. +   destroy_workqueue(up_wq);
  400. +   destroy_workqueue(down_wq);
  401. +}
  402. +
  403. +module_exit(cpufreq_interactivex_exit);
  404. +
  405. +MODULE_AUTHOR("Mike Chan <mike@android.com>");
  406. +MODULE_DESCRIPTION("'cpufreq_interactiveX' - A cpufreq governor for "
  407. +   "Latency sensitive workloads");
  408. +MODULE_LICENSE("GPL");
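
Not part of the patch: a minimal userspace sketch of how the governor above would be exercised once the patch is applied and the kernel rebuilt. Paths assume sysfs is mounted at /sys; the interactiveX directory comes from the attribute group this file registers on the global cpufreq kobject.

/*
 * Hypothetical usage sketch (not from the patch): select interactiveX on
 * cpu0 and read back the min_sample_time tunable it exports.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char buf[32];
	FILE *f;

	/* cpufreq matches governor names case-insensitively. */
	if (write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
		      "interactiveX\n"))
		return 1;

	/* The tunable lives under the global cpufreq kobject. */
	f = fopen("/sys/devices/system/cpu/cpufreq/interactiveX/min_sample_time", "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("min_sample_time: %s", buf);	/* default 50000 (us) */
	fclose(f);
	return 0;
}
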
  409. diff -uNr Vanilla/drivers/cpufreq/cpufreq_lionheart.c New/drivers/cpufreq/cpufreq_lionheart.c
  410. --- Vanilla/drivers/cpufreq/cpufreq_lionheart.c 1969-12-31 18:00:00.000000000 -0600
  411. +++ New/drivers/cpufreq/cpufreq_lionheart.c 2012-06-30 19:56:47.302115034 -0500
  412. @@ -0,0 +1,543 @@
  413. +/*
  414. + * drivers/cpufreq/cpufreq_lionheart.c
  415. + *
  416. + * Patched & tweaked: knzo
  417. + *
  418. + * Based on the Conservative governor by:
  419. + *
  420. + *    Copyright (C)  2001 Russell King
  421. + *              (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
  422. + *                        Jun Nakajima <jun.nakajima@intel.com>
  423. + *              (C)  2009 Alexander Clouter <alex@digriz.org.uk>
  424. + *
  425. + * This software is licensed under the terms of the GNU General Public
  426. + * License version 2, as published by the Free Software Foundation, and
  427. + * may be copied, distributed, and modified under those terms.
  428. + *
  429. + * This program is distributed in the hope that it will be useful,
  430. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  431. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  432. + * GNU General Public License for more details.
  433. + *
  434. + */
  435. +
  436. +#include <linux/kernel.h>
  437. +#include <linux/module.h>
  438. +#include <linux/init.h>
  439. +#include <linux/cpufreq.h>
  440. +#include <linux/cpu.h>
  441. +#include <linux/jiffies.h>
  442. +#include <linux/kernel_stat.h>
  443. +#include <linux/mutex.h>
  444. +#include <linux/hrtimer.h>
  445. +#include <linux/tick.h>
  446. +#include <linux/ktime.h>
  447. +#include <linux/sched.h>
  448. +
  449. +#define DEF_FREQUENCY_UP_THRESHOLD     (70)
  450. +#define DEF_FREQUENCY_DOWN_THRESHOLD       (30)
  451. +#define MIN_SAMPLING_RATE_RATIO            (2)
  452. +
  453. +static unsigned int min_sampling_rate;
  454. +
  455. +#define LATENCY_MULTIPLIER         (1000)
  456. +#define MIN_LATENCY_MULTIPLIER         (100)
  457. +#define DEF_SAMPLING_DOWN_FACTOR       (1)
  458. +#define MAX_SAMPLING_DOWN_FACTOR       (10)
  459. +#define TRANSITION_LATENCY_LIMIT       (10 * 1000 * 1000)
  460. +
  461. +static void do_dbs_timer(struct work_struct *work);
  462. +
  463. +struct cpu_dbs_info_s {
  464. +   cputime64_t prev_cpu_idle;
  465. +   cputime64_t prev_cpu_wall;
  466. +   cputime64_t prev_cpu_nice;
  467. +   struct cpufreq_policy *cur_policy;
  468. +   struct delayed_work work;
  469. +   unsigned int down_skip;
  470. +   unsigned int requested_freq;
  471. +   int cpu;
  472. +   unsigned int enable:1;
  473. +
  474. +   struct mutex timer_mutex;
  475. +};
  476. +static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
  477. +
  478. +static unsigned int dbs_enable;   
  479. +
  480. +static DEFINE_MUTEX(dbs_mutex);
  481. +
  482. +static struct dbs_tuners {
  483. +   unsigned int sampling_rate;
  484. +   unsigned int sampling_down_factor;
  485. +   unsigned int up_threshold;
  486. +   unsigned int down_threshold;
  487. +   unsigned int ignore_nice;
  488. +   unsigned int freq_step;
  489. +} dbs_tuners_ins = {
  490. +   .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
  491. +   .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
  492. +   .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
  493. +   .ignore_nice = 0,
  494. +   .freq_step = 5,
  495. +};
  496. +
  497. +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
  498. +                           cputime64_t *wall)
  499. +{
  500. +   cputime64_t idle_time;
  501. +   cputime64_t cur_wall_time;
  502. +   cputime64_t busy_time;
  503. +
  504. +   cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
  505. +   busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
  506. +           kstat_cpu(cpu).cpustat.system);
  507. +
  508. +   busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
  509. +   busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
  510. +   busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
  511. +   busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
  512. +
  513. +   idle_time = cputime64_sub(cur_wall_time, busy_time);
  514. +   if (wall)
  515. +       *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
  516. +
  517. +   return (cputime64_t)jiffies_to_usecs(idle_time);
  518. +}
  519. +
  520. +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
  521. +{
  522. +   u64 idle_time = get_cpu_idle_time_us(cpu, wall);
  523. +
  524. +   if (idle_time == -1ULL)
  525. +       return get_cpu_idle_time_jiffy(cpu, wall);
  526. +
  527. +   return idle_time;
  528. +}
  529. +
  530. +static int
  531. +dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  532. +            void *data)
  533. +{
  534. +   struct cpufreq_freqs *freq = data;
  535. +   struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
  536. +                           freq->cpu);
  537. +
  538. +   struct cpufreq_policy *policy;
  539. +
  540. +   if (!this_dbs_info->enable)
  541. +       return 0;
  542. +
  543. +   policy = this_dbs_info->cur_policy;
  544. +
  545. +   if (this_dbs_info->requested_freq > policy->max
  546. +           || this_dbs_info->requested_freq < policy->min)
  547. +       this_dbs_info->requested_freq = freq->new;
  548. +
  549. +   return 0;
  550. +}
  551. +
  552. +static struct notifier_block dbs_cpufreq_notifier_block = {
  553. +   .notifier_call = dbs_cpufreq_notifier
  554. +};
  555. +
  556. +static ssize_t show_sampling_rate_min(struct kobject *kobj,
  557. +                     struct attribute *attr, char *buf)
  558. +{
  559. +   return sprintf(buf, "%u\n", min_sampling_rate);
  560. +}
  561. +
  562. +define_one_global_ro(sampling_rate_min);
  563. +
  564. +#define show_one(file_name, object)                    \
  565. +static ssize_t show_##file_name                        \
  566. +(struct kobject *kobj, struct attribute *attr, char *buf)      \
  567. +{                                  \
  568. +   return sprintf(buf, "%u\n", dbs_tuners_ins.object);     \
  569. +}
  570. +
  571. +show_one(sampling_rate, sampling_rate);
  572. +show_one(sampling_down_factor, sampling_down_factor);
  573. +show_one(up_threshold, up_threshold);
  574. +show_one(down_threshold, down_threshold);
  575. +show_one(ignore_nice_load, ignore_nice);
  576. +show_one(freq_step, freq_step);
  577. +
  578. +static ssize_t store_sampling_down_factor(struct kobject *a,
  579. +                     struct attribute *b,
  580. +                     const char *buf, size_t count)
  581. +{
  582. +   unsigned int input;
  583. +   int ret;
  584. +   ret = sscanf(buf, "%u", &input);
  585. +
  586. +   if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
  587. +       return -EINVAL;
  588. +
  589. +   dbs_tuners_ins.sampling_down_factor = input;
  590. +   return count;
  591. +}
  592. +
  593. +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
  594. +                  const char *buf, size_t count)
  595. +{
  596. +   unsigned int input;
  597. +   int ret;
  598. +   ret = sscanf(buf, "%u", &input);
  599. +
  600. +   if (ret != 1)
  601. +       return -EINVAL;
  602. +
  603. +   dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
  604. +   return count;
  605. +}
  606. +
  607. +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
  608. +                 const char *buf, size_t count)
  609. +{
  610. +   unsigned int input;
  611. +   int ret;
  612. +   ret = sscanf(buf, "%u", &input);
  613. +
  614. +   if (ret != 1 || input > 100 ||
  615. +           input <= dbs_tuners_ins.down_threshold)
  616. +       return -EINVAL;
  617. +
  618. +   dbs_tuners_ins.up_threshold = input;
  619. +   return count;
  620. +}
  621. +
  622. +static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
  623. +                   const char *buf, size_t count)
  624. +{
  625. +   unsigned int input;
  626. +   int ret;
  627. +   ret = sscanf(buf, "%u", &input);
  628. +
  629. +   if (ret != 1 || input < 11 || input > 100 ||
  630. +           input >= dbs_tuners_ins.up_threshold)
  631. +       return -EINVAL;
  632. +
  633. +   dbs_tuners_ins.down_threshold = input;
  634. +   return count;
  635. +}
  636. +
  637. +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
  638. +                     const char *buf, size_t count)
  639. +{
  640. +   unsigned int input;
  641. +   int ret;
  642. +
  643. +   unsigned int j;
  644. +
  645. +   ret = sscanf(buf, "%u", &input);
  646. +   if (ret != 1)
  647. +       return -EINVAL;
  648. +
  649. +   if (input > 1)
  650. +       input = 1;
  651. +
  652. +   if (input == dbs_tuners_ins.ignore_nice)
  653. +       return count;
  654. +
  655. +   dbs_tuners_ins.ignore_nice = input;
  656. +
  657. +   for_each_online_cpu(j) {
  658. +       struct cpu_dbs_info_s *dbs_info;
  659. +       dbs_info = &per_cpu(cs_cpu_dbs_info, j);
  660. +       dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
  661. +                       &dbs_info->prev_cpu_wall);
  662. +       if (dbs_tuners_ins.ignore_nice)
  663. +           dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
  664. +   }
  665. +   return count;
  666. +}
  667. +
  668. +static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
  669. +                  const char *buf, size_t count)
  670. +{
  671. +   unsigned int input;
  672. +   int ret;
  673. +   ret = sscanf(buf, "%u", &input);
  674. +
  675. +   if (ret != 1)
  676. +       return -EINVAL;
  677. +
  678. +   if (input > 100)
  679. +       input = 100;
  680. +
  681. +   dbs_tuners_ins.freq_step = input;
  682. +   return count;
  683. +}
  684. +
  685. +define_one_global_rw(sampling_rate);
  686. +define_one_global_rw(sampling_down_factor);
  687. +define_one_global_rw(up_threshold);
  688. +define_one_global_rw(down_threshold);
  689. +define_one_global_rw(ignore_nice_load);
  690. +define_one_global_rw(freq_step);
  691. +
  692. +static struct attribute *dbs_attributes[] = {
  693. +   &sampling_rate_min.attr,
  694. +   &sampling_rate.attr,
  695. +   &sampling_down_factor.attr,
  696. +   &up_threshold.attr,
  697. +   &down_threshold.attr,
  698. +   &ignore_nice_load.attr,
  699. +   &freq_step.attr,
  700. +   NULL
  701. +};
  702. +
  703. +static struct attribute_group dbs_attr_group = {
  704. +   .attrs = dbs_attributes,
  705. +   .name = "Lionheart",
  706. +};
  707. +
  708. +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
  709. +{
  710. +   unsigned int load = 0;
  711. +   unsigned int max_load = 0;
  712. +   unsigned int freq_target;
  713. +
  714. +   struct cpufreq_policy *policy;
  715. +   unsigned int j;
  716. +
  717. +   policy = this_dbs_info->cur_policy;
  718. +
  719. +   for_each_cpu(j, policy->cpus) {
  720. +       struct cpu_dbs_info_s *j_dbs_info;
  721. +       cputime64_t cur_wall_time, cur_idle_time;
  722. +       unsigned int idle_time, wall_time;
  723. +
  724. +       j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
  725. +
  726. +       cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
  727. +
  728. +       wall_time = (unsigned int) cputime64_sub(cur_wall_time,
  729. +               j_dbs_info->prev_cpu_wall);
  730. +       j_dbs_info->prev_cpu_wall = cur_wall_time;
  731. +
  732. +       idle_time = (unsigned int) cputime64_sub(cur_idle_time,
  733. +               j_dbs_info->prev_cpu_idle);
  734. +       j_dbs_info->prev_cpu_idle = cur_idle_time;
  735. +
  736. +       if (dbs_tuners_ins.ignore_nice) {
  737. +           cputime64_t cur_nice;
  738. +           unsigned long cur_nice_jiffies;
  739. +
  740. +           cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
  741. +                    j_dbs_info->prev_cpu_nice);
  742. +
  743. +           cur_nice_jiffies = (unsigned long)
  744. +                   cputime64_to_jiffies64(cur_nice);
  745. +
  746. +           j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
  747. +           idle_time += jiffies_to_usecs(cur_nice_jiffies);
  748. +       }
  749. +
  750. +       if (unlikely(!wall_time || wall_time < idle_time))
  751. +           continue;
  752. +
  753. +       load = 100 * (wall_time - idle_time) / wall_time;
  754. +
  755. +       if (load > max_load)
  756. +           max_load = load;
  757. +   }
  758. +
  759. +   if (dbs_tuners_ins.freq_step == 0)
  760. +       return;
  761. +
  762. +   if (max_load > dbs_tuners_ins.up_threshold) {
  763. +       this_dbs_info->down_skip = 0;
  764. +
  765. +       if (this_dbs_info->requested_freq == policy->max)
  766. +           return;
  767. +
  768. +       freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
  769. +
  770. +       if (unlikely(freq_target == 0))
  771. +           freq_target = 5;
  772. +
  773. +       this_dbs_info->requested_freq += freq_target;
  774. +       if (this_dbs_info->requested_freq > policy->max)
  775. +           this_dbs_info->requested_freq = policy->max;
  776. +
  777. +       __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
  778. +           CPUFREQ_RELATION_H);
  779. +       return;
  780. +   }
  781. +
  782. +   if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
  783. +       freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
  784. +
  785. +       this_dbs_info->requested_freq -= freq_target;
  786. +       if (this_dbs_info->requested_freq < policy->min)
  787. +           this_dbs_info->requested_freq = policy->min;
  788. +
  789. +       if (policy->cur == policy->min)
  790. +           return;
  791. +
  792. +       __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
  793. +               CPUFREQ_RELATION_H);
  794. +       return;
  795. +   }
  796. +}
  797. +
  798. +static void do_dbs_timer(struct work_struct *work)
  799. +{
  800. +   struct cpu_dbs_info_s *dbs_info =
  801. +       container_of(work, struct cpu_dbs_info_s, work.work);
  802. +   unsigned int cpu = dbs_info->cpu;
  803. +
  804. +   int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
  805. +
  806. +   // delay -= jiffies % delay;
  807. +
  808. +   mutex_lock(&dbs_info->timer_mutex);
  809. +
  810. +   dbs_check_cpu(dbs_info);
  811. +
  812. +   schedule_delayed_work_on(cpu, &dbs_info->work, delay);
  813. +   mutex_unlock(&dbs_info->timer_mutex);
  814. +}
  815. +
  816. +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
  817. +{
  818. +   int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
  819. +   // delay -= jiffies % delay;
  820. +
  821. +   dbs_info->enable = 1;
  822. +   INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
  823. +   schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
  824. +}
  825. +
  826. +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
  827. +{
  828. +   dbs_info->enable = 0;
  829. +   cancel_delayed_work_sync(&dbs_info->work);
  830. +}
  831. +
  832. +static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
  833. +                  unsigned int event)
  834. +{
  835. +   unsigned int cpu = policy->cpu;
  836. +   struct cpu_dbs_info_s *this_dbs_info;
  837. +   unsigned int j;
  838. +   int rc;
  839. +
  840. +   this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
  841. +
  842. +   switch (event) {
  843. +   case CPUFREQ_GOV_START:
  844. +       if ((!cpu_online(cpu)) || (!policy->cur))
  845. +           return -EINVAL;
  846. +
  847. +       mutex_lock(&dbs_mutex);
  848. +
  849. +       for_each_cpu(j, policy->cpus) {
  850. +           struct cpu_dbs_info_s *j_dbs_info;
  851. +           j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
  852. +           j_dbs_info->cur_policy = policy;
  853. +
  854. +           j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
  855. +                       &j_dbs_info->prev_cpu_wall);
  856. +           if (dbs_tuners_ins.ignore_nice) {
  857. +               j_dbs_info->prev_cpu_nice =
  858. +                       kstat_cpu(j).cpustat.nice;
  859. +           }
  860. +       }
  861. +       this_dbs_info->down_skip = 0;
  862. +       this_dbs_info->requested_freq = policy->cur;
  863. +
  864. +       mutex_init(&this_dbs_info->timer_mutex);
  865. +       dbs_enable++;
  866. +
  867. +       if (dbs_enable == 1) {
  868. +           unsigned int latency;
  869. +
  870. +           latency = policy->cpuinfo.transition_latency / 1000;
  871. +           if (latency == 0)
  872. +               latency = 1;
  873. +
  874. +           rc = sysfs_create_group(cpufreq_global_kobject,
  875. +                       &dbs_attr_group);
  876. +           if (rc) {
  877. +               mutex_unlock(&dbs_mutex);
  878. +               return rc;
  879. +           }
  880. +
  881. +           min_sampling_rate = 10000;
  882. +           dbs_tuners_ins.sampling_rate = 10000;
  883. +
  884. +           cpufreq_register_notifier(
  885. +                   &dbs_cpufreq_notifier_block,
  886. +                   CPUFREQ_TRANSITION_NOTIFIER);
  887. +       }
  888. +       mutex_unlock(&dbs_mutex);
  889. +
  890. +       dbs_timer_init(this_dbs_info);
  891. +
  892. +       break;
  893. +
  894. +   case CPUFREQ_GOV_STOP:
  895. +       dbs_timer_exit(this_dbs_info);
  896. +
  897. +       mutex_lock(&dbs_mutex);
  898. +       dbs_enable--;
  899. +       mutex_destroy(&this_dbs_info->timer_mutex);
  900. +
  901. +       if (dbs_enable == 0)
  902. +           cpufreq_unregister_notifier(
  903. +                   &dbs_cpufreq_notifier_block,
  904. +                   CPUFREQ_TRANSITION_NOTIFIER);
  905. +
  906. +       mutex_unlock(&dbs_mutex);
  907. +       if (!dbs_enable)
  908. +           sysfs_remove_group(cpufreq_global_kobject,
  909. +                      &dbs_attr_group);
  910. +
  911. +       break;
  912. +
  913. +   case CPUFREQ_GOV_LIMITS:
  914. +       mutex_lock(&this_dbs_info->timer_mutex);
  915. +       if (policy->max < this_dbs_info->cur_policy->cur)
  916. +           __cpufreq_driver_target(
  917. +                   this_dbs_info->cur_policy,
  918. +                   policy->max, CPUFREQ_RELATION_H);
  919. +       else if (policy->min > this_dbs_info->cur_policy->cur)
  920. +           __cpufreq_driver_target(
  921. +                   this_dbs_info->cur_policy,
  922. +                   policy->min, CPUFREQ_RELATION_L);
  923. +       mutex_unlock(&this_dbs_info->timer_mutex);
  924. +
  925. +       break;
  926. +   }
  927. +   return 0;
  928. +}
  929. +
  930. +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART
  931. +static
  932. +#endif
  933. +struct cpufreq_governor cpufreq_gov_lionheart = {
  934. +   .name           = "Lionheart",
  935. +   .governor       = cpufreq_governor_dbs,
  936. +   .max_transition_latency = TRANSITION_LATENCY_LIMIT,
  937. +   .owner          = THIS_MODULE,
  938. +};
  939. +
  940. +static int __init cpufreq_gov_dbs_init(void)
  941. +{
  942. +   return cpufreq_register_governor(&cpufreq_gov_lionheart);
  943. +}
  944. +
  945. +static void __exit cpufreq_gov_dbs_exit(void)
  946. +{
  947. +   cpufreq_unregister_governor(&cpufreq_gov_lionheart);
  948. +}
  949. +
  950. +MODULE_AUTHOR("knzo");
  951. +MODULE_DESCRIPTION("'cpufreq_lionheart' - A brave and agile conservative-based governor.");
  952. +MODULE_LICENSE("GPL");
  953. +
  954. +fs_initcall(cpufreq_gov_dbs_init);
  955. +module_exit(cpufreq_gov_dbs_exit);
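
Likewise, a hypothetical usage sketch (not from the patch) for Lionheart: because of the attribute group name above, its tunables appear under /sys/devices/system/cpu/cpufreq/Lionheart/ once the governor is registered.

/*
 * Hypothetical sketch: switch cpu0 to Lionheart and widen its thresholds.
 * Assumes the patch is applied and sysfs is mounted at /sys.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	if (write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
		      "Lionheart\n"))
		return 1;
	/* store_up_threshold() rejects values over 100 or <= down_threshold. */
	if (write_str("/sys/devices/system/cpu/cpufreq/Lionheart/up_threshold",
		      "80\n"))
		return 1;
	/* store_down_threshold() requires 11 <= value < up_threshold. */
	if (write_str("/sys/devices/system/cpu/cpufreq/Lionheart/down_threshold",
		      "40\n"))
		return 1;
	return 0;
}
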
  956. diff -uNr Vanilla/drivers/cpufreq/cpufreq_ondemandx.c New/drivers/cpufreq/cpufreq_ondemandx.c
  957. --- Vanilla/drivers/cpufreq/cpufreq_ondemandx.c 1969-12-31 18:00:00.000000000 -0600
  958. +++ New/drivers/cpufreq/cpufreq_ondemandx.c 2012-06-30 20:32:38.418143218 -0500
  959. @@ -0,0 +1,862 @@
  960. +/*
  961. +* drivers/cpufreq/cpufreq_ondemandx.c
  962. +*
  963. +* Copyright (C) 2001 Russell King
  964. +* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
  965. +* Jun Nakajima <jun.nakajima@intel.com>
  966. +*
  967. +* This program is free software; you can redistribute it and/or modify
  968. +* it under the terms of the GNU General Public License version 2 as
  969. +* published by the Free Software Foundation.
  970. +*/
  971. +
  972. +#include <linux/kernel.h>
  973. +#include <linux/module.h>
  974. +#include <linux/init.h>
  975. +#include <linux/cpufreq.h>
  976. +#include <linux/cpu.h>
  977. +#include <linux/jiffies.h>
  978. +#include <linux/kernel_stat.h>
  979. +#include <linux/mutex.h>
  980. +#include <linux/hrtimer.h>
  981. +#include <linux/tick.h>
  982. +#include <linux/ktime.h>
  983. +#include <linux/sched.h>
  984. +#include <linux/earlysuspend.h>
  985. +
  986. +/*
  987. +* dbs is used in this file as a shortform for demand-based switching.
  988. +* It helps to keep variable names smaller and simpler.
  989. +*/
  990. +
  991. +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (15)
  992. +#define DEF_FREQUENCY_UP_THRESHOLD (85)
  993. +#define DEF_SAMPLING_DOWN_FACTOR (50)
  994. +#define MAX_SAMPLING_DOWN_FACTOR (100000)
  995. +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
  996. +#define MICRO_FREQUENCY_UP_THRESHOLD (95)
  997. +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
  998. +#define MIN_FREQUENCY_UP_THRESHOLD (11)
  999. +#define MAX_FREQUENCY_UP_THRESHOLD (100)
  1000. +#define DEF_SUSPEND_FREQ (384000)
  1001. +
  1002. +/*
  1003. +* The polling frequency of this governor depends on the capability of
  1004. +* the processor. Default polling frequency is 1000 times the transition
  1005. +* latency of the processor. The governor will work on any processor with
  1006. +* transition latency <= 10mS, using appropriate sampling
  1007. +* rate.
  1008. +* For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
  1009. +* this governor will not work.
  1010. +* All times here are in uS.
  1011. +*/
  1012. +#define MIN_SAMPLING_RATE_RATIO (2)
  1013. +
  1014. +static unsigned int min_sampling_rate;
  1015. +
  1016. +#define LATENCY_MULTIPLIER (1000)
  1017. +#define MIN_LATENCY_MULTIPLIER (100)
  1018. +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
  1019. +
  1020. +static void do_dbs_timer(struct work_struct *work);
  1021. +static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
  1022. +unsigned int event);
  1023. +
  1024. +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX
  1025. +static
  1026. +#endif
  1027. +struct cpufreq_governor cpufreq_gov_ondemandx = {
  1028. +       .name = "ondemandx",
  1029. +       .governor = cpufreq_governor_dbs,
  1030. +       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
  1031. +       .owner = THIS_MODULE,
  1032. +};
  1033. +
  1034. +/* Sampling types */
  1035. +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
  1036. +
  1037. +struct cpu_dbs_info_s {
  1038. +cputime64_t prev_cpu_idle;
  1039. +cputime64_t prev_cpu_iowait;
  1040. +cputime64_t prev_cpu_wall;
  1041. +cputime64_t prev_cpu_nice;
  1042. +struct cpufreq_policy *cur_policy;
  1043. +struct delayed_work work;
  1044. +struct cpufreq_frequency_table *freq_table;
  1045. +unsigned int freq_lo;
  1046. +unsigned int freq_lo_jiffies;
  1047. +unsigned int freq_hi_jiffies;
  1048. +unsigned int rate_mult;
  1049. +int cpu;
  1050. +unsigned int sample_type:1;
  1051. +/*
  1052. +* percpu mutex that serializes governor limit change with
  1053. +* do_dbs_timer invocation. We do not want do_dbs_timer to run
  1054. +* when user is changing the governor or limits.
  1055. +*/
  1056. +struct mutex timer_mutex;
  1057. +};
  1058. +static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
  1059. +
  1060. +/*
  1061. +* dbs_mutex protects dbs_enable in governor start/stop.
  1062. +*/
  1063. +static DEFINE_MUTEX(dbs_mutex);
  1064. +
  1065. +static struct dbs_tuners {
  1066. +unsigned int sampling_rate;
  1067. +unsigned int up_threshold;
  1068. +unsigned int down_differential;
  1069. +unsigned int ignore_nice;
  1070. +unsigned int sampling_down_factor;
  1071. +unsigned int powersave_bias;
  1072. +unsigned int io_is_busy;
  1073. +unsigned int suspend_freq;
  1074. +} dbs_tuners_ins = {
  1075. +.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
  1076. +.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
  1077. +.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
  1078. +.ignore_nice = 0,
  1079. +.powersave_bias = 50,
  1080. +.suspend_freq = DEF_SUSPEND_FREQ,
  1081. +};
  1082. +
  1083. +
  1084. +
  1085. +static unsigned int dbs_enable=0;  /* number of CPUs using this policy */
  1086. +
  1087. +// used for imoseyon's mods
  1088. +static unsigned int suspended = 0;
  1089. +static void ondemandx_suspend(int suspend)
  1090. +{
  1091. +        struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, smp_processor_id());
  1092. +        if (dbs_enable==0) return;
  1093. +        if (!suspend) { // resume at max speed:
  1094. +                suspended = 0;
  1095. +                __cpufreq_driver_target(dbs_info->cur_policy, dbs_info->cur_policy->max,
  1096. +CPUFREQ_RELATION_L);
  1097. +                pr_info("[imoseyon] ondemandx awake at %d\n", dbs_info->cur_policy->cur);
  1098. +        } else {
  1099. +                suspended = 1;
  1100. +// let's give it a little breathing room
  1101. +                __cpufreq_driver_target(dbs_info->cur_policy, dbs_tuners_ins.suspend_freq, CPUFREQ_RELATION_H);
  1102. +                pr_info("[imoseyon] ondemandx suspended at %d\n", dbs_info->cur_policy->cur);
  1103. +        }
  1104. +}
  1105. +
  1106. +static void ondemandx_early_suspend(struct early_suspend *handler) {
  1107. +       ondemandx_suspend(1);
  1108. +}
  1109. +
  1110. +static void ondemandx_late_resume(struct early_suspend *handler) {
  1111. +       ondemandx_suspend(0);
  1112. +}
  1113. +
  1114. +static struct early_suspend ondemandx_power_suspend = {
  1115. +        .suspend = ondemandx_early_suspend,
  1116. +        .resume = ondemandx_late_resume,
  1117. +        .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
  1118. +};
  1119. +
  1120. +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
  1121. +cputime64_t *wall)
  1122. +{
  1123. +cputime64_t idle_time;
  1124. +cputime64_t cur_wall_time;
  1125. +cputime64_t busy_time;
  1126. +
  1127. +cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
  1128. +busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
  1129. +kstat_cpu(cpu).cpustat.system);
  1130. +
  1131. +busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
  1132. +busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
  1133. +busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
  1134. +busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
  1135. +
  1136. +idle_time = cputime64_sub(cur_wall_time, busy_time);
  1137. +if (wall)
  1138. +*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
  1139. +
  1140. +return (cputime64_t)jiffies_to_usecs(idle_time);
  1141. +}
  1142. +
  1143. +static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
  1144. +{
  1145. +u64 idle_time = get_cpu_idle_time_us(cpu, wall);
  1146. +
  1147. +if (idle_time == -1ULL)
  1148. +return get_cpu_idle_time_jiffy(cpu, wall);
  1149. +
  1150. +return idle_time;
  1151. +}
  1152. +
  1153. +static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
  1154. +{
  1155. +u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
  1156. +
  1157. +if (iowait_time == -1ULL)
  1158. +return 0;
  1159. +
  1160. +return iowait_time;
  1161. +}
  1162. +
  1163. +/*
  1164. +* Find right freq to be set now with powersave_bias on.
  1165. +* Returns the freq_hi to be used right now and will set freq_hi_jiffies,
  1166. +* freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
  1167. +*/
  1168. +static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
  1169. +unsigned int freq_next,
  1170. +unsigned int relation)
  1171. +{
  1172. +unsigned int freq_req, freq_reduc, freq_avg;
  1173. +unsigned int freq_hi, freq_lo;
  1174. +unsigned int index = 0;
  1175. +unsigned int jiffies_total, jiffies_hi, jiffies_lo;
  1176. +struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
  1177. +policy->cpu);
  1178. +
  1179. +if (!dbs_info->freq_table) {
  1180. +dbs_info->freq_lo = 0;
  1181. +dbs_info->freq_lo_jiffies = 0;
  1182. +return freq_next;
  1183. +}
  1184. +
  1185. +cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
  1186. +relation, &index);
  1187. +freq_req = dbs_info->freq_table[index].frequency;
  1188. +freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
  1189. +freq_avg = freq_req - freq_reduc;
  1190. +
  1191. +/* Find freq bounds for freq_avg in freq_table */
  1192. +index = 0;
  1193. +cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
  1194. +CPUFREQ_RELATION_H, &index);
  1195. +freq_lo = dbs_info->freq_table[index].frequency;
  1196. +index = 0;
  1197. +cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
  1198. +CPUFREQ_RELATION_L, &index);
  1199. +freq_hi = dbs_info->freq_table[index].frequency;
  1200. +
  1201. +/* Find out how long we have to be in hi and lo freqs */
  1202. +if (freq_hi == freq_lo) {
  1203. +dbs_info->freq_lo = 0;
  1204. +dbs_info->freq_lo_jiffies = 0;
  1205. +return freq_lo;
  1206. +}
  1207. +jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
  1208. +jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
  1209. +jiffies_hi += ((freq_hi - freq_lo) / 2);
  1210. +jiffies_hi /= (freq_hi - freq_lo);
  1211. +jiffies_lo = jiffies_total - jiffies_hi;
  1212. +dbs_info->freq_lo = freq_lo;
  1213. +dbs_info->freq_lo_jiffies = jiffies_lo;
  1214. +dbs_info->freq_hi_jiffies = jiffies_hi;
  1215. +return freq_hi;
  1216. +}
  1217. +
  1218. +static void ondemandx_powersave_bias_init_cpu(int cpu)
  1219. +{
  1220. +struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
  1221. +dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
  1222. +dbs_info->freq_lo = 0;
  1223. +}
  1224. +
  1225. +static void ondemandx_powersave_bias_init(void)
  1226. +{
  1227. +int i;
  1228. +for_each_online_cpu(i) {
  1229. +ondemandx_powersave_bias_init_cpu(i);
  1230. +}
  1231. +}
  1232. +
  1233. +/************************** sysfs interface ************************/
  1234. +
  1235. +static ssize_t show_sampling_rate_min(struct kobject *kobj,
  1236. +struct attribute *attr, char *buf)
  1237. +{
  1238. +return sprintf(buf, "%u\n", min_sampling_rate);
  1239. +}
  1240. +
  1241. +define_one_global_ro(sampling_rate_min);
  1242. +
  1243. +/* cpufreq_ondemandx Governor Tunables */
  1244. +#define show_one(file_name, object) \
  1245. +static ssize_t show_##file_name \
  1246. +(struct kobject *kobj, struct attribute *attr, char *buf) \
  1247. +{ \
  1248. +return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
  1249. +}
  1250. +show_one(sampling_rate, sampling_rate);
  1251. +show_one(io_is_busy, io_is_busy);
  1252. +show_one(up_threshold, up_threshold);
  1253. +show_one(down_differential, down_differential);
  1254. +show_one(sampling_down_factor, sampling_down_factor);
  1255. +show_one(ignore_nice_load, ignore_nice);
  1256. +show_one(powersave_bias, powersave_bias);
  1257. +show_one(suspend_freq, suspend_freq);
  1258. +
  1259. +
  1260. +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
  1261. +const char *buf, size_t count)
  1262. +{
  1263. +unsigned int input;
  1264. +int ret;
  1265. +ret = sscanf(buf, "%u", &input);
  1266. +if (ret != 1)
  1267. +return -EINVAL;
  1268. +dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
  1269. +return count;
  1270. +}
  1271. +
  1272. +static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
  1273. +const char *buf, size_t count)
  1274. +{
  1275. +unsigned int input;
  1276. +int ret;
  1277. +
  1278. +ret = sscanf(buf, "%u", &input);
  1279. +if (ret != 1)
  1280. +return -EINVAL;
  1281. +dbs_tuners_ins.io_is_busy = !!input;
  1282. +return count;
  1283. +}
  1284. +
  1285. +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
  1286. +const char *buf, size_t count)
  1287. +{
  1288. +unsigned int input;
  1289. +int ret;
  1290. +ret = sscanf(buf, "%u", &input);
  1291. +
  1292. +if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
  1293. +input < MIN_FREQUENCY_UP_THRESHOLD) {
  1294. +return -EINVAL;
  1295. +}
  1296. +dbs_tuners_ins.up_threshold = input;
  1297. +return count;
  1298. +}
  1299. +
  1300. +static ssize_t store_sampling_down_factor(struct kobject *a,
  1301. +struct attribute *b, const char *buf, size_t count)
  1302. +{
  1303. +unsigned int input, j;
  1304. +int ret;
  1305. +ret = sscanf(buf, "%u", &input);
  1306. +
  1307. +if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
  1308. +return -EINVAL;
  1309. +dbs_tuners_ins.sampling_down_factor = input;
  1310. +
  1311. +/* Reset down sampling multiplier in case it was active */
  1312. +for_each_online_cpu(j) {
  1313. +struct cpu_dbs_info_s *dbs_info;
  1314. +dbs_info = &per_cpu(od_cpu_dbs_info, j);
  1315. +dbs_info->rate_mult = 1;
  1316. +}
  1317. +return count;
  1318. +}
  1319. +
  1320. +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
  1321. +const char *buf, size_t count)
  1322. +{
  1323. +unsigned int input;
  1324. +int ret;
  1325. +
  1326. +unsigned int j;
  1327. +
  1328. +ret = sscanf(buf, "%u", &input);
  1329. +if (ret != 1)
  1330. +return -EINVAL;
  1331. +
  1332. +if (input > 1)
  1333. +input = 1;
  1334. +
  1335. +if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
  1336. +return count;
  1337. +}
  1338. +dbs_tuners_ins.ignore_nice = input;
  1339. +
  1340. +/* we need to re-evaluate prev_cpu_idle */
  1341. +for_each_online_cpu(j) {
  1342. +struct cpu_dbs_info_s *dbs_info;
  1343. +dbs_info = &per_cpu(od_cpu_dbs_info, j);
  1344. +dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
  1345. +&dbs_info->prev_cpu_wall);
  1346. +if (dbs_tuners_ins.ignore_nice)
  1347. +dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
  1348. +
  1349. +}
  1350. +return count;
  1351. +}
  1352. +
  1353. +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
  1354. +const char *buf, size_t count)
  1355. +{
  1356. +unsigned int input;
  1357. +int ret;
  1358. +ret = sscanf(buf, "%u", &input);
  1359. +
  1360. +if (ret != 1)
  1361. +return -EINVAL;
  1362. +
  1363. +if (input > 1000)
  1364. +input = 1000;
  1365. +
  1366. +dbs_tuners_ins.powersave_bias = input;
  1367. +ondemandx_powersave_bias_init();
  1368. +return count;
  1369. +}
  1370. +
  1371. +
  1372. +
  1373. +static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
  1374. +const char *buf, size_t count)
  1375. +{
  1376. +unsigned int input;
  1377. +int ret;
  1378. +ret = sscanf(buf, "%u", &input);
  1379. +
  1380. +if (ret != 1)
  1381. +return -EINVAL;
  1382. +
  1383. +if (input > 30)
  1384. +input = 30;
  1385. +
  1386. +if (input < 0)
  1387. +input = 0;
  1388. +
  1389. +mutex_lock(&dbs_mutex);
  1390. +dbs_tuners_ins.down_differential = input;
  1391. +mutex_unlock(&dbs_mutex);
  1392. +
  1393. +return count;
  1394. +}
  1395. +
  1396. +static ssize_t store_suspend_freq(struct kobject *a, struct attribute *b,
  1397. +const char *buf, size_t count)
  1398. +{
  1399. +unsigned int input;
  1400. +int ret;
  1401. +ret = sscanf(buf, "%u", &input);
  1402. +
  1403. +if (ret != 1)
  1404. +return -EINVAL;
  1405. +
  1406. +if (input > 2016000)
  1407. +input = 2016000;
  1408. +
  1409. +if (input < 122000)
  1410. +input = 122000;
  1411. +
  1412. +mutex_lock(&dbs_mutex);
  1413. +dbs_tuners_ins.suspend_freq = input;
  1414. +mutex_unlock(&dbs_mutex);
  1415. +
  1416. +return count;
  1417. +}
  1418. +
  1419. +define_one_global_rw(sampling_rate);
  1420. +define_one_global_rw(io_is_busy);
  1421. +define_one_global_rw(up_threshold);
  1422. +define_one_global_rw(down_differential);
  1423. +define_one_global_rw(sampling_down_factor);
  1424. +define_one_global_rw(ignore_nice_load);
  1425. +define_one_global_rw(powersave_bias);
  1426. +define_one_global_rw(suspend_freq);
  1427. +
  1428. +static struct attribute *dbs_attributes[] = {
  1429. +&sampling_rate_min.attr,
  1430. +&sampling_rate.attr,
  1431. +&up_threshold.attr,
  1432. +&down_differential.attr,
  1433. +&sampling_down_factor.attr,
  1434. +&ignore_nice_load.attr,
  1435. +&powersave_bias.attr,
  1436. +&io_is_busy.attr,
  1437. +&suspend_freq.attr,
  1438. +NULL
  1439. +};
  1440. +
  1441. +static struct attribute_group dbs_attr_group = {
  1442. +.attrs = dbs_attributes,
  1443. +.name = "ondemandx",
  1444. +};
  1445. +
  1446. +/************************** sysfs end ************************/
  1447. +
  1448. +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
  1449. +{
  1450. +if (dbs_tuners_ins.powersave_bias)
  1451. +freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
  1452. +else if (p->cur == p->max)
  1453. +return;
  1454. +if (suspended && freq > dbs_tuners_ins.suspend_freq) {
  1455. +freq = dbs_tuners_ins.suspend_freq;
  1456. +__cpufreq_driver_target(p, freq, CPUFREQ_RELATION_H);
  1457. +} else
  1458. +__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
  1459. +                        CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
  1460. +}
  1461. +
  1462. +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
  1463. +{
  1464. +unsigned int max_load_freq;
  1465. +
  1466. +struct cpufreq_policy *policy;
  1467. +unsigned int j;
  1468. +
  1469. +this_dbs_info->freq_lo = 0;
  1470. +policy = this_dbs_info->cur_policy;
  1471. +
  1472. +/*
  1473. +* Every sampling_rate, we check if the current idle time is less
  1474. +* than 20% (default); if it is, we try to increase the frequency.
  1475. +* Every sampling_rate, we also look for the lowest
  1476. +* frequency which can sustain the load while keeping idle time over
  1477. +* 30%. If such a frequency exists, we try to decrease to this frequency.
  1478. +*
  1479. +* Any frequency increase takes it to the maximum frequency.
  1480. +* Frequency reduction happens at minimum steps of
  1481. +* 5% (default) of current frequency
  1482. +*/
  1483. +
  1484. +/* Get Absolute Load - in terms of freq */
  1485. +max_load_freq = 0;
  1486. +
  1487. +for_each_cpu(j, policy->cpus) {
  1488. +struct cpu_dbs_info_s *j_dbs_info;
  1489. +cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
  1490. +unsigned int idle_time, wall_time, iowait_time;
  1491. +unsigned int load, load_freq;
  1492. +int freq_avg;
  1493. +
  1494. +j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
  1495. +
  1496. +cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
  1497. +cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
  1498. +
  1499. +wall_time = (unsigned int) cputime64_sub(cur_wall_time,
  1500. +j_dbs_info->prev_cpu_wall);
  1501. +j_dbs_info->prev_cpu_wall = cur_wall_time;
  1502. +
  1503. +idle_time = (unsigned int) cputime64_sub(cur_idle_time,
  1504. +j_dbs_info->prev_cpu_idle);
  1505. +j_dbs_info->prev_cpu_idle = cur_idle_time;
  1506. +
  1507. +iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
  1508. +j_dbs_info->prev_cpu_iowait);
  1509. +j_dbs_info->prev_cpu_iowait = cur_iowait_time;
  1510. +
  1511. +if (dbs_tuners_ins.ignore_nice) {
  1512. +cputime64_t cur_nice;
  1513. +unsigned long cur_nice_jiffies;
  1514. +
  1515. +cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
  1516. +j_dbs_info->prev_cpu_nice);
  1517. +/*
  1518. +* Assumption: nice time between sampling periods will
  1519. +* be less than 2^32 jiffies on 32-bit systems
  1520. +*/
  1521. +cur_nice_jiffies = (unsigned long)
  1522. +cputime64_to_jiffies64(cur_nice);
  1523. +
  1524. +j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
  1525. +idle_time += jiffies_to_usecs(cur_nice_jiffies);
  1526. +}
  1527. +
  1528. +/*
  1529. +* For the purpose of ondemandx, waiting for disk IO is an
  1530. +* indication that you're performance critical, and not that
  1531. +* the system is actually idle. So subtract the iowait time
  1532. +* from the cpu idle time.
  1533. +*/
  1534. +
  1535. +if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
  1536. +idle_time -= iowait_time;
  1537. +
  1538. +if (unlikely(!wall_time || wall_time < idle_time))
  1539. +continue;
  1540. +
  1541. +load = 100 * (wall_time - idle_time) / wall_time;
  1542. +
  1543. +freq_avg = __cpufreq_driver_getavg(policy, j);
  1544. +if (freq_avg <= 0)
  1545. +freq_avg = policy->cur;
  1546. +
  1547. +load_freq = load * freq_avg;
  1548. +if (load_freq > max_load_freq)
  1549. +max_load_freq = load_freq;
  1550. +}
  1551. +
  1552. +   /* Check for frequency increase */
  1553. +   if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
  1554. +       /* If switching to max speed, apply sampling_down_factor */
  1555. +       if (policy->cur < policy->max)
  1556. +           this_dbs_info->rate_mult =
  1557. +               dbs_tuners_ins.sampling_down_factor;
  1558. +       dbs_freq_increase(policy, policy->max);
  1559. +       return;
  1560. +   }
  1561. +
  1562. +   /* Check for frequency decrease */
  1563. +   /* if we cannot reduce the frequency anymore, break out early */
  1564. +   if (policy->cur == policy->min)
  1565. +       return;
  1566. +
  1567. +   /*
  1568. +    * The optimal frequency is the lowest frequency that
  1569. +    * can support the current CPU usage without triggering the up
  1570. +    * policy. To be safe, we stay 10 points under the threshold.
  1571. +    */
  1572. +   if (max_load_freq <
  1573. +       (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
  1574. +        policy->cur) {
  1575. +       unsigned int freq_next;
  1576. +       freq_next = max_load_freq /
  1577. +           (dbs_tuners_ins.up_threshold -
  1578. +            dbs_tuners_ins.down_differential);
  1579. +
  1580. +       /* No longer fully busy, reset rate_mult */
  1581. +       this_dbs_info->rate_mult = 1;
  1582. +
  1583. +       if (freq_next < policy->min)
  1584. +           freq_next = policy->min;
  1585. +
  1586. +       if (!dbs_tuners_ins.powersave_bias) {
  1587. +           __cpufreq_driver_target(policy, freq_next,
  1588. +               CPUFREQ_RELATION_L);
  1589. +       } else {
  1590. +           int freq = powersave_bias_target(policy, freq_next,
  1591. +               CPUFREQ_RELATION_L);
  1592. +           __cpufreq_driver_target(policy, freq,
  1593. +               CPUFREQ_RELATION_L);
  1594. +       }
  1595. +   }
  1596. +}
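
[Editor's aside - illustrative only, not part of the patch: a worked example of the threshold arithmetic in dbs_check_cpu() above, written as standalone userspace C. The tunables (up_threshold = 80, down_differential = 10) and the sample counters are assumed values for the sketch.]

    #include <stdio.h>

    int main(void)
    {
        /* Assumed sample: 100 ms window with 60 ms idle on a 1 GHz CPU. */
        unsigned int wall_time = 100000, idle_time = 60000; /* usecs */
        unsigned int freq_avg = 1000000, cur = 1000000;     /* kHz */
        unsigned int up_threshold = 80, down_differential = 10;

        unsigned int load = 100 * (wall_time - idle_time) / wall_time; /* 40% */
        unsigned int load_freq = load * freq_avg; /* 40,000,000 */

        if (load_freq > up_threshold * cur) {
            printf("ramp straight to policy->max\n"); /* 40M > 80M: false */
        } else if (load_freq < (up_threshold - down_differential) * cur) {
            /* 40M < 70M, so pick the lowest freq that keeps load near 70% */
            unsigned int freq_next = load_freq /
                    (up_threshold - down_differential);
            printf("freq_next = %u kHz\n", freq_next); /* 571428 kHz */
        }
        return 0;
    }

With these numbers the governor would step down to roughly 571 MHz, the lowest frequency expected to keep the measured load under the 70-point effective threshold.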
  1597. +
  1598. +static void do_dbs_timer(struct work_struct *work)
  1599. +{
  1600. +   struct cpu_dbs_info_s *dbs_info =
  1601. +       container_of(work, struct cpu_dbs_info_s, work.work);
  1602. +   unsigned int cpu = dbs_info->cpu;
  1603. +   int sample_type = dbs_info->sample_type;
  1604. +
  1605. +   int delay;
  1606. +
  1607. +   mutex_lock(&dbs_info->timer_mutex);
  1608. +
  1609. +   /* Common NORMAL_SAMPLE setup */
  1610. +   dbs_info->sample_type = DBS_NORMAL_SAMPLE;
  1611. +   if (!dbs_tuners_ins.powersave_bias ||
  1612. +       sample_type == DBS_NORMAL_SAMPLE) {
  1613. +       dbs_check_cpu(dbs_info);
  1614. +       if (dbs_info->freq_lo) {
  1615. +           /* Setup timer for SUB_SAMPLE */
  1616. +           dbs_info->sample_type = DBS_SUB_SAMPLE;
  1617. +           delay = dbs_info->freq_hi_jiffies;
  1618. +       } else {
  1619. +           /* We want all CPUs to do sampling nearly on
  1620. +            * the same jiffy; an example follows this function.
  1621. +            */
  1622. +           delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
  1623. +               * dbs_info->rate_mult);
  1624. +
  1625. +           if (num_online_cpus() > 1)
  1626. +               delay -= jiffies % delay;
  1627. +       }
  1628. +   } else {
  1629. +       if (!suspended)
  1630. +           __cpufreq_driver_target(dbs_info->cur_policy,
  1631. +               dbs_info->freq_lo, CPUFREQ_RELATION_H);
  1632. +       delay = dbs_info->freq_lo_jiffies;
  1633. +   }
  1634. +   schedule_delayed_work_on(cpu, &dbs_info->work, delay);
  1635. +   mutex_unlock(&dbs_info->timer_mutex);
  1636. +}
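
[Editor's aside - illustrative only: the "delay -= jiffies % delay" trim used in do_dbs_timer() and dbs_timer_init() aligns every CPU's next sample to the same jiffy boundary. The jiffy counter value below is an assumed example.]

    #include <stdio.h>

    int main(void)
    {
        unsigned long jiffies = 100173; /* assumed current jiffy counter */
        unsigned long delay = 20;       /* sampling interval in jiffies */

        /* Firing at jiffies + 20 would drift per CPU; trimming the
         * remainder makes every CPU expire on a multiple of 20. */
        delay -= jiffies % delay;       /* 20 - 13 = 7 */
        printf("next sample at jiffy %lu\n", jiffies + delay); /* 100180 */
        return 0;
    }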
  1637. +
  1638. +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
  1639. +{
  1640. +   /* We want all CPUs to do sampling nearly on same jiffy */
  1641. +   int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
  1642. +
  1643. +   if (num_online_cpus() > 1)
  1644. +       delay -= jiffies % delay;
  1645. +
  1646. +   dbs_info->sample_type = DBS_NORMAL_SAMPLE;
  1647. +   INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
  1648. +   schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
  1649. +}
  1650. +
  1651. +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
  1652. +{
  1653. +   cancel_delayed_work_sync(&dbs_info->work);
  1654. +}
  1655. +
  1656. +/*
  1657. + * Not all CPUs want IO time to be accounted as busy; this depends on how
  1658. + * efficient idling at a higher frequency/voltage is.
  1659. + * Pavel Machek says this is not so for various generations of AMD and old
  1660. + * Intel systems.
  1661. + * Mike Chan (android.com) claims this is also not true for ARM.
  1662. + * Because of this, whitelist specific known (series) of CPUs by default, and
  1663. + * leave all others up to the user.
  1664. + */
  1665. +static int should_io_be_busy(void)
  1666. +{
  1667. +#if defined(CONFIG_X86)
  1668. +   /*
  1669. +    * For Intel, Core 2 (model 15) and later have an efficient idle.
  1670. +    */
  1671. +   if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
  1672. +       boot_cpu_data.x86 == 6 &&
  1673. +       boot_cpu_data.x86_model >= 15)
  1674. +       return 1;
  1675. +#endif
  1676. +   return 0;
  1677. +}
  1678. +
  1679. +static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
  1680. +       unsigned int event)
  1681. +{
  1682. +   unsigned int cpu = policy->cpu;
  1683. +   struct cpu_dbs_info_s *this_dbs_info;
  1684. +   unsigned int j;
  1685. +   int rc;
  1686. +
  1687. +   this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
  1688. +
  1689. +   switch (event) {
  1690. +   case CPUFREQ_GOV_START:
  1691. +       if ((!cpu_online(cpu)) || (!policy->cur))
  1692. +           return -EINVAL;
  1693. +
  1694. +       mutex_lock(&dbs_mutex);
  1695. +
  1696. +       dbs_enable++;
  1697. +       for_each_cpu(j, policy->cpus) {
  1698. +           struct cpu_dbs_info_s *j_dbs_info;
  1699. +           j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
  1700. +           j_dbs_info->cur_policy = policy;
  1701. +
  1702. +           j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
  1703. +               &j_dbs_info->prev_cpu_wall);
  1704. +           if (dbs_tuners_ins.ignore_nice) {
  1705. +               j_dbs_info->prev_cpu_nice =
  1706. +                   kstat_cpu(j).cpustat.nice;
  1707. +           }
  1708. +       }
  1709. +       this_dbs_info->cpu = cpu;
  1710. +       this_dbs_info->rate_mult = 1;
  1711. +       ondemandx_powersave_bias_init_cpu(cpu);
  1712. +       /*
  1713. +        * Start the timer schedule work when this governor
  1714. +        * is used for the first time.
  1715. +        */
  1716. +       if (dbs_enable == 1) {
  1717. +           unsigned int latency;
  1718. +
  1719. +           rc = sysfs_create_group(cpufreq_global_kobject,
  1720. +               &dbs_attr_group);
  1721. +           if (rc) {
  1722. +               mutex_unlock(&dbs_mutex);
  1723. +               return rc;
  1724. +           }
  1725. +
  1726. +           /* policy latency is in ns. Convert it to us first */
  1727. +           latency = policy->cpuinfo.transition_latency / 1000;
  1728. +           if (latency == 0)
  1729. +               latency = 1;
  1730. +           /* Bring kernel and HW constraints together; a worked example follows this function */
  1731. +           min_sampling_rate = max(min_sampling_rate,
  1732. +               MIN_LATENCY_MULTIPLIER * latency);
  1733. +           dbs_tuners_ins.sampling_rate =
  1734. +               max(min_sampling_rate,
  1735. +                   latency * LATENCY_MULTIPLIER);
  1736. +           dbs_tuners_ins.io_is_busy = should_io_be_busy();
  1737. +       }
  1738. +       mutex_unlock(&dbs_mutex);
  1739. +
  1740. +       mutex_init(&this_dbs_info->timer_mutex);
  1741. +       dbs_timer_init(this_dbs_info);
  1742. +       register_early_suspend(&ondemandx_power_suspend);
  1743. +       pr_info("[imoseyon] ondemandx active\n");
  1744. +       break;
  1745. +
  1746. +   case CPUFREQ_GOV_STOP:
  1747. +       dbs_timer_exit(this_dbs_info);
  1748. +
  1749. +       mutex_lock(&dbs_mutex);
  1750. +       mutex_destroy(&this_dbs_info->timer_mutex);
  1751. +       dbs_enable--;
  1752. +       mutex_unlock(&dbs_mutex);
  1753. +       if (!dbs_enable)
  1754. +           sysfs_remove_group(cpufreq_global_kobject,
  1755. +               &dbs_attr_group);
  1756. +       unregister_early_suspend(&ondemandx_power_suspend);
  1757. +       pr_info("[imoseyon] ondemandx inactive\n");
  1758. +       break;
  1759. +
  1760. +   case CPUFREQ_GOV_LIMITS:
  1761. +       mutex_lock(&this_dbs_info->timer_mutex);
  1762. +       if (policy->max < this_dbs_info->cur_policy->cur)
  1763. +           __cpufreq_driver_target(this_dbs_info->cur_policy,
  1764. +               policy->max, CPUFREQ_RELATION_H);
  1765. +       else if (policy->min > this_dbs_info->cur_policy->cur)
  1766. +           __cpufreq_driver_target(this_dbs_info->cur_policy,
  1767. +               policy->min, CPUFREQ_RELATION_L);
  1768. +       mutex_unlock(&this_dbs_info->timer_mutex);
  1769. +       break;
  1770. +   }
  1771. +   return 0;
  1772. +}
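
[Editor's aside - illustrative only: the GOV_START path above derives the sampling rate from the driver's transition latency. The sketch below redoes that arithmetic in plain C with assumed values: a 300,000 ns transition latency, LATENCY_MULTIPLIER = 1000, MIN_LATENCY_MULTIPLIER = 100, and a 10,000 us floor, matching the usual ondemand defaults.]

    #include <stdio.h>

    #define LATENCY_MULTIPLIER     1000 /* assumed, as in stock ondemand */
    #define MIN_LATENCY_MULTIPLIER 100  /* assumed, as in stock ondemand */

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        unsigned int transition_latency = 300000; /* ns, assumed */
        unsigned int min_sampling_rate = 10000;   /* usecs, assumed floor */

        unsigned int latency = transition_latency / 1000; /* 300 us */
        if (latency == 0)
            latency = 1;

        min_sampling_rate = max_u(min_sampling_rate,
                MIN_LATENCY_MULTIPLIER * latency);        /* 30,000 us */
        unsigned int sampling_rate = max_u(min_sampling_rate,
                latency * LATENCY_MULTIPLIER);            /* 300,000 us */

        printf("sample every %u us\n", sampling_rate);
        return 0;
    }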
  1773. +
  1774. +static int __init cpufreq_gov_dbs_init(void)
  1775. +{
  1776. +   cputime64_t wall;
  1777. +   u64 idle_time;
  1778. +   int cpu = get_cpu();
  1779. +
  1780. +   idle_time = get_cpu_idle_time_us(cpu, &wall);
  1781. +   put_cpu();
  1782. +   if (idle_time != -1ULL) {
  1783. +       /* Idle micro accounting is supported. Use finer thresholds */
  1784. +       dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
  1785. +       dbs_tuners_ins.down_differential =
  1786. +           MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
  1787. +       /*
  1788. +        * In the no_hz/micro accounting case we set the minimum frequency
  1789. +        * not depending on HZ, but fixed (very low). The deferred
  1790. +        * timer might skip some samples if idle/sleeping as needed.
  1791. +        */
  1792. +       min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
  1793. +   } else {
  1794. +       /* For correct statistics, we need 10 ticks for each measure; see the example after this function */
  1795. +       min_sampling_rate =
  1796. +           MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
  1797. +   }
  1798. +
  1799. +   pr_info("[imoseyon] ondemandx enter\n");
  1800. +   return cpufreq_register_governor(&cpufreq_gov_ondemandx);
  1801. +}
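
[Editor's aside - illustrative only: the fallback branch above sizes min_sampling_rate at ten ticks per measurement when fine-grained idle accounting is unavailable. Assuming MIN_SAMPLING_RATE_RATIO = 2 (the stock ondemand value) and HZ = 100, that works out as below.]

    #include <stdio.h>

    int main(void)
    {
        unsigned int hz = 100;                             /* assumed kernel HZ */
        unsigned int ten_ticks_us = 10 * (1000000 / hz);   /* 100,000 us */
        unsigned int min_sampling_rate = 2 * ten_ticks_us; /* ratio = 2 */

        printf("min_sampling_rate = %u us\n", min_sampling_rate); /* 200000 */
        return 0;
    }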
  1802. +
  1803. +static void __exit cpufreq_gov_dbs_exit(void)
  1804. +{
  1805. +   pr_info("[imoseyon] ondemandx exit\n");
  1806. +   cpufreq_unregister_governor(&cpufreq_gov_ondemandx);
  1807. +}
  1808. +
  1809. +
  1810. +MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
  1811. +MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
  1812. +MODULE_DESCRIPTION("'cpufreq_ondemandx' - A dynamic cpufreq governor for "
  1813. +   "Low Latency Frequency Transition capable processors");
  1814. +MODULE_LICENSE("GPL");
  1815. +
  1816. +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX
  1817. +fs_initcall(cpufreq_gov_dbs_init);
  1818. +#else
  1819. +module_init(cpufreq_gov_dbs_init);
  1820. +#endif
  1821. +module_exit(cpufreq_gov_dbs_exit);
  1822. diff -uNr Vanilla/drivers/cpufreq/Kconfig New/drivers/cpufreq/Kconfig
  1823. --- Vanilla/drivers/cpufreq/Kconfig 2011-11-30 01:52:01.000000000 -0600
  1824. +++ New/drivers/cpufreq/Kconfig 2012-07-06 17:45:58.000000000 -0500
  1825. @@ -99,6 +99,19 @@
  1826.       governor. If unsure have a look at the help section of the
  1827.       driver. Fallback governor will be the performance governor.
  1828.  
  1829. +config CPU_FREQ_DEFAULT_GOV_ONDEMANDX
  1830. +   bool "ondemandx"
  1831. +   select CPU_FREQ_GOV_ONDEMANDX
  1832. +   select CPU_FREQ_GOV_PERFORMANCE
  1833. +   help
  1834. +     Use the CPUFreq governor 'ondemandx' as default. This allows
  1835. +     you to get a full dynamic frequency capable system by simply
  1836. +     loading your cpufreq low-level hardware driver.
  1837. +     Be aware that not all cpufreq drivers support the ondemandx
  1838. +     governor. If unsure, have a look at the help section of the
  1839. +     driver. The fallback governor will be the performance governor.
  1840. +     OndemandX has a built-in sleep profile, but its sysfs
  1841. +     interface is not working.
  1842.  config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
  1843.     bool "conservative"
  1844.     select CPU_FREQ_GOV_CONSERVATIVE
  1845. @@ -119,6 +132,20 @@
  1846.       you to get a full dynamic cpu frequency capable system by simply
  1847.       loading your cpufreq low-level hardware driver, using the
  1848.       'interactive' governor for latency-sensitive workloads.
  1849. +config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
  1850. +   bool "interactiveX"
  1851. +   select CPU_FREQ_GOV_INTERACTIVEX
  1852. +   help
  1853. +     Use the CPUFreq governor 'interactiveX' as default. This allows
  1854. +     you to get a full dynamic cpu frequency capable system by simply
  1855. +     loading your cpufreq low-level hardware driver, using the
  1856. +     'interactiveX' governor for latency-sensitive workloads.
  1857. +
  1858. +config CPU_FREQ_DEFAULT_GOV_LIONHEART
  1859. +   bool "lionheart"
  1860. +   select CPU_FREQ_GOV_LIONHEART
  1861. +   help
  1862. +     Use the CPUFreq governor 'Lionheart' as default.
  1863.  
  1864.  endchoice
  1865.  
  1866. @@ -159,6 +186,31 @@
  1867.  
  1868.       If in doubt, say Y.
  1869.  
  1870. +config CPU_FREQ_GOV_ONDEMANDX
  1871. +   tristate "'ondemandx' cpufreq policy governor"
  1872. +   select CPU_FREQ_TABLE
  1873. +   help
  1874. +     'ondemandx' - This driver adds a dynamic cpufreq policy governor.
  1875. +     The governor does periodic polling and
  1876. +     changes frequency based on the CPU utilization.
  1877. +     The support for this governor depends on CPU capability to
  1878. +     do fast frequency switching (i.e., very low latency frequency
  1879. +     transitions).
  1880. +
  1881. +     To compile this driver as a module, choose M here: the
  1882. +     module will be called cpufreq_ondemandx.
  1883. +
  1884. +     For details, take a look at linux/Documentation/cpu-freq.
  1885. +
  1886. +     If in doubt, say N.
  1887. +
  1888. +config CPU_FREQ_GOV_ONDEMANDX_INPUT
  1889. +   bool "Ramp up CPU frequency on input events"
  1890. +   default y
  1891. +   depends on CPU_FREQ_GOV_ONDEMANDX
  1892. +   help
  1893. +     Enable installation of an input event handler which will ramp up the
  1894. +     CPU to max frequency when an input event is received.
  1895.  config CPU_FREQ_GOV_ONDEMAND
  1896.     tristate "'ondemand' cpufreq policy governor"
  1897.     select CPU_FREQ_TABLE
  1898. @@ -206,4 +258,14 @@
  1899.  
  1900.       If in doubt, say N.
  1901.  
  1902. +config CPU_FREQ_GOV_LIONHEART
  1903. +   tristate "'Lionheart' cpufreq governor"
  1904. +   depends on CPU_FREQ
  1905. +   help
  1906. +     'Lionheart' - A brave and agile conservative-based governor.
  1907. +config CPU_FREQ_GOV_INTERACTIVEX
  1908. +   tristate "'interactiveX' cpufreq policy governor"
  1909. +   help
  1910. +     'interactiveX' - Modified version of interactive with sleep+wake code.
  1911. +
  1912.  endif  # CPU_FREQ
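
[Editor's aside - illustrative only: once CPU_FREQ_GOV_ONDEMANDX is built in, the governor can be selected at runtime through the standard cpufreq sysfs node. A minimal userspace sketch, assuming the stock sysfs layout and a single policy on cpu0:]

    #include <stdio.h>

    int main(void)
    {
        /* Standard cpufreq sysfs path; adjust the cpu index as needed. */
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "w");
        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("ondemandx", f);
        fclose(f);
        return 0;
    }

Reading the same node back (or listing scaling_available_governors) confirms the switch took effect.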
  1913. diff -uNr Vanilla/drivers/cpufreq/Makefile New/drivers/cpufreq/Makefile
  1914. --- Vanilla/drivers/cpufreq/Makefile    2011-11-30 01:52:01.000000000 -0600
  1915. +++ New/drivers/cpufreq/Makefile    2012-07-06 17:46:32.000000000 -0500
  1916. @@ -7,9 +7,12 @@
  1917.  obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
  1918.  obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)   += cpufreq_powersave.o
  1919.  obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)   += cpufreq_userspace.o
  1920. +obj-$(CONFIG_CPU_FREQ_GOV_ONDEMANDX)  += cpufreq_ondemandx.o
  1921.  obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)    += cpufreq_ondemand.o
  1922.  obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)    += cpufreq_conservative.o
  1923.  obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
  1924. +obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX)    += cpufreq_interactivex.o
  1925. +obj-$(CONFIG_CPU_FREQ_GOV_LIONHEART)    += cpufreq_lionheart.o
  1926.  
  1927.  # CPUfreq cross-arch helpers
  1928.  obj-$(CONFIG_CPU_FREQ_TABLE)       += freq_table.o
  1929. diff -uNr Vanilla/include/linux/cpufreq.h New/include/linux/cpufreq.h
  1930. --- Vanilla/include/linux/cpufreq.h 2011-11-30 01:51:52.000000000 -0600
  1931. +++ New/include/linux/cpufreq.h 2012-07-06 17:49:32.474953576 -0500
  1932. @@ -363,6 +363,9 @@
  1933.  #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
  1934.  extern struct cpufreq_governor cpufreq_gov_userspace;
  1935.  #define CPUFREQ_DEFAULT_GOVERNOR   (&cpufreq_gov_userspace)
  1936. +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMANDX)
  1937. +extern struct cpufreq_governor cpufreq_gov_ondemandx;
  1938. +#define CPUFREQ_DEFAULT_GOVERNOR   (&cpufreq_gov_ondemandx)
  1939.  #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
  1940.  extern struct cpufreq_governor cpufreq_gov_ondemand;
  1941.  #define CPUFREQ_DEFAULT_GOVERNOR   (&cpufreq_gov_ondemand)
  1942. @@ -372,9 +375,14 @@
  1943.  #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
  1944.  extern struct cpufreq_governor cpufreq_gov_interactive;
  1945.  #define CPUFREQ_DEFAULT_GOVERNOR   (&cpufreq_gov_interactive)
  1946. +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX)
  1947. +extern struct cpufreq_governor cpufreq_gov_interactivex;
  1948. +#define CPUFREQ_DEFAULT_GOVERNOR  (&cpufreq_gov_interactivex)
  1949. +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_LIONHEART)
  1950. +extern struct cpufreq_governor cpufreq_gov_lionheart;
  1951. +#define CPUFREQ_DEFAULT_GOVERNOR  (&cpufreq_gov_lionheart)
  1952.  #endif
  1953.  
  1954. -
  1955.  /*********************************************************************
  1956.   *                     FREQUENCY TABLE HELPERS                       *
  1957.   *********************************************************************/