Guest User

A10 smartassV2 patch

a guest
Jun 9th, 2012
71
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Diff 29.88 KB | None | 0 0
  1. diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
  2. index 9cf1990..ed26145 100755
  3. --- a/drivers/cpufreq/Kconfig
  4. +++ b/drivers/cpufreq/Kconfig
  5. @@ -118,6 +118,12 @@ config CPU_FREQ_DEFAULT_GOV_FANTASY
  6.       loading your cpufreq low-level hardware driver, using the
  7.       'fantasy' governor for latency-sensitive workloads.
  8.  
  9. +config CPU_FREQ_DEFAULT_GOV_SMARTASS2
  10. +   bool "smartass2"
  11. +   select CPU_FREQ_GOV_SMARTASS2
  12. +   help
  13. +       Use the CPUFreq governor 'smartassV2' as default.
  14. +
  15.  endchoice
  16.  
  17.  config CPU_FREQ_GOV_PERFORMANCE
  18. @@ -233,6 +239,13 @@ config CPU_FREQ_GOV_FANTASY
  19.  
  20.       If in doubt, say N.
  21.  
  22. +config CPU_FREQ_GOV_SMARTASS2
  23. +   tristate "'smartassV2' cpufreq governor"
  24. +   depends on CPU_FREQ
  25. +   help
  26. +       'smartassV2' - a "smart" governor
  27. +       If in doubt, say N.
  28. +
  29.  config CPU_FREQ_USR_EVNT_NOTIFY
  30.     bool "CPU frequency user event notify"
  31.     help
  32. diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
  33. index 2831820..a1e71ed 100755
  34. --- a/drivers/cpufreq/Makefile
  35. +++ b/drivers/cpufreq/Makefile
  36. @@ -11,6 +11,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)   += cpufreq_ondemand.o
  37.  obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)    += cpufreq_conservative.o
  38.  obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
  39.  obj-$(CONFIG_CPU_FREQ_GOV_FANTASY)      += cpufreq_fantasy.o
  40. +obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2)   += cpufreq_smartass2.o
  41.  # CPUfreq cross-arch helpers
  42.  obj-$(CONFIG_CPU_FREQ_TABLE)       += freq_table.o
  43.  
  44. diff --git a/drivers/cpufreq/cpufreq_smartass2.c b/drivers/cpufreq/cpufreq_smartass2.c
  45. new file mode 100644
  46. index 0000000..b75c6c0
  47. --- /dev/null
  48. +++ b/drivers/cpufreq/cpufreq_smartass2.c
  49. @@ -0,0 +1,868 @@
  50. +/*
  51. + * drivers/cpufreq/cpufreq_smartass2.c
  52. + *
  53. + * Copyright (C) 2010 Google, Inc.
  54. + *
  55. + * This software is licensed under the terms of the GNU General Public
  56. + * License version 2, as published by the Free Software Foundation, and
  57. + * may be copied, distributed, and modified under those terms.
  58. + *
  59. + * This program is distributed in the hope that it will be useful,
  60. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  61. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  62. + * GNU General Public License for more details.
  63. + *
  64. + * Author: Erasmux
  65. + *
  66. + * Based on the interactive governor by Mike Chan ([email protected])
  67. + * which was adapted to the 2.6.29 kernel by Nadlabak ([email protected])
  68. + *
  69. + * SMP support based on mod by faux123
  70. + *
  71. + * For a general overview of smartassV2 see the relevant part in
  72. + * Documentation/cpu-freq/governors.txt
  73. + *
  74. + */
  75. +
  76. +#include <linux/cpu.h>
  77. +#include <linux/cpumask.h>
  78. +#include <linux/cpufreq.h>
  79. +#include <linux/sched.h>
  80. +#include <linux/tick.h>
  81. +#include <linux/timer.h>
  82. +#include <linux/workqueue.h>
  83. +#include <linux/moduleparam.h>
  84. +#include <asm/cputime.h>
  85. +#include <linux/earlysuspend.h>
  86. +
  87. +
  88. +/******************** Tunable parameters: ********************/
  89. +
  90. +/*
  91. + * The "ideal" frequency to use when awake. The governor will ramp up faster
  92. + * towards the ideal frequency and slower after it has passed it. Similarly,
  93. + * lowering the frequency towards the ideal frequency is faster than below it.
  94. + */
  95. +#define DEFAULT_AWAKE_IDEAL_FREQ 696000
  96. +static unsigned int awake_ideal_freq;
  97. +
  98. +/*
  99. + * The "ideal" frequency to use when suspended.
  100. + * When set to 0, the governor will not track the suspended state (meaning
  101. + * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
  102. + * also when suspended).
  103. + */
  104. +#define DEFAULT_SLEEP_IDEAL_FREQ 108000
  105. +static unsigned int sleep_ideal_freq;
  106. +
  107. +/*
  108. + * Frequency delta when ramping up above the ideal frequency.
  109. + * Zero disables and causes the governor to always jump straight to max frequency.
  110. + * When below the ideal frequency we always ramp up to the ideal freq.
  111. + */
  112. +#define DEFAULT_RAMP_UP_STEP 256000
  113. +static unsigned int ramp_up_step;
  114. +
  115. +/*
  116. + * Frequency delta when ramping down below the ideal frequency.
  117. + * Zero disables and will calculate ramp down according to load heuristic.
  118. + * When above the ideal frequency we always ramp down to the ideal freq.
  119. + */
  120. +#define DEFAULT_RAMP_DOWN_STEP 168000
  121. +static unsigned int ramp_down_step;
  122. +
  123. +/*
  124. + * CPU freq will be increased if measured load > max_cpu_load;
  125. + */
  126. +#define DEFAULT_MAX_CPU_LOAD 50
  127. +static unsigned long max_cpu_load;
  128. +
  129. +/*
  130. + * CPU freq will be decreased if measured load < min_cpu_load;
  131. + */
  132. +#define DEFAULT_MIN_CPU_LOAD 25
  133. +static unsigned long min_cpu_load;
  134. +
  135. +/*
  136. + * The minimum amount of time to spend at a frequency before we can ramp up.
  137. + * Notice we ignore this when we are below the ideal frequency.
  138. + */
  139. +#define DEFAULT_UP_RATE_US 48000 /* no trailing ';' -- an object-like macro must not carry one */
  140. +static unsigned long up_rate_us;
  141. +
  142. +/*
  143. + * The minimum amount of time to spend at a frequency before we can ramp down.
  144. + * Notice we ignore this when we are above the ideal frequency.
  145. + */
  146. +#define DEFAULT_DOWN_RATE_US 99000
  147. +static unsigned long down_rate_us;
  148. +
  149. +/*
  150. + * The frequency to set when waking up from sleep.
  151. + * When sleep_ideal_freq=0 this will have no effect.
  152. + */
  153. +#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999
  154. +static unsigned int sleep_wakeup_freq;
  155. +
  156. +/*
  157. + * Sampling rate; I highly recommend leaving it at 2.
  158. + */
  159. +#define DEFAULT_SAMPLE_RATE_JIFFIES 2
  160. +static unsigned int sample_rate_jiffies;
  161. +
  162. +
  163. +/*************** End of tunables ***************/
  164. +
  165. +
  166. +static void (*pm_idle_old)(void);
  167. +static atomic_t active_count = ATOMIC_INIT(0);
  168. +
  169. +struct smartass_info_s {
  170. +   struct cpufreq_policy *cur_policy;
  171. +   struct cpufreq_frequency_table *freq_table;
  172. +   struct timer_list timer;
  173. +   u64 time_in_idle;
  174. +   u64 idle_exit_time;
  175. +   u64 freq_change_time;
  176. +   u64 freq_change_time_in_idle;
  177. +   int cur_cpu_load;
  178. +   int old_freq;
  179. +   int ramp_dir;
  180. +   unsigned int enable;
  181. +   int ideal_speed;
  182. +};
  183. +static DEFINE_PER_CPU(struct smartass_info_s, smartass_info);
  184. +
  185. +/* Workqueues handle frequency scaling */
  186. +static struct workqueue_struct *up_wq;
  187. +static struct workqueue_struct *down_wq;
  188. +static struct work_struct freq_scale_work;
  189. +
  190. +static cpumask_t work_cpumask;
  191. +static spinlock_t cpumask_lock;
  192. +
  193. +static unsigned int suspended;
  194. +
  195. +#define dprintk(flag,msg...) do { \
  196. +   if (debug_mask & flag) printk(KERN_DEBUG msg); \
  197. +   } while (0)
  198. +
  199. +enum {
  200. +   SMARTASS_DEBUG_JUMPS=1,
  201. +   SMARTASS_DEBUG_LOAD=2,
  202. +   SMARTASS_DEBUG_ALG=4
  203. +};
  204. +
  205. +/*
  206. + * Combination of the above debug flags.
  207. + */
  208. +static unsigned long debug_mask;
  209. +
  210. +static int cpufreq_governor_smartass(struct cpufreq_policy *policy,
  211. +       unsigned int event);
  212. +
  213. +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
  214. +static
  215. +#endif
  216. +struct cpufreq_governor cpufreq_gov_smartass2 = {
  217. +   .name = "smartassV2",
  218. +   .governor = cpufreq_governor_smartass,
  219. +   .max_transition_latency = 9000000,
  220. +   .owner = THIS_MODULE,
  221. +};
  222. +
  223. +inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) { // recompute ideal_speed for the given suspend state, clamped to [policy->min, policy->max]
  224. +   if (suspend) {
  225. +       this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max
  226. +           policy->max > sleep_ideal_freq ?
  227. +           (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max;
  228. +   } else {
  229. +       this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max
  230. +           policy->min < awake_ideal_freq ?
  231. +           (awake_ideal_freq < policy->max ? awake_ideal_freq : policy->max) : policy->min;
  232. +   }
  233. +}
  234. +
  235. +inline static void smartass_update_min_max_allcpus(void) { // refresh ideal_speed on every online cpu currently running this governor
  236. +   unsigned int i;
  237. +   for_each_online_cpu(i) {
  238. +       struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i);
  239. +       if (this_smartass->enable)
  240. +           smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended);
  241. +   }
  242. +}
  243. +
  244. +inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) { // clamp freq into the policy's [min,max] range
  245. +   if (freq > (int)policy->max)
  246. +       return policy->max;
  247. +   if (freq < (int)policy->min)
  248. +       return policy->min;
  249. +   return freq;
  250. +}
  251. +
  252. +inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) { // restart load sampling: snapshot idle time and re-arm the per-cpu timer
  253. +   this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time);
  254. +   mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
  255. +}
  256. +
  257. +inline static void work_cpumask_set(unsigned long cpu) { // mark cpu as having pending scaling work; cpumask_lock guards work_cpumask
  258. +   unsigned long flags;
  259. +   spin_lock_irqsave(&cpumask_lock, flags);
  260. +   cpumask_set_cpu(cpu, &work_cpumask);
  261. +   spin_unlock_irqrestore(&cpumask_lock, flags);
  262. +}
  263. +
  264. +inline static int work_cpumask_test_and_clear(unsigned long cpu) { // atomically consume cpu's pending-work flag; non-zero if it was set
  265. +   unsigned long flags;
  266. +   int res = 0;
  267. +   spin_lock_irqsave(&cpumask_lock, flags);
  268. +   res = cpumask_test_and_clear_cpu(cpu, &work_cpumask);
  269. +   spin_unlock_irqrestore(&cpumask_lock, flags);
  270. +   return res;
  271. +}
  272. +
  273. +inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass,
  274. +                 int new_freq, int old_freq, int prefered_relation) { // returns the frequency actually set, or 0 if unchanged/failed
  275. +   int index, target;
  276. +   struct cpufreq_frequency_table *table = this_smartass->freq_table;
  277. +
  278. +   if (new_freq == old_freq)
  279. +       return 0;
  280. +   new_freq = validate_freq(policy,new_freq);
  281. +   if (new_freq == old_freq)
  282. +       return 0;
  283. +
  284. +   if (table &&
  285. +       !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index))
  286. +   {
  287. +       target = table[index].frequency;
  288. +       if (target == old_freq) {
  289. +           // if for example we are ramping up to *at most* current + ramp_up_step
  290. +           // but there is no such frequency higher than the current, try also
  291. +           // to ramp up to *at least* current + ramp_up_step.
  292. +           if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H
  293. +               && !cpufreq_frequency_table_target(policy,table,new_freq,
  294. +                                  CPUFREQ_RELATION_L,&index))
  295. +               target = table[index].frequency;
  296. +           // similarly for ramping down:
  297. +           else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L
  298. +               && !cpufreq_frequency_table_target(policy,table,new_freq,
  299. +                                  CPUFREQ_RELATION_H,&index))
  300. +               target = table[index].frequency;
  301. +       }
  302. +
  303. +       if (target == old_freq) {
  304. +           // We should not get here:
  305. +           // If we got here we tried to change to a validated new_freq which is different
  306. +           // from old_freq, so there is no reason for us to remain at same frequency.
  307. +           printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n",
  308. +                  old_freq,new_freq,target);
  309. +           return 0;
  310. +       }
  311. +   }
  312. +   else target = new_freq;
  313. +
  314. +   __cpufreq_driver_target(policy, target, prefered_relation);
  315. +
  316. +   dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n",
  317. +       old_freq,new_freq,target,policy->cur);
  318. +
  319. +   return target;
  320. +}
  321. +
  322. +static void cpufreq_smartass_timer(unsigned long cpu)
  323. +{ // per-cpu sampling timer: compute load since the last idle snapshot and queue ramp work
  324. +   u64 delta_idle;
  325. +   u64 delta_time;
  326. +   int cpu_load;
  327. +   int old_freq;
  328. +   u64 update_time;
  329. +   u64 now_idle;
  330. +   int queued_work = 0;
  331. +   struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
  332. +   struct cpufreq_policy *policy = this_smartass->cur_policy;
  333. +
  334. +   now_idle = get_cpu_idle_time_us(cpu, &update_time);
  335. +   old_freq = policy->cur;
  336. +
  337. +   if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time)
  338. +       return; // no valid sample window yet
  339. +
  340. +   delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle);
  341. +   delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time);
  342. +
  343. +   // If timer ran less than 1ms after short-term sample started, retry.
  344. +   if (delta_time < 1000) {
  345. +       if (!timer_pending(&this_smartass->timer))
  346. +           reset_timer(cpu,this_smartass);
  347. +       return;
  348. +   }
  349. +
  350. +   if (delta_idle > delta_time)
  351. +       cpu_load = 0;
  352. +   else
  353. +       cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time;
  354. +
  355. +   dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n",
  356. +       old_freq,cpu_load,delta_time);
  357. +
  358. +   this_smartass->cur_cpu_load = cpu_load;
  359. +   this_smartass->old_freq = old_freq;
  360. +
  361. +   // Scale up if load is above max or if there were no idle cycles since coming out of idle,
  362. +   // additionally, if we are at or above the ideal_speed, verify we have been at this frequency
  363. +   // for at least up_rate_us:
  364. +   if (cpu_load > max_cpu_load || delta_idle == 0)
  365. +   {
  366. +       if (old_freq < policy->max &&
  367. +            (old_freq < this_smartass->ideal_speed || delta_idle == 0 ||
  368. +             cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us))
  369. +       {
  370. +           dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n",
  371. +               old_freq,cpu_load,delta_idle);
  372. +           this_smartass->ramp_dir = 1;
  373. +           work_cpumask_set(cpu);
  374. +           queue_work(up_wq, &freq_scale_work);
  375. +           queued_work = 1;
  376. +       }
  377. +       else this_smartass->ramp_dir = 0;
  378. +   }
  379. +   // Similarly for scale down: load should be below min and if we are at or below ideal
  380. +   // frequency we require that we have been at this frequency for at least down_rate_us:
  381. +   else if (cpu_load < min_cpu_load && old_freq > policy->min &&
  382. +        (old_freq > this_smartass->ideal_speed ||
  383. +         cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us))
  384. +   {
  385. +       dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n",
  386. +           old_freq,cpu_load,delta_idle);
  387. +       this_smartass->ramp_dir = -1;
  388. +       work_cpumask_set(cpu);
  389. +       queue_work(down_wq, &freq_scale_work);
  390. +       queued_work = 1;
  391. +   }
  392. +   else this_smartass->ramp_dir = 0;
  393. +
  394. +   // To avoid unnecessary load when the CPU is already at high load, we don't
  395. +   // reset ourselves if we are at max speed. If and when there are idle cycles,
  396. +   // the idle loop will activate the timer.
  397. +   // Additionally, if we queued some work, the work task will reset the timer
  398. +   // after it has done its adjustments.
  399. +   if (!queued_work && old_freq < policy->max)
  400. +       reset_timer(cpu,this_smartass);
  401. +}
  402. +
  403. +static void cpufreq_idle(void)
  404. +{ // pm_idle hook: keeps the sampling timer armed, except while parked at policy min
  405. +   struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
  406. +   struct cpufreq_policy *policy = this_smartass->cur_policy;
  407. +
  408. +   if (!this_smartass->enable) {
  409. +       pm_idle_old(); // governor not active on this cpu; fall through to the saved idle routine
  410. +       return;
  411. +   }
  412. +
  413. +   if (policy->cur == policy->min && timer_pending(&this_smartass->timer))
  414. +       del_timer(&this_smartass->timer); // already at min: no point sampling while idle
  415. +
  416. +   pm_idle_old();
  417. +
  418. +   if (!timer_pending(&this_smartass->timer))
  419. +       reset_timer(smp_processor_id(), this_smartass);
  420. +}
  421. +
  422. +/* We use the same work function to scale up and down */
  423. +static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
  424. +{ // consumes work_cpumask: applies the queued ramp up/down per cpu, then re-arms the timer
  425. +   unsigned int cpu;
  426. +   int new_freq;
  427. +   int old_freq;
  428. +   int ramp_dir;
  429. +   struct smartass_info_s *this_smartass;
  430. +   struct cpufreq_policy *policy;
  431. +   unsigned int relation = CPUFREQ_RELATION_L;
  432. +   for_each_possible_cpu(cpu) {
  433. +       this_smartass = &per_cpu(smartass_info, cpu);
  434. +       if (!work_cpumask_test_and_clear(cpu))
  435. +           continue;
  436. +
  437. +       ramp_dir = this_smartass->ramp_dir;
  438. +       this_smartass->ramp_dir = 0;
  439. +
  440. +       old_freq = this_smartass->old_freq;
  441. +       policy = this_smartass->cur_policy;
  442. +
  443. +       if (old_freq != policy->cur) {
  444. +           // frequency was changed by someone else?
  445. +           printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n",
  446. +                  old_freq,policy->cur);
  447. +           new_freq = old_freq;
  448. +       }
  449. +       else if (ramp_dir > 0 && nr_running() > 1) {
  450. +           // ramp up logic:
  451. +           if (old_freq < this_smartass->ideal_speed)
  452. +               new_freq = this_smartass->ideal_speed;
  453. +           else if (ramp_up_step) {
  454. +               new_freq = old_freq + ramp_up_step;
  455. +               relation = CPUFREQ_RELATION_H;
  456. +           }
  457. +           else {
  458. +               new_freq = policy->max;
  459. +               relation = CPUFREQ_RELATION_H;
  460. +           }
  461. +           dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n",
  462. +               old_freq,ramp_dir,this_smartass->ideal_speed);
  463. +       }
  464. +       else if (ramp_dir < 0) {
  465. +           // ramp down logic:
  466. +           if (old_freq > this_smartass->ideal_speed) {
  467. +               new_freq = this_smartass->ideal_speed;
  468. +               relation = CPUFREQ_RELATION_H;
  469. +           }
  470. +           else if (ramp_down_step)
  471. +               new_freq = old_freq - ramp_down_step;
  472. +           else {
  473. +               // Load heuristics: Adjust new_freq such that, assuming a linear
  474. +               // scaling of load vs. frequency, the load in the new frequency
  475. +               // will be max_cpu_load:
  476. +               new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load;
  477. +               if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?!
  478. +                   new_freq = old_freq -1;
  479. +           }
  480. +           dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n",
  481. +               old_freq,ramp_dir,this_smartass->ideal_speed);
  482. +       }
  483. +       else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down
  484. +              // before the work task gets to run?
  485. +              // This may also happen if we refused to ramp up because the nr_running()==1
  486. +           new_freq = old_freq;
  487. +           dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n",
  488. +               old_freq,ramp_dir,nr_running());
  489. +       }
  490. +
  491. +       // do actual ramp up (returns 0, if frequency change failed):
  492. +       new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation);
  493. +       if (new_freq)
  494. +           this_smartass->freq_change_time_in_idle =
  495. +               get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
  496. +
  497. +       // reset timer:
  498. +       if (new_freq < policy->max)
  499. +           reset_timer(cpu,this_smartass);
  500. +       // if we are maxed out, it is pointless to use the timer
  501. +       // (idle cycles wake up the timer when the timer comes)
  502. +       else if (timer_pending(&this_smartass->timer))
  503. +           del_timer(&this_smartass->timer);
  504. +   }
  505. +}
  506. +
  507. +static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf)
  508. +{
  509. +   return sprintf(buf, "%lu\n", debug_mask);
  510. +}
  511. +
  512. +static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  513. +{
  514. +   ssize_t res;
  515. +   unsigned long input;
  516. +   res = strict_strtoul(buf, 0, &input);
  517. +   if (res >= 0)
  518. +       debug_mask = input;
  519. +   return res < 0 ? res : count; // a sysfs store must consume the buffer; returning 0 makes userspace re-write forever
  520. +}
  521. +
  522. +static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf)
  523. +{
  524. +   return sprintf(buf, "%lu\n", up_rate_us);
  525. +}
  526. +
  527. +static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  528. +{
  529. +   ssize_t res;
  530. +   unsigned long input;
  531. +   res = strict_strtoul(buf, 0, &input);
  532. +   if (res >= 0 && input >= 0 && input <= 100000000)
  533. +       up_rate_us = input;
  534. +   return res < 0 ? res : count;
  535. +}
  536. +
  537. +static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf)
  538. +{
  539. +   return sprintf(buf, "%lu\n", down_rate_us);
  540. +}
  541. +
  542. +static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  543. +{
  544. +   ssize_t res;
  545. +   unsigned long input;
  546. +   res = strict_strtoul(buf, 0, &input);
  547. +   if (res >= 0 && input >= 0 && input <= 100000000)
  548. +       down_rate_us = input;
  549. +   return res < 0 ? res : count;
  550. +}
  551. +
  552. +static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf)
  553. +{
  554. +   return sprintf(buf, "%u\n", sleep_ideal_freq);
  555. +}
  556. +
  557. +static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  558. +{
  559. +   ssize_t res;
  560. +   unsigned long input;
  561. +   res = strict_strtoul(buf, 0, &input);
  562. +   if (res >= 0 && input >= 0) {
  563. +       sleep_ideal_freq = input;
  564. +       if (suspended)
  565. +           smartass_update_min_max_allcpus();
  566. +   }
  567. +   return res < 0 ? res : count;
  568. +}
  569. +
  570. +static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf)
  571. +{
  572. +   return sprintf(buf, "%u\n", sleep_wakeup_freq);
  573. +}
  574. +
  575. +static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  576. +{
  577. +   ssize_t res;
  578. +   unsigned long input;
  579. +   res = strict_strtoul(buf, 0, &input);
  580. +   if (res >= 0 && input >= 0)
  581. +       sleep_wakeup_freq = input;
  582. +   return res < 0 ? res : count;
  583. +}
  584. +
  585. +static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf)
  586. +{
  587. +   return sprintf(buf, "%u\n", awake_ideal_freq);
  588. +}
  589. +
  590. +static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  591. +{
  592. +   ssize_t res;
  593. +   unsigned long input;
  594. +   res = strict_strtoul(buf, 0, &input);
  595. +   if (res >= 0 && input >= 0) {
  596. +       awake_ideal_freq = input;
  597. +       if (!suspended)
  598. +           smartass_update_min_max_allcpus();
  599. +   }
  600. +   return res < 0 ? res : count;
  601. +}
  602. +
  603. +static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf)
  604. +{
  605. +   return sprintf(buf, "%u\n", sample_rate_jiffies);
  606. +}
  607. +
  608. +static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  609. +{
  610. +   ssize_t res;
  611. +   unsigned long input;
  612. +   res = strict_strtoul(buf, 0, &input);
  613. +   if (res >= 0 && input > 0 && input <= 1000)
  614. +       sample_rate_jiffies = input;
  615. +   return res < 0 ? res : count;
  616. +}
  617. +
  618. +static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf)
  619. +{
  620. +   return sprintf(buf, "%u\n", ramp_up_step);
  621. +}
  622. +
  623. +static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  624. +{
  625. +   ssize_t res;
  626. +   unsigned long input;
  627. +   res = strict_strtoul(buf, 0, &input);
  628. +   if (res >= 0 && input >= 0)
  629. +       ramp_up_step = input;
  630. +   return res < 0 ? res : count;
  631. +}
  632. +
  633. +static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf)
  634. +{
  635. +   return sprintf(buf, "%u\n", ramp_down_step);
  636. +}
  637. +
  638. +static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  639. +{
  640. +   ssize_t res;
  641. +   unsigned long input;
  642. +   res = strict_strtoul(buf, 0, &input);
  643. +   if (res >= 0 && input >= 0)
  644. +       ramp_down_step = input;
  645. +   return res < 0 ? res : count;
  646. +}
  647. +
  648. +static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf)
  649. +{
  650. +   return sprintf(buf, "%lu\n", max_cpu_load);
  651. +}
  652. +
  653. +static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  654. +{
  655. +   ssize_t res;
  656. +   unsigned long input;
  657. +   res = strict_strtoul(buf, 0, &input);
  658. +   if (res >= 0 && input > 0 && input <= 100)
  659. +       max_cpu_load = input;
  660. +   return res < 0 ? res : count;
  661. +}
  662. +
  663. +static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf)
  664. +{
  665. +   return sprintf(buf, "%lu\n", min_cpu_load);
  666. +}
  667. +
  668. +static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  669. +{
  670. +   ssize_t res;
  671. +   unsigned long input;
  672. +   res = strict_strtoul(buf, 0, &input);
  673. +   if (res >= 0 && input > 0 && input < 100)
  674. +       min_cpu_load = input;
  675. +   return res < 0 ? res : count;
  676. +}
  677. +
  678. +#define define_global_rw_attr(_name)       \
  679. +static struct global_attr _name##_attr =   \
  680. +   __ATTR(_name, 0644, show_##_name, store_##_name)
  681. +
  682. +define_global_rw_attr(debug_mask); // one 0644 global_attr per tunable, wired to the show_/store_ pair above
  683. +define_global_rw_attr(up_rate_us);
  684. +define_global_rw_attr(down_rate_us);
  685. +define_global_rw_attr(sleep_ideal_freq);
  686. +define_global_rw_attr(sleep_wakeup_freq);
  687. +define_global_rw_attr(awake_ideal_freq);
  688. +define_global_rw_attr(sample_rate_jiffies);
  689. +define_global_rw_attr(ramp_up_step);
  690. +define_global_rw_attr(ramp_down_step);
  691. +define_global_rw_attr(max_cpu_load);
  692. +define_global_rw_attr(min_cpu_load);
  693. +
  694. +static struct attribute * smartass_attributes[] = { // NULL-terminated list for the "smartass" sysfs group
  695. +   &debug_mask_attr.attr,
  696. +   &up_rate_us_attr.attr,
  697. +   &down_rate_us_attr.attr,
  698. +   &sleep_ideal_freq_attr.attr,
  699. +   &sleep_wakeup_freq_attr.attr,
  700. +   &awake_ideal_freq_attr.attr,
  701. +   &sample_rate_jiffies_attr.attr,
  702. +   &ramp_up_step_attr.attr,
  703. +   &ramp_down_step_attr.attr,
  704. +   &max_cpu_load_attr.attr,
  705. +   &min_cpu_load_attr.attr,
  706. +   NULL,
  707. +};
  708. +
  709. +static struct attribute_group smartass_attr_group = { // registered on cpufreq_global_kobject at governor start
  710. +   .attrs = smartass_attributes,
  711. +   .name = "smartass",
  712. +};
  713. +
  714. +static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy,
  715. +       unsigned int event)
  716. +{ // cpufreq governor callback: handles START/LIMITS/STOP for one policy/cpu
  717. +   unsigned int cpu = new_policy->cpu;
  718. +   int rc;
  719. +   struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
  720. +
  721. +   switch (event) {
  722. +   case CPUFREQ_GOV_START:
  723. +       if ((!cpu_online(cpu)) || (!new_policy->cur))
  724. +           return -EINVAL;
  725. +
  726. +       this_smartass->cur_policy = new_policy;
  727. +
  728. +       this_smartass->enable = 1;
  729. +
  730. +       smartass_update_min_max(this_smartass,new_policy,suspended);
  731. +
  732. +       this_smartass->freq_table = cpufreq_frequency_get_table(cpu);
  733. +       if (!this_smartass->freq_table)
  734. +           printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu);
  735. +
  736. +       smp_wmb();
  737. +
  738. +       // Do not register the idle hook and create sysfs
  739. +       // entries if we have already done so.
  740. +       if (atomic_inc_return(&active_count) <= 1) {
  741. +           rc = sysfs_create_group(cpufreq_global_kobject,
  742. +                       &smartass_attr_group);
  743. +           if (rc)
  744. +               return rc;
  745. +
  746. +           pm_idle_old = pm_idle;
  747. +           pm_idle = cpufreq_idle;
  748. +       }
  749. +
  750. +       if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
  751. +           reset_timer(cpu,this_smartass);
  752. +
  753. +       break;
  754. +
  755. +   case CPUFREQ_GOV_LIMITS:
  756. +       smartass_update_min_max(this_smartass,new_policy,suspended);
  757. +
  758. +       if (this_smartass->cur_policy->cur > new_policy->max) {
  759. +           dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max);
  760. +           __cpufreq_driver_target(this_smartass->cur_policy,
  761. +                       new_policy->max, CPUFREQ_RELATION_H);
  762. +       }
  763. +       else if (this_smartass->cur_policy->cur < new_policy->min) {
  764. +           dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min);
  765. +           __cpufreq_driver_target(this_smartass->cur_policy,
  766. +                       new_policy->min, CPUFREQ_RELATION_L);
  767. +       }
  768. +
  769. +       if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
  770. +           reset_timer(cpu,this_smartass);
  771. +
  772. +       break;
  773. +
  774. +   case CPUFREQ_GOV_STOP:
  775. +       this_smartass->enable = 0;
  776. +       smp_wmb();
  777. +       del_timer(&this_smartass->timer);
  778. +       flush_work(&freq_scale_work);
  779. +       this_smartass->idle_exit_time = 0;
  780. +
  781. +       if (atomic_dec_return(&active_count) <= 0) { // was "<= 1": that tore down sysfs/idle hook while another cpu still ran the governor; START creates at inc_return()==1, so only the matching dec to 0 may destroy
  782. +           sysfs_remove_group(cpufreq_global_kobject,
  783. +                      &smartass_attr_group);
  784. +           pm_idle = pm_idle_old;
  785. +       }
  786. +       break;
  787. +   }
  788. +
  789. +   return 0;
  790. +}
  791. +
  792. +static void smartass_suspend(int cpu, int suspend)
  793. +{
  794. +   struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
  795. +   struct cpufreq_policy *policy = this_smartass->cur_policy;
  796. +   unsigned int new_freq;
  797. +
  798. +   if (!this_smartass->enable)
  799. +       return;
  800. +
  801. +   smartass_update_min_max(this_smartass,policy,suspend);
  802. +   if (!suspend) { // resume at max speed:
  803. +       new_freq = validate_freq(policy,sleep_wakeup_freq);
  804. +
  805. +       dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq);
  806. +
  807. +       __cpufreq_driver_target(policy, new_freq,
  808. +                   CPUFREQ_RELATION_L);
  809. +   } else {
  810. +       // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep
  811. +       // to allow some time to settle down. Instead we just reset our statistics (and reset the timer).
  812. +       // Eventually, the timer will adjust the frequency if necessary.
  813. +
  814. +       this_smartass->freq_change_time_in_idle =
  815. +           get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
  816. +
  817. +       dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur);
  818. +   }
  819. +
  820. +   reset_timer(smp_processor_id(),this_smartass);
  821. +}
  822. +
  823. +static void smartass_early_suspend(struct early_suspend *handler) {
  824. +   int i;
  825. +   if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0
  826. +       return;
  827. +   suspended = 1;
  828. +   for_each_online_cpu(i)
  829. +       smartass_suspend(i,1);
  830. +}
  831. +
  832. +static void smartass_late_resume(struct early_suspend *handler) {
  833. +   int i;
  834. +   if (!suspended) // already not suspended so nothing to do
  835. +       return;
  836. +   suspended = 0;
  837. +   for_each_online_cpu(i)
  838. +       smartass_suspend(i,0);
  839. +}
  840. +
/*
 * Android early_suspend descriptor: ties screen-off/screen-on events to
 * the governor's sleep/awake profile switches above.
 */
static struct early_suspend smartass_power_suspend = {
	.suspend = smartass_early_suspend,
	.resume = smartass_late_resume,
#ifdef CONFIG_MACH_HERO
	/* Run just after the framebuffer is disabled on HERO boards. */
	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
#endif
};
  848. +
  849. +static int __init cpufreq_smartass_init(void)
  850. +{
  851. +   unsigned int i;
  852. +   struct smartass_info_s *this_smartass;
  853. +   debug_mask = 0;
  854. +   up_rate_us = DEFAULT_UP_RATE_US;
  855. +   down_rate_us = DEFAULT_DOWN_RATE_US;
  856. +   sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ;
  857. +   sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ;
  858. +   awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ;
  859. +   sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES;
  860. +   ramp_up_step = DEFAULT_RAMP_UP_STEP;
  861. +   ramp_down_step = DEFAULT_RAMP_DOWN_STEP;
  862. +   max_cpu_load = DEFAULT_MAX_CPU_LOAD;
  863. +   min_cpu_load = DEFAULT_MIN_CPU_LOAD;
  864. +
  865. +   spin_lock_init(&cpumask_lock);
  866. +
  867. +   suspended = 0;
  868. +
  869. +   /* Initalize per-cpu data: */
  870. +   for_each_possible_cpu(i) {
  871. +       this_smartass = &per_cpu(smartass_info, i);
  872. +       this_smartass->enable = 0;
  873. +       this_smartass->cur_policy = 0;
  874. +       this_smartass->ramp_dir = 0;
  875. +       this_smartass->time_in_idle = 0;
  876. +       this_smartass->idle_exit_time = 0;
  877. +       this_smartass->freq_change_time = 0;
  878. +       this_smartass->freq_change_time_in_idle = 0;
  879. +       this_smartass->cur_cpu_load = 0;
  880. +       // intialize timer:
  881. +       init_timer_deferrable(&this_smartass->timer);
  882. +       this_smartass->timer.function = cpufreq_smartass_timer;
  883. +       this_smartass->timer.data = i;
  884. +       work_cpumask_test_and_clear(i);
  885. +   }
  886. +
  887. +   // Scale up is high priority
  888. +   up_wq = alloc_workqueue("ksmartass_up", WQ_HIGHPRI, 1);
  889. +   down_wq = alloc_workqueue("ksmartass_down", 0, 1);
  890. +   if (!up_wq || !down_wq)
  891. +       return -ENOMEM;
  892. +
  893. +   INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work);
  894. +
  895. +   register_early_suspend(&smartass_power_suspend);
  896. +
  897. +   return cpufreq_register_governor(&cpufreq_gov_smartass2);
  898. +}
  899. +
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
/* When smartass2 is the default governor it must be registered before the
 * cpufreq core picks a governor at boot, hence the earlier initcall level. */
fs_initcall(cpufreq_smartass_init);
#else
module_init(cpufreq_smartass_init);
#endif
  905. +
  906. +static void __exit cpufreq_smartass_exit(void)
  907. +{
  908. +   cpufreq_unregister_governor(&cpufreq_gov_smartass2);
  909. +   destroy_workqueue(up_wq);
  910. +   destroy_workqueue(down_wq);
  911. +}
  912. +
  913. +module_exit(cpufreq_smartass_exit);
  914. +
  915. +MODULE_AUTHOR ("Erasmux");
  916. +MODULE_DESCRIPTION ("'cpufreq_smartass2' - A smart cpufreq governor");
  917. +MODULE_LICENSE ("GPL");
  918. diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
  919. index 2f831c0..88615cd 100755
  920. --- a/include/linux/cpufreq.h
  921. +++ b/include/linux/cpufreq.h
  922. @@ -370,6 +370,9 @@ extern struct cpufreq_governor cpufreq_gov_interactive;
  923.  #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_FANTASY)
  924.  extern struct cpufreq_governor cpufreq_gov_fantasy;
  925.  #define CPUFREQ_DEFAULT_GOVERNOR   (&cpufreq_gov_fantasy)
  926. +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2)
  927. +extern struct cpufreq_governor cpufreq_gov_smartass2;
  928. +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_smartass2)
  929.  #endif
  930.  
  931.  
  932. diff --git a/kernel/sched.c b/kernel/sched.c
  933. index d488880..2c80597 100644
  934. --- a/kernel/sched.c
  935. +++ b/kernel/sched.c
  936. @@ -9415,3 +9415,4 @@ struct cgroup_subsys cpuacct_subsys = {
  937.  };
  938.  #endif /* CONFIG_CGROUP_CPUACCT */
  939.  
  940. +EXPORT_SYMBOL_GPL(nr_running);
Advertisement
Add Comment
Please, Sign In to add comment