Guest User

smartassv2 fix by DerTeufel, based on the idea of stratosk

a guest
Mar 14th, 2012
217
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 30.16 KB | None | 0 0
  1. ############ smartassV2 fixed for live_oc ###############################################################
  2.  
  3. /*
  4. * drivers/cpufreq/cpufreq_smartass2.c
  5. *
  6. * Copyright (C) 2010 Google, Inc.
  7. *
  8. * This software is licensed under the terms of the GNU General Public
  9. * License version 2, as published by the Free Software Foundation, and
  10. * may be copied, distributed, and modified under those terms.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * Author: Erasmux
  18. *
  19. * Based on the interactive governor By Mike Chan ([email protected])
* which was adapted to the 2.6.29 kernel by Nadlabak ([email protected])
  21. *
  22. * SMP support based on mod by faux123
  23. *
* For a general overview of smartassV2 see the relevant part in
  25. * Documentation/cpu-freq/governors.txt
  26. *
  27. */
  28.  
  29. #include <linux/cpu.h>
  30. #include <linux/cpumask.h>
  31. #include <linux/cpufreq.h>
  32. #include <linux/sched.h>
  33. #include <linux/tick.h>
  34. #include <linux/timer.h>
  35. #include <linux/workqueue.h>
  36. #include <linux/moduleparam.h>
  37. #include <asm/cputime.h>
  38. #include <linux/earlysuspend.h>
  39.  
  40. extern unsigned long cpuL1freq(void);
  41. extern unsigned long cpuL2freq(void);
  42. extern unsigned long cpuL3freq(void);
  43. extern unsigned long cpuL4freq(void);
  44. extern unsigned long cpuL5freq(void);
  45. extern unsigned long cpuL6freq(void);
  46. extern unsigned long cpuL7freq(void);
  47.  
  48. /******************** Tunable parameters: ********************/
  49.  
  50. /*
  51. * The "ideal" frequency to use when awake. The governor will ramp up faster
  52. * towards the ideal frequency and slower after it has passed it. Similarly,
  53. * lowering the frequency towards the ideal frequency is faster than below it.
  54. */
  55. #define DEFAULT_AWAKE_IDEAL_FREQ (800*1000)
  56. static unsigned int awake_ideal_freq;
  57.  
  58. /*
  59. * The "ideal" frequency to use when suspended.
  60. * When set to 0, the governor will not track the suspended state (meaning
  61. * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
  62. * also when suspended).
  63. */
  64. #define DEFAULT_SLEEP_IDEAL_FREQ (200*1000)
  65. static unsigned int sleep_ideal_freq;
  66.  
/*
* Frequency delta when ramping up above the ideal frequency.
* Zero disables and causes to always jump straight to max frequency.
* When below the ideal frequency we always ramp up to the ideal freq.
*/
  72. #define DEFAULT_RAMP_UP_STEP (200*1000)
  73. static unsigned int ramp_up_step;
  74.  
/*
* Frequency delta when ramping down below the ideal frequency.
* Zero disables and will calculate ramp down according to load heuristic.
* When above the ideal frequency we always ramp down to the ideal freq.
*/
  80. #define DEFAULT_RAMP_DOWN_STEP (200*1000)
  81. static unsigned int ramp_down_step;
  82.  
  83. /*
  84. * CPU freq will be increased if measured load > max_cpu_load;
  85. */
  86. #define DEFAULT_MAX_CPU_LOAD 50
  87. static unsigned long max_cpu_load;
  88.  
  89. /*
  90. * CPU freq will be decreased if measured load < min_cpu_load;
  91. */
  92. #define DEFAULT_MIN_CPU_LOAD 25
  93. static unsigned long min_cpu_load;
  94.  
  95. /*
  96. * The minimum amount of time to spend at a frequency before we can ramp up.
  97. * Notice we ignore this when we are below the ideal frequency.
  98. */
  99. #define DEFAULT_UP_RATE_US 10000;
  100. static unsigned long up_rate_us;
  101.  
  102. /*
  103. * The minimum amount of time to spend at a frequency before we can ramp down.
  104. * Notice we ignore this when we are above the ideal frequency.
  105. */
  106. #define DEFAULT_DOWN_RATE_US 99000;
  107. static unsigned long down_rate_us;
  108.  
  109. /*
  110. * The frequency to set when waking up from sleep.
  111. * When sleep_ideal_freq=0 this will have no effect.
  112. */
  113. #define DEFAULT_SLEEP_WAKEUP_FREQ (800*1000)
  114. static unsigned int sleep_wakeup_freq;
  115.  
  116. /*
  117. * Sampling rate, I highly recommend to leave it at 2.
  118. */
  119. #define DEFAULT_SAMPLE_RATE_JIFFIES 2
  120. static unsigned int sample_rate_jiffies;
  121.  
  122.  
  123. /*************** End of tunables ***************/
  124.  
  125.  
/* Original pm_idle handler, saved so our idle hook can chain to it. */
static void (*pm_idle_old)(void);
/* Count of policies currently started with this governor; gates one-time
 * setup/teardown of the sysfs group and the idle hook. */
static atomic_t active_count = ATOMIC_INIT(0);

/* Per-CPU governor state. */
struct smartass_info_s {
	struct cpufreq_policy *cur_policy;	/* policy this CPU is governed under */
	struct cpufreq_frequency_table *freq_table;	/* driver table (may be NULL) */
	struct timer_list timer;	/* sampling timer */
	u64 time_in_idle;	/* idle time at start of current sample window */
	u64 idle_exit_time;	/* wall time at start of current sample window */
	u64 freq_change_time;	/* wall time of last frequency change */
	u64 freq_change_time_in_idle;	/* idle time at last frequency change */
	int cur_cpu_load;	/* load (0..100) measured by the timer */
	int old_freq;	/* frequency at the time work was queued */
	int ramp_dir;	/* +1 = ramp up queued, -1 = ramp down, 0 = nothing */
	unsigned int enable;	/* nonzero while governor is active on this CPU */
	int ideal_speed;	/* current ideal frequency, clamped to policy limits */
};
static DEFINE_PER_CPU(struct smartass_info_s, smartass_info);

/* Workqueues handle frequency scaling */
static struct workqueue_struct *up_wq;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_work;

/* CPUs with a pending ramp decision for the work function; guarded by
 * cpumask_lock since timer (softirq) and work contexts both touch it. */
static cpumask_t work_cpumask;
static spinlock_t cpumask_lock;

/* Nonzero between early-suspend and late-resume (screen off). */
static unsigned int suspended;

#define dprintk(flag,msg...) do { \
	if (debug_mask & flag) printk(KERN_DEBUG msg); \
} while (0)

enum {
	SMARTASS_DEBUG_JUMPS=1,	/* log every frequency jump */
	SMARTASS_DEBUG_LOAD=2,	/* log each load sample */
	SMARTASS_DEBUG_ALG=4	/* log ramp decisions */
};

/*
 * Combination of the above debug flags.
 */
static unsigned long debug_mask;

static int cpufreq_governor_smartass(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
static
#endif
struct cpufreq_governor cpufreq_gov_smartass2 = {
	.name = "smartassV2",
	.governor = cpufreq_governor_smartass,
	.max_transition_latency = 9000000,
	.owner = THIS_MODULE,
};
  182.  
/*
 * Recompute this CPU's "ideal" speed from the tunables and clamp it into
 * the policy's [min, max] range.  @suspend selects between the sleep and
 * awake ideal frequencies.
 *
 * NOTE(review): the snapping below maps the tunables onto the live_oc
 * L-level table (cpuL1freq()..cpuL7freq()).  The first branch snaps a
 * sleep_ideal_freq in [L7, L6] to the L6 step, whereas
 * store_sleep_ideal_freq() snaps that same range to L7 -- presumably
 * intentional for live_oc, but worth confirming against the L-level table.
 */
inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) {
	/* Snap sleep_ideal_freq to a supported L level (fallback: L6). */
	if(sleep_ideal_freq >= cpuL7freq() && sleep_ideal_freq <= cpuL6freq())
		sleep_ideal_freq = cpuL6freq();
	else if(sleep_ideal_freq >= cpuL6freq() && sleep_ideal_freq <= cpuL5freq())
		sleep_ideal_freq = cpuL5freq();
	else sleep_ideal_freq = cpuL6freq();

	/* Snap awake_ideal_freq to a supported L level (fallback: L4). */
	if(awake_ideal_freq > cpuL3freq() && awake_ideal_freq <= cpuL2freq())
		awake_ideal_freq = cpuL2freq();
	else if(awake_ideal_freq > cpuL4freq() && awake_ideal_freq <= cpuL3freq())
		awake_ideal_freq = cpuL3freq();
	else if(awake_ideal_freq > cpuL5freq() && awake_ideal_freq <= cpuL4freq())
		awake_ideal_freq = cpuL4freq();
	else awake_ideal_freq = cpuL4freq();

	if (suspend) {
		this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max
			policy->max > sleep_ideal_freq ?
			(sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max;
	} else {
		this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max
			policy->min < awake_ideal_freq ?
			(awake_ideal_freq < policy->max ? awake_ideal_freq : policy->max) : policy->min;
	}
}
  207.  
  208. inline static void smartass_update_min_max_allcpus(void) {
  209. unsigned int i;
  210. for_each_online_cpu(i) {
  211. struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i);
  212. if (this_smartass->enable)
  213. smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended);
  214. }
  215. }
  216.  
  217. inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) {
  218. if (freq > (int)policy->max)
  219. return policy->max;
  220. if (freq < (int)policy->min)
  221. return policy->min;
  222. return freq;
  223. }
  224.  
/*
 * Start a fresh sample window: record the current idle statistics for @cpu
 * and (re)arm the sampling timer sample_rate_jiffies from now.
 */
inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) {
	this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time);
	mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
}
  229.  
/* Mark @cpu as having a pending ramp decision; IRQ-safe via cpumask_lock. */
inline static void work_cpumask_set(unsigned long cpu) {
	unsigned long flags;
	spin_lock_irqsave(&cpumask_lock, flags);
	cpumask_set_cpu(cpu, &work_cpumask);
	spin_unlock_irqrestore(&cpumask_lock, flags);
}
  236.  
  237. inline static int work_cpumask_test_and_clear(unsigned long cpu) {
  238. unsigned long flags;
  239. int res = 0;
  240. spin_lock_irqsave(&cpumask_lock, flags);
  241. res = cpumask_test_and_clear_cpu(cpu, &work_cpumask);
  242. spin_unlock_irqrestore(&cpumask_lock, flags);
  243. return res;
  244. }
  245.  
/*
 * Change this policy's frequency towards @new_freq (clamped to the policy
 * limits), preferring @prefered_relation when picking a table entry.
 * Falls back to the opposite relation if the preferred one would leave us
 * stuck at @old_freq.  Returns the frequency actually targeted, or 0 if no
 * change was made.
 */
inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass,
		int new_freq, int old_freq, int prefered_relation) {
	int index, target;
	struct cpufreq_frequency_table *table = this_smartass->freq_table;

	if (new_freq == old_freq)
		return 0;
	new_freq = validate_freq(policy,new_freq);
	if (new_freq == old_freq)
		return 0;

	if (table &&
		!cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index))
	{
		target = table[index].frequency;
		if (target == old_freq) {
			// if for example we are ramping up to *at most* current + ramp_up_step
			// but there is no such frequency higher than the current, try also
			// to ramp up to *at least* current + ramp_up_step.
			if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H
				&& !cpufreq_frequency_table_target(policy,table,new_freq,
					CPUFREQ_RELATION_L,&index))
				target = table[index].frequency;
			// similarly for ramping down:
			else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L
				&& !cpufreq_frequency_table_target(policy,table,new_freq,
					CPUFREQ_RELATION_H,&index))
				target = table[index].frequency;
		}

		if (target == old_freq) {
			// We should not get here:
			// If we got here we tried to change to a validated new_freq which is different
			// from old_freq, so there is no reason for us to remain at same frequency.
			printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n",
				old_freq,new_freq,target);
			return 0;
		}
	}
	else target = new_freq;	// no table: ask the driver for the raw frequency

	__cpufreq_driver_target(policy, target, prefered_relation);

	dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n",
		old_freq,new_freq,target,policy->cur);

	return target;
}
  294.  
/*
 * Sampling timer callback (softirq context).  Measures the CPU load over
 * the window opened by the last reset_timer() and, if warranted, queues a
 * ramp-up or ramp-down decision for the work function to act on.  The
 * actual frequency change happens in cpufreq_smartass_freq_change_time_work().
 */
static void cpufreq_smartass_timer(unsigned long cpu)
{
	u64 delta_idle;
	u64 delta_time;
	int cpu_load;
	int old_freq;
	u64 update_time;
	u64 now_idle;
	int queued_work = 0;
	struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
	struct cpufreq_policy *policy = this_smartass->cur_policy;

	now_idle = get_cpu_idle_time_us(cpu, &update_time);
	old_freq = policy->cur;

	// No open sample window (idle_exit_time is zeroed on GOV_STOP), or a
	// zero-length window: nothing to measure.
	if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time)
		return;

	delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle);
	delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time);

	// If timer ran less than 1ms after short-term sample started, retry.
	if (delta_time < 1000) {
		if (!timer_pending(&this_smartass->timer))
			reset_timer(cpu,this_smartass);
		return;
	}

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time;

	dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n",
		old_freq,cpu_load,delta_time);

	this_smartass->cur_cpu_load = cpu_load;
	this_smartass->old_freq = old_freq;

	// Scale up if load is above max or if there were no idle cycles since coming out of idle,
	// additionally, if we are at or above the ideal_speed, verify we have been at this frequency
	// for at least up_rate_us:
	if (cpu_load > max_cpu_load || delta_idle == 0)
	{
		if (old_freq < policy->max &&
			(old_freq < this_smartass->ideal_speed || delta_idle == 0 ||
			cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us))
		{
			dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n",
				old_freq,cpu_load,delta_idle);
			this_smartass->ramp_dir = 1;
			work_cpumask_set(cpu);
			queue_work(up_wq, &freq_scale_work);
			queued_work = 1;
		}
		else this_smartass->ramp_dir = 0;
	}
	// Similarly for scale down: load should be below min and if we are at or below ideal
	// frequency we require that we have been at this frequency for at least down_rate_us:
	else if (cpu_load < min_cpu_load && old_freq > policy->min &&
		(old_freq > this_smartass->ideal_speed ||
		cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us))
	{
		dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n",
			old_freq,cpu_load,delta_idle);
		this_smartass->ramp_dir = -1;
		work_cpumask_set(cpu);
		queue_work(down_wq, &freq_scale_work);
		queued_work = 1;
	}
	else this_smartass->ramp_dir = 0;

	// To avoid unnecessary load when the CPU is already at high load, we don't
	// reset ourselves if we are at max speed. If and when there are idle cycles,
	// the idle loop will activate the timer.
	// Additionally, if we queued some work, the work task will reset the timer
	// after it has done its adjustments.
	if (!queued_work && old_freq < policy->max)
		reset_timer(cpu,this_smartass);
}
  375.  
/*
 * pm_idle hook, chained in front of the saved original handler.  Stops the
 * sampling timer while idling at the minimum frequency (there is nothing
 * to ramp down to) and re-arms it after idle so busy periods get sampled.
 */
static void cpufreq_idle(void)
{
	struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
	struct cpufreq_policy *policy = this_smartass->cur_policy;

	if (!this_smartass->enable) {
		pm_idle_old();
		return;
	}

	// Already at the minimum: no point sampling while idle.
	if (policy->cur == policy->min && timer_pending(&this_smartass->timer))
		del_timer(&this_smartass->timer);

	pm_idle_old();

	if (!timer_pending(&this_smartass->timer))
		reset_timer(smp_processor_id(), this_smartass);
}
  394.  
/* We use the same work function to scale up and down */
/*
 * Work function (process context) shared by the up and down workqueues.
 * For every CPU flagged in work_cpumask, compute the new target frequency
 * from the ramp direction the timer recorded, apply it via target_freq(),
 * and restart the sampling timer.
 */
static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
{
	unsigned int cpu;
	int new_freq;
	int old_freq;
	int ramp_dir;
	struct smartass_info_s *this_smartass;
	struct cpufreq_policy *policy;
	unsigned int relation = CPUFREQ_RELATION_L;
	for_each_possible_cpu(cpu) {
		this_smartass = &per_cpu(smartass_info, cpu);
		if (!work_cpumask_test_and_clear(cpu))
			continue;

		// Consume the queued decision.
		ramp_dir = this_smartass->ramp_dir;
		this_smartass->ramp_dir = 0;

		old_freq = this_smartass->old_freq;
		policy = this_smartass->cur_policy;

		if (old_freq != policy->cur) {
			// frequency was changed by someone else?
			printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n",
				old_freq,policy->cur);
			new_freq = old_freq;
		}
		// Only ramp up when there is more than one runnable task.
		else if (ramp_dir > 0 && nr_running() > 1) {
			// ramp up logic:
			if (old_freq < this_smartass->ideal_speed)
				new_freq = this_smartass->ideal_speed;
			else if (ramp_up_step) {
				new_freq = old_freq + ramp_up_step;
				relation = CPUFREQ_RELATION_H;
			}
			else {
				// ramp_up_step==0: jump straight to max.
				new_freq = policy->max;
				relation = CPUFREQ_RELATION_H;
			}
			dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n",
				old_freq,ramp_dir,this_smartass->ideal_speed);
		}
		else if (ramp_dir < 0) {
			// ramp down logic:
			if (old_freq > this_smartass->ideal_speed) {
				new_freq = this_smartass->ideal_speed;
				relation = CPUFREQ_RELATION_H;
			}
			else if (ramp_down_step)
				new_freq = old_freq - ramp_down_step;
			else {
				// Load heuristics: Adjust new_freq such that, assuming a linear
				// scaling of load vs. frequency, the load in the new frequency
				// will be max_cpu_load:
				new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load;
				if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?!
					new_freq = old_freq -1;
			}
			dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n",
				old_freq,ramp_dir,this_smartass->ideal_speed);
		}
		else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down
			// before the work task gets to run?
			// This may also happen if we refused to ramp up because the nr_running()==1
			new_freq = old_freq;
			dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n",
				old_freq,ramp_dir,nr_running());
		}

		// do actual ramp up (returns 0, if frequency change failed):
		new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation);
		if (new_freq)
			this_smartass->freq_change_time_in_idle =
				get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);

		// reset timer:
		if (new_freq < policy->max)
			reset_timer(cpu,this_smartass);
		// if we are maxed out, it is pointless to use the timer
		// (idle cycles wake up the timer when the timer comes)
		else if (timer_pending(&this_smartass->timer))
			del_timer(&this_smartass->timer);
	}
}
  479.  
  480. static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf)
  481. {
  482. return sprintf(buf, "%lu\n", debug_mask);
  483. }
  484.  
  485. static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  486. {
  487. ssize_t res;
  488. unsigned long input;
  489. res = strict_strtoul(buf, 0, &input);
  490. if (res >= 0)
  491. debug_mask = input;
  492. return count;
  493. }
  494.  
  495. static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf)
  496. {
  497. return sprintf(buf, "%lu\n", up_rate_us);
  498. }
  499.  
  500. static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  501. {
  502. ssize_t res;
  503. unsigned long input;
  504. res = strict_strtoul(buf, 0, &input);
  505. if (res >= 0 && input >= 0 && input <= 100000000)
  506. up_rate_us = input;
  507. return count;
  508. }
  509.  
  510. static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf)
  511. {
  512. return sprintf(buf, "%lu\n", down_rate_us);
  513. }
  514.  
  515. static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  516. {
  517. ssize_t res;
  518. unsigned long input;
  519. res = strict_strtoul(buf, 0, &input);
  520. if (res >= 0 && input >= 0 && input <= 100000000)
  521. down_rate_us = input;
  522. return count;
  523. }
  524.  
  525. static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf)
  526. {
  527. return sprintf(buf, "%u\n", sleep_ideal_freq);
  528. }
  529.  
  530. static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  531. {
  532. ssize_t res;
  533. unsigned long input;
  534. res = strict_strtoul(buf, 0, &input);
  535. if (res >= 0 && input >= 0) {
  536. if(input > 0 && input <= cpuL7freq())
  537. sleep_ideal_freq = cpuL7freq();
  538. else if(input > cpuL7freq() && input <= cpuL6freq())
  539. sleep_ideal_freq = cpuL6freq();
  540. else if(input > cpuL6freq() && input <= cpuL5freq())
  541. sleep_ideal_freq = cpuL5freq();
  542. else sleep_ideal_freq = cpuL6freq();
  543. if (suspended)
  544. smartass_update_min_max_allcpus();
  545. }
  546. return count;
  547. }
  548.  
  549. static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf)
  550. {
  551. return sprintf(buf, "%u\n", sleep_wakeup_freq);
  552. }
  553.  
  554. static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  555. {
  556. ssize_t res;
  557. unsigned long input;
  558. res = strict_strtoul(buf, 0, &input);
  559. if (res >= 0 && input >= 0)
  560. if(input > cpuL2freq())
  561. sleep_wakeup_freq = cpuL1freq();
  562. else if(input > cpuL3freq() && input <= cpuL2freq())
  563. sleep_wakeup_freq = cpuL2freq();
  564. else if(input > cpuL4freq() && input <= cpuL3freq())
  565. sleep_wakeup_freq = cpuL3freq();
  566. else if(input > cpuL5freq() && input <= cpuL4freq())
  567. sleep_wakeup_freq = cpuL4freq();
  568. else if(input > cpuL6freq() && input <= cpuL5freq())
  569. sleep_wakeup_freq = cpuL5freq();
  570. else
  571. sleep_wakeup_freq = cpuL4freq();
  572. return count;
  573. }
  574.  
  575. static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf)
  576. {
  577. return sprintf(buf, "%u\n", awake_ideal_freq);
  578. }
  579.  
  580. static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  581. {
  582. ssize_t res;
  583. unsigned long input;
  584. res = strict_strtoul(buf, 0, &input);
  585. if (res >= 0 && input >= 0) {
  586. if(input > cpuL2freq())
  587. awake_ideal_freq = cpuL1freq();
  588. else if(input > cpuL3freq() && input <= cpuL2freq())
  589. awake_ideal_freq = cpuL2freq();
  590. else if(input > cpuL4freq() && input <= cpuL3freq())
  591. awake_ideal_freq = cpuL3freq();
  592. else if(input > cpuL5freq() && input <= cpuL4freq())
  593. awake_ideal_freq = cpuL4freq();
  594. else if(input > cpuL6freq() && input <= cpuL5freq())
  595. awake_ideal_freq = cpuL5freq();
  596. else
  597. awake_ideal_freq = cpuL4freq();
  598. if (!suspended)
  599. smartass_update_min_max_allcpus();
  600. }
  601. return count;
  602. }
  603.  
  604. static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf)
  605. {
  606. return sprintf(buf, "%u\n", sample_rate_jiffies);
  607. }
  608.  
  609. static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  610. {
  611. ssize_t res;
  612. unsigned long input;
  613. res = strict_strtoul(buf, 0, &input);
  614. if (res >= 0 && input > 0 && input <= 1000)
  615. sample_rate_jiffies = input;
  616. return count;
  617. }
  618.  
  619. static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf)
  620. {
  621. return sprintf(buf, "%u\n", ramp_up_step);
  622. }
  623.  
  624. static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  625. {
  626. ssize_t res;
  627. unsigned long input;
  628. res = strict_strtoul(buf, 0, &input);
  629. if (res >= 0 && input >= 0)
  630. ramp_up_step = input;
  631. return count;
  632. }
  633.  
  634. static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf)
  635. {
  636. return sprintf(buf, "%u\n", ramp_down_step);
  637. }
  638.  
  639. static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  640. {
  641. ssize_t res;
  642. unsigned long input;
  643. res = strict_strtoul(buf, 0, &input);
  644. if (res >= 0 && input >= 0)
  645. ramp_down_step = input;
  646. return count;
  647. }
  648.  
  649. static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf)
  650. {
  651. return sprintf(buf, "%lu\n", max_cpu_load);
  652. }
  653.  
  654. static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  655. {
  656. ssize_t res;
  657. unsigned long input;
  658. res = strict_strtoul(buf, 0, &input);
  659. if (res >= 0 && input > 0 && input <= 100)
  660. max_cpu_load = input;
  661. return count;
  662. }
  663.  
  664. static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf)
  665. {
  666. return sprintf(buf, "%lu\n", min_cpu_load);
  667. }
  668.  
  669. static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
  670. {
  671. ssize_t res;
  672. unsigned long input;
  673. res = strict_strtoul(buf, 0, &input);
  674. if (res >= 0 && input > 0 && input < 100)
  675. min_cpu_load = input;
  676. return count;
  677. }
  678.  
/* Declare a 0644 sysfs attribute wired to show_<name>/store_<name>. */
#define define_global_rw_attr(_name) \
static struct global_attr _name##_attr = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_global_rw_attr(debug_mask);
define_global_rw_attr(up_rate_us);
define_global_rw_attr(down_rate_us);
define_global_rw_attr(sleep_ideal_freq);
define_global_rw_attr(sleep_wakeup_freq);
define_global_rw_attr(awake_ideal_freq);
define_global_rw_attr(sample_rate_jiffies);
define_global_rw_attr(ramp_up_step);
define_global_rw_attr(ramp_down_step);
define_global_rw_attr(max_cpu_load);
define_global_rw_attr(min_cpu_load);

/* All tunables, exported as a "smartass" group on the global cpufreq kobject. */
static struct attribute * smartass_attributes[] = {
	&debug_mask_attr.attr,
	&up_rate_us_attr.attr,
	&down_rate_us_attr.attr,
	&sleep_ideal_freq_attr.attr,
	&sleep_wakeup_freq_attr.attr,
	&awake_ideal_freq_attr.attr,
	&sample_rate_jiffies_attr.attr,
	&ramp_up_step_attr.attr,
	&ramp_down_step_attr.attr,
	&max_cpu_load_attr.attr,
	&min_cpu_load_attr.attr,
	NULL,
};

static struct attribute_group smartass_attr_group = {
	.attrs = smartass_attributes,
	.name = "smartass",
};
  714.  
/*
 * cpufreq governor callback: handle GOV_START / GOV_LIMITS / GOV_STOP for
 * one policy.  First start registers the sysfs group and the idle hook;
 * stop tears them down again.
 */
static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy,
		unsigned int event)
{
	unsigned int cpu = new_policy->cpu;
	int rc;
	struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!new_policy->cur))
			return -EINVAL;

		this_smartass->cur_policy = new_policy;

		this_smartass->enable = 1;

		smartass_update_min_max(this_smartass,new_policy,suspended);

		this_smartass->freq_table = cpufreq_frequency_get_table(cpu);
		if (!this_smartass->freq_table)
			printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu);

		smp_wmb();

		// Do not register the idle hook and create sysfs
		// entries if we have already done so.
		if (atomic_inc_return(&active_count) <= 1) {
			rc = sysfs_create_group(cpufreq_global_kobject,
				&smartass_attr_group);
			if (rc)
				return rc;

			pm_idle_old = pm_idle;
			pm_idle = cpufreq_idle;
		}

		if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
			reset_timer(cpu,this_smartass);

		break;

	case CPUFREQ_GOV_LIMITS:
		// Policy limits changed: re-clamp the ideal speed and snap the
		// current frequency back into the new [min, max] range.
		smartass_update_min_max(this_smartass,new_policy,suspended);

		if (this_smartass->cur_policy->cur > new_policy->max) {
			dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max);
			__cpufreq_driver_target(this_smartass->cur_policy,
				new_policy->max, CPUFREQ_RELATION_H);
		}
		else if (this_smartass->cur_policy->cur < new_policy->min) {
			dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min);
			__cpufreq_driver_target(this_smartass->cur_policy,
				new_policy->min, CPUFREQ_RELATION_L);
		}

		if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
			reset_timer(cpu,this_smartass);

		break;

	case CPUFREQ_GOV_STOP:
		this_smartass->enable = 0;
		smp_wmb();
		del_timer(&this_smartass->timer);
		flush_work(&freq_scale_work);
		this_smartass->idle_exit_time = 0;

		// NOTE(review): setup happens on inc_return() <= 1 (count 0 -> 1),
		// but teardown here fires on dec_return() <= 1, i.e. already when
		// the count drops to 1 while another CPU may still run this
		// governor on SMP -- asymmetric; confirm this is intended.
		if (atomic_dec_return(&active_count) <= 1) {
			sysfs_remove_group(cpufreq_global_kobject,
				&smartass_attr_group);
			pm_idle = pm_idle_old;
		}
		break;
	}

	return 0;
}
  792.  
  793. static void smartass_suspend(int cpu, int suspend)
  794. {
  795. struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
  796. struct cpufreq_policy *policy = this_smartass->cur_policy;
  797. unsigned int new_freq;
  798.  
  799. if (!this_smartass->enable)
  800. return;
  801.  
  802. smartass_update_min_max(this_smartass,policy,suspend);
  803. if (!suspend) { // resume at max speed:
  804. new_freq = validate_freq(policy,sleep_wakeup_freq);
  805.  
  806. dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq);
  807.  
  808. __cpufreq_driver_target(policy, new_freq,
  809. CPUFREQ_RELATION_L);
  810. } else {
  811. // to avoid wakeup issues with quick sleep/wakeup don't change actual frequency when entering sleep
  812. // to allow some time to settle down. Instead we just reset our statistics (and reset the timer).
  813. // Eventually, the timer will adjust the frequency if necessary.
  814.  
  815. this_smartass->freq_change_time_in_idle =
  816. get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
  817.  
  818. dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur);
  819. }
  820.  
  821. reset_timer(smp_processor_id(),this_smartass);
  822. }
  823.  
  824. static void smartass_early_suspend(struct early_suspend *handler) {
  825. int i;
  826. if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0
  827. return;
  828. suspended = 1;
  829. for_each_online_cpu(i)
  830. smartass_suspend(i,1);
  831. }
  832.  
  833. static void smartass_late_resume(struct early_suspend *handler) {
  834. int i;
  835. if (!suspended) // already not suspended so nothing to do
  836. return;
  837. suspended = 0;
  838. for_each_online_cpu(i)
  839. smartass_suspend(i,0);
  840. }
  841.  
/* Early-suspend registration: notifies the governor of screen off/on. */
static struct early_suspend smartass_power_suspend = {
	.suspend = smartass_early_suspend,
	.resume = smartass_late_resume,
#ifdef CONFIG_MACH_HERO
	/* On Hero, run just after the framebuffer is disabled. */
	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
#endif
};
  849.  
  850. static int __init cpufreq_smartass_init(void)
  851. {
  852. unsigned int i;
  853. unsigned long low_freq;
  854. struct smartass_info_s *this_smartass;
  855.  
  856. low_freq = cpuL6freq();
  857. debug_mask = 0;
  858. up_rate_us = DEFAULT_UP_RATE_US;
  859. down_rate_us = DEFAULT_DOWN_RATE_US;
  860. // sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ;
  861. sleep_ideal_freq = low_freq;
  862. // sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ;
  863. sleep_wakeup_freq = cpuL4freq();
  864. // awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ;
  865. awake_ideal_freq = cpuL4freq();
  866. sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES;
  867. // ramp_up_step = DEFAULT_RAMP_UP_STEP;
  868. ramp_up_step = low_freq;
  869. // ramp_down_step = DEFAULT_RAMP_DOWN_STEP;
  870. ramp_down_step = low_freq;
  871. max_cpu_load = DEFAULT_MAX_CPU_LOAD;
  872. min_cpu_load = DEFAULT_MIN_CPU_LOAD;
  873.  
  874. spin_lock_init(&cpumask_lock);
  875.  
  876. suspended = 0;
  877.  
  878. /* Initalize per-cpu data: */
  879. for_each_possible_cpu(i) {
  880. this_smartass = &per_cpu(smartass_info, i);
  881. this_smartass->enable = 0;
  882. this_smartass->cur_policy = 0;
  883. this_smartass->ramp_dir = 0;
  884. this_smartass->time_in_idle = 0;
  885. this_smartass->idle_exit_time = 0;
  886. this_smartass->freq_change_time = 0;
  887. this_smartass->freq_change_time_in_idle = 0;
  888. this_smartass->cur_cpu_load = 0;
  889. // intialize timer:
  890. init_timer_deferrable(&this_smartass->timer);
  891. this_smartass->timer.function = cpufreq_smartass_timer;
  892. this_smartass->timer.data = i;
  893. work_cpumask_test_and_clear(i);
  894. }
  895.  
  896. // Scale up is high priority
  897. // FIXME
  898. up_wq = alloc_workqueue("ksmartass_up", WQ_HIGHPRI, 1);
  899. down_wq = alloc_workqueue("ksmartass_down", 0, 1);
  900. //up_wq = create_rt_workqueue("ksmartass_up");
  901. //down_wq = create_workqueue("ksmartass_down");
  902. if (!up_wq || !down_wq)
  903. return -ENOMEM;
  904.  
  905. INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work);
  906.  
  907. register_early_suspend(&smartass_power_suspend);
  908.  
  909. return cpufreq_register_governor(&cpufreq_gov_smartass2);
  910. }
  911.  
/* When smartassV2 is the default governor it must be available before the
 * cpufreq core initializes, hence the earlier fs_initcall; otherwise a
 * normal module_init is sufficient. */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
fs_initcall(cpufreq_smartass_init);
#else
module_init(cpufreq_smartass_init);
#endif
  917.  
  918. static void __exit cpufreq_smartass_exit(void)
  919. {
  920. cpufreq_unregister_governor(&cpufreq_gov_smartass2);
  921. destroy_workqueue(up_wq);
  922. destroy_workqueue(down_wq);
  923. }
  924.  
module_exit(cpufreq_smartass_exit);

/* Standard module metadata. */
MODULE_AUTHOR ("Erasmux");
MODULE_DESCRIPTION ("'cpufreq_smartass2' - A smart cpufreq governor");
MODULE_LICENSE ("GPL");
  930.  
  931.  
  932.  
  933. #################################################################################################################################### values exported from cpufreq.c ##############################################
  934.  
  935. unsigned long cpuL0freq(void)
  936. {
  937. return s5pv210_freq_table[L0].frequency; // 1400 mhz
  938. }
  939. EXPORT_SYMBOL(cpuL0freq);
  940.  
  941.  
  942. unsigned long cpuL1freq(void)
  943. {
  944. return s5pv210_freq_table[L1].frequency; // 1300 mhz
  945. }
  946. EXPORT_SYMBOL(cpuL1freq);
  947.  
  948. unsigned long cpuL2freq(void)
  949. {
  950. return s5pv210_freq_table[L2].frequency; // 1200 mhz
  951. }
  952. EXPORT_SYMBOL(cpuL2freq);
  953.  
  954. unsigned long cpuL3freq(void)
  955. {
  956. return s5pv210_freq_table[L3].frequency; // 1000 mhz
  957. }
  958. EXPORT_SYMBOL(cpuL3freq);
  959.  
  960. unsigned long cpuL4freq(void)
  961. {
  962. return s5pv210_freq_table[L4].frequency; // 800 mhz
  963. }
  964. EXPORT_SYMBOL(cpuL4freq);
  965.  
  966. unsigned long cpuL5freq(void)
  967. {
  968. return s5pv210_freq_table[L5].frequency; // 400 mhz
  969. }
  970. EXPORT_SYMBOL(cpuL5freq);
  971.  
  972. unsigned long cpuL6freq(void)
  973. {
  974. return s5pv210_freq_table[L6].frequency; // 200 mhz
  975. }
  976. EXPORT_SYMBOL(cpuL6freq);
  977.  
  978. unsigned long cpuL7freq(void)
  979. {
  980. return s5pv210_freq_table[L7].frequency; // 100 mhz
  981. }
  982. EXPORT_SYMBOL(cpuL7freq);
Advertisement
Add Comment
Please, Sign In to add comment