thewadegeek

Smartass Kernel Governor

Jul 5th, 2012
/*
 * drivers/cpufreq/cpufreq_smartass.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Erasmux
 *
 * Based on the interactive governor by Mike Chan ([email protected]),
 * which was adapted to the 2.6.29 kernel by Nadlabak ([email protected])
 *
 * Requires adding
 *   EXPORT_SYMBOL_GPL(nr_running);
 * at the end of kernel/sched.c
 *
 */

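/*
 * For reference, a minimal sketch of that kernel/sched.c addition; the
 * placement shown is only illustrative (anywhere after the definition of
 * nr_running() works), and without the export a modular build of this
 * governor cannot link against nr_running():
 *
 *     unsigned long nr_running(void)
 *     {
 *             ...existing implementation...
 *     }
 *     EXPORT_SYMBOL_GPL(nr_running);
 */
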
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/moduleparam.h>
#include <asm/cputime.h>
#include <linux/earlysuspend.h>

static void (*pm_idle_old)(void);
static atomic_t active_count = ATOMIC_INIT(0);

struct smartass_info_s {
        struct cpufreq_policy *cur_policy;
        struct timer_list timer;
        u64 time_in_idle;
        u64 idle_exit_time;
        unsigned int force_ramp_up;
        unsigned int enable;
};
static DEFINE_PER_CPU(struct smartass_info_s, smartass_info);

/* Workqueues handle frequency scaling */
static struct workqueue_struct *up_wq;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_work;

static u64 freq_change_time;
static u64 freq_change_time_in_idle;

static cpumask_t work_cpumask;
static unsigned int suspended;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 * Default is 20ms (DEFAULT_DOWN_RATE_US is in microseconds).
 */
#define DEFAULT_DOWN_RATE_US 20000
static unsigned long down_rate_us;

/*
 * When ramping up frequency with no idle cycles, jump to at least this frequency.
 * Zero disables. Set a very high value to jump to the policy max frequency.
 */
#define DEFAULT_UP_MIN_FREQ 1152000
static unsigned int up_min_freq;

/*
 * When sleep_max_freq>0 the frequency while suspended will be capped
 * by this frequency. Also wake up at the policy max frequency
 * to minimize wakeup issues.
 * Set sleep_max_freq=0 to disable this behavior.
 */
#define DEFAULT_SLEEP_MAX_FREQ 537600
static unsigned int sleep_max_freq;

/*
 * Sampling rate; leaving it at 2 jiffies is strongly recommended.
 */
#define DEFAULT_SAMPLE_RATE_JIFFIES 2
static unsigned int sample_rate_jiffies;

/*
 * Frequency delta when ramping up.
 * Zero disables the step and always jumps straight to the max frequency.
 */
#define DEFAULT_RAMP_UP_STEP 614400
static unsigned int ramp_up_step;

/*
 * Max frequency delta when ramping down. Zero disables.
 */
#define DEFAULT_MAX_RAMP_DOWN 384000
static unsigned int max_ramp_down;

/*
 * CPU freq will be increased if measured load > max_cpu_load.
 */
#define DEFAULT_MAX_CPU_LOAD 75
static unsigned long max_cpu_load;

/*
 * CPU freq will be decreased if measured load < min_cpu_load.
 */
#define DEFAULT_MIN_CPU_LOAD 30
static unsigned long min_cpu_load;

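/*
 * A quick note on units for the defaults above: cpufreq expresses
 * frequencies in kHz, so these values are board specific and the ones
 * shipped here presumably match one particular device's frequency table:
 *
 *   DEFAULT_DOWN_RATE_US        20000  ->  20 ms minimum time before ramping down
 *   DEFAULT_UP_MIN_FREQ       1152000  ->  1.152 GHz
 *   DEFAULT_SLEEP_MAX_FREQ     537600  ->  537.6 MHz
 *   DEFAULT_RAMP_UP_STEP       614400  ->  614.4 MHz per up step
 *   DEFAULT_MAX_RAMP_DOWN      384000  ->  384 MHz maximum per down step
 *   DEFAULT_MAX_CPU_LOAD           75  ->  ramp up above 75% load
 *   DEFAULT_MIN_CPU_LOAD           30  ->  ramp down below 30% load
 */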

static int cpufreq_governor_smartass(struct cpufreq_policy *policy,
                unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS
static
#endif
struct cpufreq_governor cpufreq_gov_smartass = {
        .name = "smartass",
        .governor = cpufreq_governor_smartass,
        .max_transition_latency = 6000000,
        .owner = THIS_MODULE,
};

static void cpufreq_smartass_timer(unsigned long data)
{
        u64 delta_idle;
        u64 update_time;
        u64 now_idle;
        struct smartass_info_s *this_smartass = &per_cpu(smartass_info, data);
        struct cpufreq_policy *policy = this_smartass->cur_policy;

        now_idle = get_cpu_idle_time_us(data, &update_time);

        if (update_time == this_smartass->idle_exit_time)
                return;

        delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle);
        //printk(KERN_INFO "smartass: t=%llu i=%llu\n",cputime64_sub(update_time,this_smartass->idle_exit_time),delta_idle);

        /* Scale up if there were no idle cycles since coming out of idle */
        if (delta_idle == 0) {
                if (policy->cur == policy->max)
                        return;

                if (nr_running() < 1)
                        return;

                this_smartass->force_ramp_up = 1;
                cpumask_set_cpu(data, &work_cpumask);
                queue_work(up_wq, &freq_scale_work);
                return;
        }

        /*
         * There is a window where the cpu utilization can go from low to high
         * between the timer expiring, delta_idle will be > 0 and the cpu will
         * be 100% busy, preventing idle from running, and this timer from
         * firing. So set up another timer to fire and check cpu utilization.
         * Do not set up the timer if there is no scheduled work.
         */
        if (!timer_pending(&this_smartass->timer) && nr_running() > 0) {
                this_smartass->time_in_idle = get_cpu_idle_time_us(
                        data, &this_smartass->idle_exit_time);
                mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
        }

        if (policy->cur == policy->min)
                return;

        /*
         * Do not scale down unless we have been at this frequency for the
         * minimum sample time.
         */
        if (cputime64_sub(update_time, freq_change_time) < down_rate_us)
                return;

        cpumask_set_cpu(data, &work_cpumask);
        queue_work(down_wq, &freq_scale_work);
}

static void cpufreq_idle(void)
{
        struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
        struct cpufreq_policy *policy = this_smartass->cur_policy;

        pm_idle_old();

        if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
                return;

        /* Timer to fire in 1-2 ticks, jiffy aligned. */
        if (timer_pending(&this_smartass->timer) == 0) {
                this_smartass->time_in_idle = get_cpu_idle_time_us(
                        smp_processor_id(), &this_smartass->idle_exit_time);
                mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
        }
}

/*
 * Choose the cpu frequency based on the load. For now choose the minimum
 * frequency that will satisfy the load, which is not always the lowest power.
 */
static unsigned int cpufreq_smartass_calc_freq(unsigned int cpu, struct cpufreq_policy *policy)
{
        unsigned int delta_time;
        unsigned int idle_time;
        unsigned int cpu_load;
        unsigned int new_freq;
        u64 current_wall_time;
        u64 current_idle_time;

        current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);

        idle_time = (unsigned int)(current_idle_time - freq_change_time_in_idle);
        delta_time = (unsigned int)(current_wall_time - freq_change_time);

        cpu_load = 100 * (delta_time - idle_time) / delta_time;
        //printk(KERN_INFO "Smartass calc_freq: delta_time=%u cpu_load=%u\n",delta_time,cpu_load);
        if (cpu_load < min_cpu_load) {
                cpu_load += 100 - max_cpu_load; // dummy load.
                new_freq = policy->cur * cpu_load / 100;
                if (max_ramp_down && new_freq < policy->cur - max_ramp_down)
                        new_freq = policy->cur - max_ramp_down;
                //printk(KERN_INFO "Smartass calc_freq: %u => %u\n",policy->cur,new_freq);
                return new_freq;
        } else if (cpu_load > max_cpu_load) {
                if (ramp_up_step)
                        new_freq = policy->cur + ramp_up_step;
                else
                        new_freq = policy->max;
                return new_freq;
        }
        return policy->cur;
}
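
/*
 * A worked example of the ramp-down math above, purely illustrative (the
 * sample numbers are made up; default tunables, frequencies in kHz):
 *
 *   delta_time = 10000 us, idle_time = 9000 us
 *     -> cpu_load = 100 * (10000 - 9000) / 10000 = 10%, below min_cpu_load (30)
 *   dummy load:  cpu_load += 100 - max_cpu_load (75)  ->  10 + 25 = 35
 *   proposal:    new_freq = policy->cur * 35 / 100
 *     e.g. cur = 1152000 kHz  ->  403200 kHz
 *   max_ramp_down (384000) limits the drop:
 *     1152000 - 384000 = 768000 kHz, so new_freq is raised to 768000 kHz
 */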

/* We use the same work function to scale up and down */
static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
{
        unsigned int cpu;
        unsigned int new_freq;
        struct smartass_info_s *this_smartass;
        struct cpufreq_policy *policy;
        cpumask_t tmp_mask = work_cpumask;
        for_each_cpu(cpu, tmp_mask) {
                this_smartass = &per_cpu(smartass_info, cpu);
                policy = this_smartass->cur_policy;

                if (this_smartass->force_ramp_up) {
                        this_smartass->force_ramp_up = 0;

                        if (nr_running() == 1) {
                                cpumask_clear_cpu(cpu, &work_cpumask);
                                return;
                        }

                        if (policy->cur == policy->max)
                                return;

                        if (ramp_up_step)
                                new_freq = policy->cur + ramp_up_step;
                        else
                                new_freq = policy->max;

                        if (suspended && sleep_max_freq) {
                                if (new_freq > sleep_max_freq)
                                        new_freq = sleep_max_freq;
                        } else {
                                if (new_freq < up_min_freq)
                                        new_freq = up_min_freq;
                        }

                } else {
                        new_freq = cpufreq_smartass_calc_freq(cpu, policy);

                        // in suspend limit to sleep_max_freq and
                        // jump straight to sleep_max_freq to avoid wakeup problems
                        if (suspended && sleep_max_freq &&
                            (new_freq > sleep_max_freq || new_freq > policy->cur))
                                new_freq = sleep_max_freq;
                }

                if (new_freq > policy->max)
                        new_freq = policy->max;

                if (new_freq < policy->min)
                        new_freq = policy->min;

                __cpufreq_driver_target(policy, new_freq,
                                        CPUFREQ_RELATION_L);

                freq_change_time_in_idle = get_cpu_idle_time_us(cpu,
                                                        &freq_change_time);

                cpumask_clear_cpu(cpu, &work_cpumask);
        }
}

static ssize_t show_down_rate_us(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%lu\n", down_rate_us);
}

static ssize_t store_down_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0 && input >= 1000 && input <= 100000000)
                down_rate_us = input;
        /* a sysfs store must return the bytes consumed (or a negative error) */
        return res >= 0 ? count : res;
}

static struct freq_attr down_rate_us_attr = __ATTR(down_rate_us, 0644,
                show_down_rate_us, store_down_rate_us);

static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", up_min_freq);
}

static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0 && input >= 0)
                up_min_freq = input;
        return res >= 0 ? count : res;
}

static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644,
                show_up_min_freq, store_up_min_freq);

static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", sleep_max_freq);
}

static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0 && input >= 0)
                sleep_max_freq = input;
        return res >= 0 ? count : res;
}

static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644,
                show_sleep_max_freq, store_sleep_max_freq);

static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", sample_rate_jiffies);
}

static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0 && input > 0 && input <= 1000)
                sample_rate_jiffies = input;
        return res >= 0 ? count : res;
}

static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644,
                show_sample_rate_jiffies, store_sample_rate_jiffies);

static ssize_t show_ramp_up_step(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", ramp_up_step);
}

static ssize_t store_ramp_up_step(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0)
                ramp_up_step = input;
        return res >= 0 ? count : res;
}

static struct freq_attr ramp_up_step_attr = __ATTR(ramp_up_step, 0644,
                show_ramp_up_step, store_ramp_up_step);

static ssize_t show_max_ramp_down(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", max_ramp_down);
}

static ssize_t store_max_ramp_down(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0)
                max_ramp_down = input;
        return res >= 0 ? count : res;
}

static struct freq_attr max_ramp_down_attr = __ATTR(max_ramp_down, 0644,
                show_max_ramp_down, store_max_ramp_down);

static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%lu\n", max_cpu_load);
}

static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0 && input > 0 && input <= 100)
                max_cpu_load = input;
        return res >= 0 ? count : res;
}

static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644,
                show_max_cpu_load, store_max_cpu_load);

static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%lu\n", min_cpu_load);
}

static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
{
        ssize_t res;
        unsigned long input;
        res = strict_strtoul(buf, 0, &input);
        if (res >= 0 && input > 0 && input < 100)
                min_cpu_load = input;
        return res >= 0 ? count : res;
}

static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644,
                show_min_cpu_load, store_min_cpu_load);

static struct attribute *smartass_attributes[] = {
        &down_rate_us_attr.attr,
        &up_min_freq_attr.attr,
        &sleep_max_freq_attr.attr,
        &sample_rate_jiffies_attr.attr,
        &ramp_up_step_attr.attr,
        &max_ramp_down_attr.attr,
        &max_cpu_load_attr.attr,
        &min_cpu_load_attr.attr,
        NULL,
};

static struct attribute_group smartass_attr_group = {
        .attrs = smartass_attributes,
        .name = "smartass",
};
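
/*
 * Because the attribute group above is created on the policy kobject with
 * the name "smartass", the tunables typically appear under the per-cpu
 * cpufreq directory once the governor is active. A hypothetical shell
 * session (paths may differ between devices):
 *
 *   # cat /sys/devices/system/cpu/cpu0/cpufreq/smartass/down_rate_us
 *   20000
 *   # echo 40000 > /sys/devices/system/cpu/cpu0/cpufreq/smartass/down_rate_us
 */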

static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy,
                unsigned int event)
{
        unsigned int cpu = new_policy->cpu;
        int rc;
        struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) || (!new_policy->cur))
                        return -EINVAL;

                if (this_smartass->enable) /* Already enabled */
                        break;

                /*
                 * Do not register the idle hook and create sysfs
                 * entries if we have already done so.
                 */
                if (atomic_inc_return(&active_count) > 1)
                        return 0;

                rc = sysfs_create_group(&new_policy->kobj, &smartass_attr_group);
                if (rc)
                        return rc;
                pm_idle_old = pm_idle;
                pm_idle = cpufreq_idle;

                this_smartass->cur_policy = new_policy;
                this_smartass->cur_policy->max = CONFIG_MSM_CPU_FREQ_ONDEMAND_MAX;
                this_smartass->cur_policy->min = CONFIG_MSM_CPU_FREQ_ONDEMAND_MIN;
                this_smartass->cur_policy->cur = CONFIG_MSM_CPU_FREQ_ONDEMAND_MAX;
                this_smartass->enable = 1;

                // notice no break here: fall through and apply the limits

        case CPUFREQ_GOV_LIMITS:
                if (this_smartass->cur_policy->cur != new_policy->max)
                        __cpufreq_driver_target(new_policy, new_policy->max, CPUFREQ_RELATION_H);

                break;

        case CPUFREQ_GOV_STOP:
                this_smartass->enable = 0;

                if (atomic_dec_return(&active_count) > 1)
                        return 0;
                sysfs_remove_group(&new_policy->kobj,
                                &smartass_attr_group);

                pm_idle = pm_idle_old;
                del_timer(&this_smartass->timer);
                break;
        }

        return 0;
}

static void smartass_suspend(int cpu, int suspend)
{
        struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
        struct cpufreq_policy *policy = this_smartass->cur_policy;
        unsigned int new_freq;

        if (!this_smartass->enable || sleep_max_freq == 0) // disable behavior for sleep_max_freq==0
                return;

        if (suspend) {
                if (policy->cur > sleep_max_freq) {
                        new_freq = sleep_max_freq;
                        if (new_freq > policy->max)
                                new_freq = policy->max;
                        if (new_freq < policy->min)
                                new_freq = policy->min;
                        __cpufreq_driver_target(policy, new_freq,
                                        CPUFREQ_RELATION_H);
                }
        } else { // resume at max speed:
                __cpufreq_driver_target(policy, policy->max,
                                CPUFREQ_RELATION_H);
        }
}

static void smartass_early_suspend(struct early_suspend *handler)
{
        int i;
        suspended = 1;
        for_each_online_cpu(i)
                smartass_suspend(i, 1);
}

static void smartass_late_resume(struct early_suspend *handler)
{
        int i;
        suspended = 0;
        for_each_online_cpu(i)
                smartass_suspend(i, 0);
}

static struct early_suspend smartass_power_suspend = {
        .suspend = smartass_early_suspend,
        .resume = smartass_late_resume,
};

static int __init cpufreq_smartass_init(void)
{
        unsigned int i;
        struct smartass_info_s *this_smartass;
        down_rate_us = DEFAULT_DOWN_RATE_US;
        up_min_freq = DEFAULT_UP_MIN_FREQ;
        sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ;
        sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES;
        ramp_up_step = DEFAULT_RAMP_UP_STEP;
        max_ramp_down = DEFAULT_MAX_RAMP_DOWN;
        max_cpu_load = DEFAULT_MAX_CPU_LOAD;
        min_cpu_load = DEFAULT_MIN_CPU_LOAD;

        suspended = 0;

        /* Initialize per-cpu data: */
        for_each_possible_cpu(i) {
                this_smartass = &per_cpu(smartass_info, i);
                this_smartass->enable = 0;
                this_smartass->force_ramp_up = 0;
                this_smartass->time_in_idle = 0;
                this_smartass->idle_exit_time = 0;
                // initialize the per-cpu timer:
                init_timer_deferrable(&this_smartass->timer);
                this_smartass->timer.function = cpufreq_smartass_timer;
                this_smartass->timer.data = i;
        }

        /* Scale up is high priority */
        up_wq = create_rt_workqueue("ksmartass_up");
        down_wq = create_workqueue("ksmartass_down");

        INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work);

        register_early_suspend(&smartass_power_suspend);

        return cpufreq_register_governor(&cpufreq_gov_smartass);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS
pure_initcall(cpufreq_smartass_init);
#else
module_init(cpufreq_smartass_init);
#endif

static void __exit cpufreq_smartass_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_smartass);
        destroy_workqueue(up_wq);
        destroy_workqueue(down_wq);
}

module_exit(cpufreq_smartass_exit);

MODULE_AUTHOR("Erasmux");
MODULE_DESCRIPTION("'cpufreq_smartass' - A smart cpufreq governor");
MODULE_LICENSE("GPL");
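
/*
 * Once this file is built into the kernel (or loaded as a module, given the
 * nr_running export noted at the top), the governor is selected like any
 * other cpufreq governor. A hypothetical shell session, with paths that may
 * vary by device:
 *
 *   # cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors
 *   ... smartass ...
 *   # echo smartass > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *
 * Enabling CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS is intended to make this the
 * default governor at boot (assuming the matching Kconfig entry exists), as
 * the pure_initcall/module_init choice above suggests.
 */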