cpufreq_hotplug.c
/*
 * CPUFreq hotplug governor
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 * Mike Turquette <mturquette@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Based on ondemand governor
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>,
 * Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>

/* greater than 75% avg load across online CPUs increases frequency */
#define DEFAULT_UP_FREQ_MIN_LOAD (75)

/* keep 25% of idle under the up threshold when decreasing the frequency */
#define DEFAULT_FREQ_DOWN_DIFFERENTIAL (25)

/* less than 35% avg load across online CPUs decreases frequency */
#define DEFAULT_DOWN_FREQ_MAX_LOAD (35)

/* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */
#define DEFAULT_SAMPLING_PERIOD (100000)

/* default number of sampling periods to average before hotplug-in decision */
#define DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS (6)

/* default number of sampling periods to average before hotplug-out decision */
#define DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS (4)

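/*
 * A rough summary of the behaviour implied by the defaults above: average
 * load across online CPUs above 75%, sustained over the 6-sample hotplug-in
 * window, brings another CPU online; average load below 35% while already at
 * the minimum frequency, sustained over the 4-sample hotplug-out window,
 * takes one offline; in between, only the frequency is scaled, once per
 * 100 ms sample.  The sysfs store handlers below enforce only
 * down_threshold < up_threshold and down_differential < up_threshold; with
 * the defaults the effective ordering is:
 *
 *	down_threshold (35) < up_threshold - down_differential (50)
 *			    < up_threshold (75)
 */
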
static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
static
#endif
struct cpufreq_governor cpufreq_gov_hotplug = {
	.name = "hotplug",
	.governor = cpufreq_governor_dbs,
	.owner = THIS_MODULE,
};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct work_struct cpu_up_work;
	struct work_struct cpu_down_work;
	struct cpufreq_frequency_table *freq_table;
	int cpu;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *khotplug_wq;

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int down_threshold;
	unsigned int hotplug_in_sampling_periods;
	unsigned int hotplug_out_sampling_periods;
	unsigned int hotplug_load_index;
	unsigned int *hotplug_load_history;
	unsigned int ignore_nice;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.sampling_rate = DEFAULT_SAMPLING_PERIOD,
	.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD,
	.down_differential = DEFAULT_FREQ_DOWN_DIFFERENTIAL,
	.down_threshold = DEFAULT_DOWN_FREQ_MAX_LOAD,
	.hotplug_in_sampling_periods = DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
	.hotplug_out_sampling_periods = DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS,
	.hotplug_load_index = 0,
	.ignore_nice = 0,
	.io_is_busy = 0,
};

/*
 * A corner case exists when switching io_is_busy at run-time: comparing idle
 * times from a non-io_is_busy period to an io_is_busy period (or vice-versa)
 * will misrepresent the actual change in system idleness. We ignore this
 * corner case: enabling io_is_busy might cause freq increase and disabling
 * might cause freq decrease, which probably matches the original intent.
 */
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time;
	u64 iowait_time;

	/* cpufreq-hotplug always assumes CONFIG_NO_HZ */
	idle_time = get_cpu_idle_time_us(cpu, wall);

	/* if iowait counts as busy time, remove it from the idle time */
	if (dbs_tuners_ins.io_is_busy) {
		iowait_time = get_cpu_iowait_time_us(cpu, wall);
		/* cpufreq-hotplug always assumes CONFIG_NO_HZ */
		if (iowait_time != -1ULL && idle_time >= iowait_time)
			idle_time -= iowait_time;
	}

	return idle_time;
}

/************************** sysfs interface ************************/

/* XXX look at global sysfs macros in cpufreq.h, can those be used here? */

/* cpufreq_hotplug Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(down_differential, down_differential);
show_one(down_threshold, down_threshold);
show_one(hotplug_in_sampling_periods, hotplug_in_sampling_periods);
show_one(hotplug_out_sampling_periods, hotplug_out_sampling_periods);
show_one(ignore_nice_load, ignore_nice);
show_one(io_is_busy, io_is_busy);

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input <= dbs_tuners_ins.down_threshold)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_differential(struct kobject *a, struct attribute *b,
				       const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.down_differential = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input >= dbs_tuners_ins.up_threshold)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.down_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_hotplug_in_sampling_periods(struct kobject *a,
		struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	unsigned int *temp;
	unsigned int max_windows;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	/* already using this value, bail out */
	if (input == dbs_tuners_ins.hotplug_in_sampling_periods)
		return count;

	mutex_lock(&dbs_mutex);
	ret = count;
	max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
			dbs_tuners_ins.hotplug_out_sampling_periods);

	/* no need to resize array */
	if (input <= max_windows) {
		dbs_tuners_ins.hotplug_in_sampling_periods = input;
		goto out;
	}

	/* resize array */
	temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);

	/* kmalloc returns NULL on failure, not an ERR_PTR */
	if (!temp) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(temp, dbs_tuners_ins.hotplug_load_history,
			(max_windows * sizeof(unsigned int)));
	kfree(dbs_tuners_ins.hotplug_load_history);

	/* replace old buffer, old number of sampling periods & old index */
	dbs_tuners_ins.hotplug_load_history = temp;
	dbs_tuners_ins.hotplug_in_sampling_periods = input;
	dbs_tuners_ins.hotplug_load_index = max_windows;
out:
	mutex_unlock(&dbs_mutex);

	return ret;
}

static ssize_t store_hotplug_out_sampling_periods(struct kobject *a,
		struct attribute *b, const char *buf, size_t count)
{
	unsigned int input;
	unsigned int *temp;
	unsigned int max_windows;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	/* already using this value, bail out */
	if (input == dbs_tuners_ins.hotplug_out_sampling_periods)
		return count;

	mutex_lock(&dbs_mutex);
	ret = count;
	max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
			dbs_tuners_ins.hotplug_out_sampling_periods);

	/* no need to resize array */
	if (input <= max_windows) {
		dbs_tuners_ins.hotplug_out_sampling_periods = input;
		goto out;
	}

	/* resize array */
	temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);

	/* kmalloc returns NULL on failure, not an ERR_PTR */
	if (!temp) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(temp, dbs_tuners_ins.hotplug_load_history,
			(max_windows * sizeof(unsigned int)));
	kfree(dbs_tuners_ins.hotplug_load_history);

	/* replace old buffer, old number of sampling periods & old index */
	dbs_tuners_ins.hotplug_load_history = temp;
	dbs_tuners_ins.hotplug_out_sampling_periods = input;
	dbs_tuners_ins.hotplug_load_index = max_windows;
out:
	mutex_unlock(&dbs_mutex);

	return ret;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(hp_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
				&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.io_is_busy = !!input;
	mutex_unlock(&dbs_mutex);

	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(up_threshold);
define_one_global_rw(down_differential);
define_one_global_rw(down_threshold);
define_one_global_rw(hotplug_in_sampling_periods);
define_one_global_rw(hotplug_out_sampling_periods);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(io_is_busy);

static struct attribute *dbs_attributes[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&down_differential.attr,
	&down_threshold.attr,
	&hotplug_in_sampling_periods.attr,
	&hotplug_out_sampling_periods.attr,
	&ignore_nice_load.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "hotplug",
};

/************************** sysfs end ************************/
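
/*
 * Example of tuning the governor from userspace; a rough sketch only, and
 * the path assumes cpufreq_global_kobject is exposed at the usual
 * /sys/devices/system/cpu/cpufreq location:
 *
 *	# cd /sys/devices/system/cpu/cpufreq/hotplug
 *	# echo 200000 > sampling_rate	(sample every 200 ms instead of 100 ms)
 *	# echo 90 > up_threshold	(require higher load before ramping up)
 *	# echo 30 > down_threshold	(require lower load before slowing down)
 *	# echo 1 > io_is_busy		(count iowait as busy time)
 */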

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	/* combined load of all enabled CPUs */
	unsigned int total_load = 0;
	/* single largest CPU load percentage */
	unsigned int max_load = 0;
	/* largest CPU load in terms of frequency */
	unsigned int max_load_freq = 0;
	/* average load across all enabled CPUs */
	unsigned int avg_load = 0;
	/* average load across multiple sampling periods for hotplug events */
	unsigned int hotplug_in_avg_load = 0;
	unsigned int hotplug_out_avg_load = 0;
	/* number of sampling periods averaged for hotplug decisions */
	unsigned int periods;

	struct cpufreq_policy *policy;
	unsigned int i, j;

	policy = this_dbs_info->cur_policy;

	/*
	 * cpu load accounting
	 * get highest load, total load and average load across all CPUs
	 */
	for_each_cpu(j, policy->cpus) {
		unsigned int load;
		unsigned int idle_time, wall_time;
		cputime64_t cur_wall_time, cur_idle_time;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);

		/* update both cur_idle_time and cur_wall_time */
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		/* how much wall time has passed since last iteration? */
		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		/* how much idle time has passed since last iteration? */
		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/* load is the percentage of time not spent in idle */
		load = 100 * (wall_time - idle_time) / wall_time;

		/* keep track of combined load across all CPUs */
		total_load += load;

		/* keep track of highest single load across all CPUs */
		if (load > max_load)
			max_load = load;
	}

	/* use the max load in the OPP freq change policy */
	max_load_freq = max_load * policy->cur;

	/* calculate the average load across all related CPUs */
	avg_load = total_load / num_online_cpus();

	mutex_lock(&dbs_mutex);

	/*
	 * hotplug load accounting
	 * average load over multiple sampling periods
	 */

	/* how many sampling periods do we use for hotplug decisions? */
	periods = max(dbs_tuners_ins.hotplug_in_sampling_periods,
			dbs_tuners_ins.hotplug_out_sampling_periods);

	/* store avg_load in the circular buffer */
	dbs_tuners_ins.hotplug_load_history[dbs_tuners_ins.hotplug_load_index]
		= avg_load;

	/* compute average load across in & out sampling periods */
	for (i = 0, j = dbs_tuners_ins.hotplug_load_index;
			i < periods; i++, j--) {
		if (i < dbs_tuners_ins.hotplug_in_sampling_periods)
			hotplug_in_avg_load +=
				dbs_tuners_ins.hotplug_load_history[j];
		if (i < dbs_tuners_ins.hotplug_out_sampling_periods)
			hotplug_out_avg_load +=
				dbs_tuners_ins.hotplug_load_history[j];

		if (j == 0)
			j = periods;
	}

	hotplug_in_avg_load = hotplug_in_avg_load /
			dbs_tuners_ins.hotplug_in_sampling_periods;

	hotplug_out_avg_load = hotplug_out_avg_load /
			dbs_tuners_ins.hotplug_out_sampling_periods;

	/* return to first element if we're at the circular buffer's end */
	if (++dbs_tuners_ins.hotplug_load_index == periods)
		dbs_tuners_ins.hotplug_load_index = 0;
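
	/*
	 * Worked example with the defaults (in = 6, out = 4, so periods = 6):
	 * the history buffer keeps the last six avg_load samples; the
	 * hotplug-in average walks back over all six entries while the
	 * hotplug-out average only covers the most recent four, so the
	 * decision to unplug a CPU tracks recent load more quickly than the
	 * decision to plug one in.
	 */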

	/* check if an auxiliary CPU is needed based on avg_load */
	if (avg_load > dbs_tuners_ins.up_threshold) {
		/* should we enable auxiliary CPUs? */
		if (num_online_cpus() < num_possible_cpus() &&
				hotplug_in_avg_load > dbs_tuners_ins.up_threshold) {
			queue_work_on(this_dbs_info->cpu, khotplug_wq,
					&this_dbs_info->cpu_up_work);
			goto out;
		}
	}

	/* check for frequency increase based on max_load */
	if (max_load > dbs_tuners_ins.up_threshold) {
		/* increase to highest frequency supported */
		if (policy->cur < policy->max)
			__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);

		goto out;
	}

	/* check for frequency decrease */
	if (avg_load < dbs_tuners_ins.down_threshold) {
		/* are we at the minimum frequency already? */
		if (policy->cur == policy->min) {
			/* should we disable auxiliary CPUs? */
			if (num_online_cpus() > 1 && hotplug_out_avg_load <
					dbs_tuners_ins.down_threshold) {
				queue_work_on(this_dbs_info->cpu, khotplug_wq,
						&this_dbs_info->cpu_down_work);
			}
			goto out;
		}
	}

	/*
	 * go down to the lowest frequency which can sustain the load while
	 * keeping the projected load down_differential below up_threshold,
	 * so the next sample does not immediately trigger an increase
	 */
	if ((max_load_freq <
			(dbs_tuners_ins.up_threshold -
			 dbs_tuners_ins.down_differential) * policy->cur) &&
			(policy->cur > policy->min)) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (freq_next < policy->min)
			freq_next = policy->min;

		__cpufreq_driver_target(policy, freq_next,
				CPUFREQ_RELATION_L);
	}
out:
	mutex_unlock(&dbs_mutex);
	return;
}

static void do_cpu_up(struct work_struct *work)
{
	int i = num_online_cpus();

	if (i < num_possible_cpus() && !cpu_online(i))
		cpu_up(i);
}

static void do_cpu_down(struct work_struct *work)
{
	int i = num_online_cpus() - 1;

	if (i > 0 && cpu_online(i))
		cpu_down(i);
}
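
/*
 * Note: both helpers above assume CPUs are brought online in numerical
 * order, so the candidate is cpu == num_online_cpus() when plugging in and
 * num_online_cpus() - 1 when plugging out.  If some other code has onlined
 * CPUs out of order, the cpu_online() checks turn these work items into
 * no-ops instead of touching the wrong CPU.
 */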

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;

	/* We want all related CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	mutex_lock(&dbs_info->timer_mutex);
	dbs_check_cpu(dbs_info);
	queue_delayed_work_on(cpu, khotplug_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all related CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	INIT_WORK(&dbs_info->cpu_up_work, do_cpu_up);
	INIT_WORK(&dbs_info->cpu_down_work, do_cpu_down);
	queue_delayed_work_on(dbs_info->cpu, khotplug_wq, &dbs_info->work,
			delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int i, j, max_periods;
	int rc;

	this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);
		dbs_enable++;

		/* allocate the shared load history once, not once per CPU */
		max_periods = max(DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
				DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS);
		dbs_tuners_ins.hotplug_load_history = kmalloc(
				(sizeof(unsigned int) * max_periods),
				GFP_KERNEL);
		if (!dbs_tuners_ins.hotplug_load_history) {
			WARN_ON(1);
			dbs_enable--;
			mutex_unlock(&dbs_mutex);
			return -ENOMEM;
		}
		for (i = 0; i < max_periods; i++)
			dbs_tuners_ins.hotplug_load_history[i] = 50;

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
					&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
					kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
		/*
		 * Start the timer schedule work, when this governor
		 * is used for first time
		 */
		if (dbs_enable == 1) {
			rc = sysfs_create_group(cpufreq_global_kobject,
					&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					&dbs_attr_group);
		kfree(dbs_tuners_ins.hotplug_load_history);
		/*
		 * XXX BIG CAVEAT: Stopping the governor with CPU1 offline
		 * will result in it remaining offline until the user onlines
		 * it again. It is up to the user to do this (for now).
		 */
		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		dbs_tuners_ins.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD;
	} else {
		pr_err("cpufreq-hotplug: %s: assumes CONFIG_NO_HZ\n",
				__func__);
		return -EINVAL;
	}

	khotplug_wq = create_workqueue("khotplug");
	if (!khotplug_wq) {
		pr_err("Creation of khotplug failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_hotplug);
	if (err)
		destroy_workqueue(khotplug_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_hotplug);
	destroy_workqueue(khotplug_wq);
}

MODULE_AUTHOR("Mike Turquette <mturquette@ti.com>");
MODULE_DESCRIPTION("'cpufreq_hotplug' - cpufreq governor for dynamic frequency scaling and CPU hotplugging");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
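
/*
 * Build wiring (a sketch, not part of this file): trees that carry this
 * governor usually add a Kconfig entry defining CONFIG_CPU_FREQ_GOV_HOTPLUG
 * (plus the CPU_FREQ_DEFAULT_GOV_HOTPLUG choice referenced above) and a
 * drivers/cpufreq/Makefile line along the lines of:
 *
 *	obj-$(CONFIG_CPU_FREQ_GOV_HOTPLUG)	+= cpufreq_hotplug.o
 *
 * Exact symbol names and file location depend on the kernel tree.
 */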