Advertisement
Guest User

cpufreq_lionfish.c

a guest
Jul 8th, 2015
298
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 25.93 KB | None | 0 0
  1. /*
  2.  * linux/drivers/cpufreq/cpufreq_lionfish.c
  3.  *
  4.  * Copyright (C) 2015 Sultan Qasim Khan <sultanqasim@gmail.com>
  5.  *
  6.  * This program is free software; you can redistribute it and/or modify
  7.  * it under the terms of the GNU General Public License version 2 as
  8.  * published by the Free Software Foundation.
  9.  *
  10.  */
  11.  
  12. #include <linux/types.h>
  13. #include <linux/slab.h>
  14. #include <linux/kernel_stat.h>
  15. #include "cpufreq_governor.h"
  16.  
/* Lionfish governor tunable defaults */
#define DEF_FREQUENCY_JUMP_THRESHOLD    (95)    /* load %: jump instead of ramping */
#define DEF_FREQUENCY_UP_THRESHOLD  (80)    /* load %: cast an "up" vote above this */
#define DEF_FREQUENCY_DOWN_THRESHOLD    (40)    /* load %: cast a "down" vote below this */
#define DEF_FREQUENCY_JUMP_LEVEL    (800000)    /* first jump target, in kHz */
#define DEF_SAMPLING_UP_FACTOR      (2) /* up votes required before ramping up */
#define DEF_SAMPLING_DOWN_FACTOR    (3) /* down votes required before ramping down */

/* Lionfish governor fixed settings */
#define RAMP_UP_PERCENTAGE      (130)   /* ramp target as % of current frequency */
#define RAMP_DOWN_PERCENTAGE        (65)    /* ramp-down target as % of current frequency */
#define FREQUENCY_STEP_PERCENTAGE   (5) /* minimum freq step as % of policy->max */
#define JUMP_HISPEED_FREQ_PERCENTAGE    (83)    /* jump target above jump_level, % of max */
#define MAX_SAMPLING_FACTOR     (10)    /* upper bound for sampling_*_factor tunables */

#define LIONFISH_VERSION_MAJOR      (1)
#define LIONFISH_VERSION_MINOR      (0)
  34.  
  35. /************************** type definitions ****************************/
/* Per-CPU governor state. */
struct lf_cpu_dbs_info_s {
    struct cpu_dbs_common_info cdbs;    /* shared dbs bookkeeping (idle/wall times, work) */
    unsigned int up_ticks;      /* consecutive votes to raise the frequency */
    unsigned int down_ticks;    /* consecutive votes to lower the frequency */
    unsigned int requested_freq;    /* frequency last requested by the governor, in kHz */
    unsigned int enable:1;      /* set while the governor is active on this CPU */
};
  43.  
/* Tunables exposed through sysfs (one set per policy, or one system-wide). */
struct lf_dbs_tuners {
    unsigned int ignore_nice_load;  /* 1: count nice time as idle */
    unsigned int sampling_rate; /* sampling interval, in usecs */
    unsigned int sampling_up_factor;    /* up votes required before ramping up */
    unsigned int sampling_down_factor;  /* down votes required before ramping down */
    unsigned int jump_threshold;    /* load % that triggers a frequency jump */
    unsigned int up_threshold;  /* load % above which an up vote is cast */
    unsigned int down_threshold;    /* load % below which a down vote is cast */
    unsigned int jump_level;    /* first jump target frequency, in kHz */
};
  54.  
/* forward declaration: per-policy governor data, defined below */
struct lf_gdbs_data;

/* Common Governor data across policies */
struct lf_dbs_data {
    /*
     * governor sysfs attributes
     * for system governor and per policy governor
     */
    struct attribute_group *attr_group_gov_sys;
    struct attribute_group *attr_group_gov_pol;

    /*
     * Common data for platforms that don't set
     * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
     */
    struct lf_gdbs_data *gdbs_data;

    /* handles frequency change notifications */
    struct notifier_block *notifier_block;
};
  75.  
  76. /* Governor per policy data */
/* Governor per policy data */
struct lf_gdbs_data {
    struct lf_dbs_data *cdata;  /* back-pointer to the common governor data */
    unsigned int min_sampling_rate; /* floor for the sampling_rate tunable, usecs */
    int usage_count;        /* number of policies sharing this instance */
    struct lf_dbs_tuners *tuners;   /* sysfs tunables, allocated by lf_init() */

    /* serializes governor start/stop (per-CPU state setup and teardown) */
    struct mutex mutex;
};
  86.  
  87. /************************ function declarations *************************/
  88. static void lf_gov_queue_work(struct lf_gdbs_data *dbs_data,
  89.         struct cpufreq_policy *policy, unsigned int delay, bool all_cpus);
  90.  
  91. /************************** sysfs macro magic ***************************/
/*
 * lf_show_one - generate the two sysfs "show" handlers for one tunable:
 * show_<name>_gov_sys reads the tuners through the static lf_dbs_cdata
 * (system-wide governor); show_<name>_gov_pol reads them through
 * policy->governor_data (per-policy governor).
 */
#define lf_show_one(_gov, file_name)                    \
static ssize_t show_##file_name##_gov_sys               \
(struct kobject *kobj, struct attribute *attr, char *buf)       \
{                                   \
    struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
    return sprintf(buf, "%u\n", tuners->file_name);         \
}                                   \
                                    \
static ssize_t show_##file_name##_gov_pol               \
(struct cpufreq_policy *policy, char *buf)              \
{                                   \
    struct lf_gdbs_data *dbs_data = policy->governor_data;      \
    struct _gov##_dbs_tuners *tuners = dbs_data->tuners;        \
    return sprintf(buf, "%u\n", tuners->file_name);         \
}
  107.  
/*
 * lf_store_one - generate the two sysfs "store" handlers for one tunable.
 * Both variants delegate to a hand-written store_<name>() that performs
 * validation; they differ only in how they locate the lf_gdbs_data.
 */
#define lf_store_one(_gov, file_name)                   \
static ssize_t store_##file_name##_gov_sys              \
(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
{                                   \
    struct lf_gdbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
    return store_##file_name(dbs_data, buf, count);         \
}                                   \
                                    \
static ssize_t store_##file_name##_gov_pol              \
(struct cpufreq_policy *policy, const char *buf, size_t count)      \
{                                   \
    struct lf_gdbs_data *dbs_data = policy->governor_data;      \
    return store_##file_name(dbs_data, buf, count);         \
}
  122.  
/* Convenience wrapper: generate both show and store handlers for a tunable. */
#define lf_show_store_one(_gov, file_name)              \
lf_show_one(_gov, file_name);                       \
lf_store_one(_gov, file_name)
  126.  
/*
 * Generate the read-only sysfs handlers exposing min_sampling_rate, which
 * lives on lf_gdbs_data rather than in the tuners.
 */
#define lf_declare_show_sampling_rate_min(_gov)             \
static ssize_t show_sampling_rate_min_gov_sys               \
(struct kobject *kobj, struct attribute *attr, char *buf)       \
{                                   \
    struct lf_gdbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
    return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);   \
}                                   \
                                    \
static ssize_t show_sampling_rate_min_gov_pol               \
(struct cpufreq_policy *policy, char *buf)              \
{                                   \
    struct lf_gdbs_data *dbs_data = policy->governor_data;      \
    return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);   \
}
  141.  
  142. /************************** global variables ****************************/
  143. static DEFINE_PER_CPU(struct lf_cpu_dbs_info_s, lf_cpu_dbs_info);
  144. static struct lf_dbs_data lf_dbs_cdata;
  145. /* there are more globals declared after the sysfs code */
  146.  
  147. /*********************** lionfish governor logic ************************/
  148. static inline unsigned int get_freq_target(struct lf_dbs_tuners *lf_tuners,
  149.                        struct cpufreq_policy *policy)
  150. {
  151.     unsigned int freq_target =
  152.         (FREQUENCY_STEP_PERCENTAGE * policy->max) / 100;
  153.  
  154.     /* max freq cannot be less than 100. But who knows... */
  155.     if (unlikely(freq_target == 0))
  156.         freq_target = FREQUENCY_STEP_PERCENTAGE;
  157.  
  158.     return freq_target;
  159. }
  160.  
  161. /*
  162.  * This function is the heart of the governor. It is called periodically
  163.  * for each CPU core.
  164.  *
  165.  * This governor uses two different approaches to control the CPU frequency.
  166.  * It jumps quickly to a target frequency under very heavy load, and it votes
  167.  * to gradually ramp up or down the frequency under moderate load.
  168.  *
  169.  * When the CPU is near saturation (load above jump_threshold, 95% by default),
  170.  * the governor will quickly jump to a higher frequency and skip intermediate
  171.  * levels for fast response. When saturated while the frequency is below
  172.  * jump_level (800 MHz by default), the CPU jumps to jump_level. When saturated
  173.  * at or above this frequency, the governor jumps to a large fraction of the
  174.  * full CPU speed (83% as presently defined). It does not jump straight to the
  175.  * full frequency because performance per watt at full speed is usually
  176.  * significantly worse than performance per watt slightly below it.
  177.  *
  178.  * Under more moderate loads, during each sample period, the governor votes to
  179.  * increase or decrease CPU frequency, aiming to maintain load between
  180.  * up_threshold and down_threshold. If the load is within the desired range,
  181.  * no votes are cast and the vote counters are decremented by one to indicate
  182.  * that the current frequency is good. When enough votes (sampling_up_factor
  183.  * or sampling_down_factor) are cast to raise/lower the frequency, the frequency
  184.  * will be ramped up or down by an amount proportional to the current frequency.
  185.  */
  186. static void lf_check_cpu(struct lf_gdbs_data *dbs_data, int cpu)
  187. {
  188.     struct lf_cpu_dbs_info_s *dbs_info = &per_cpu(lf_cpu_dbs_info, cpu);
  189.     struct cpu_dbs_common_info *cdbs = &per_cpu(lf_cpu_dbs_info, cpu).cdbs;
  190.     struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
  191.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  192.     unsigned int freq_shift;
  193.     unsigned int new_frequency;
  194.     unsigned int load = 0;
  195.     unsigned int j;
  196.     bool voted = false;
  197.  
  198.     /* compute the maximum absolute load */
  199.     for_each_cpu(j, policy->cpus) {
  200.         struct cpu_dbs_common_info *j_cdbs;
  201.         u64 cur_wall_time, cur_idle_time;
  202.         unsigned int idle_time, wall_time;
  203.         unsigned int cpu_load;
  204.  
  205.         j_cdbs = &per_cpu(lf_cpu_dbs_info, j).cdbs;
  206.  
  207.         /* last parameter 0 means that IO wait is considered idle */
  208.         cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, 0);
  209.  
  210.         wall_time = (unsigned int)
  211.             (cur_wall_time - j_cdbs->prev_cpu_wall);
  212.         j_cdbs->prev_cpu_wall = cur_wall_time;
  213.  
  214.         idle_time = (unsigned int)
  215.             (cur_idle_time - j_cdbs->prev_cpu_idle);
  216.         j_cdbs->prev_cpu_idle = cur_idle_time;
  217.  
  218.         if (lf_tuners->ignore_nice_load) {
  219.             u64 cur_nice;
  220.             unsigned long cur_nice_jiffies;
  221.  
  222.             cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
  223.                      cdbs->prev_cpu_nice;
  224.             /*
  225.              * Assumption: nice time between sampling periods will
  226.              * be less than 2^32 jiffies for 32 bit sys
  227.              */
  228.             cur_nice_jiffies = (unsigned long)
  229.                     cputime64_to_jiffies64(cur_nice);
  230.  
  231.             cdbs->prev_cpu_nice =
  232.                 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
  233.             idle_time += jiffies_to_usecs(cur_nice_jiffies);
  234.         }
  235.  
  236.         if (unlikely(!wall_time || wall_time < idle_time))
  237.             continue;
  238.  
  239.         cpu_load = 100 * (wall_time - idle_time) / wall_time;
  240.  
  241.         if (cpu_load > load)
  242.             load = cpu_load;
  243.     }
  244.  
  245.     freq_shift = get_freq_target(lf_tuners, policy);
  246.  
  247.     /* Check for frequency jump */
  248.     if (load > lf_tuners->jump_threshold) {
  249.         /* if we are already at full speed then break out early */
  250.         if (policy->cur == policy->max)
  251.             return;
  252.  
  253.         if (dbs_info->requested_freq + freq_shift < lf_tuners->jump_level) {
  254.             dbs_info->requested_freq = lf_tuners->jump_level;
  255.             if (unlikely(dbs_info->requested_freq > policy->max))
  256.                 dbs_info->requested_freq = policy->max;
  257.         } else {
  258.             dbs_info->requested_freq = policy->max *
  259.                 JUMP_HISPEED_FREQ_PERCENTAGE / 100;
  260.             if (unlikely(dbs_info->requested_freq < policy->min))
  261.                 dbs_info->requested_freq = policy->min;
  262.         }
  263.  
  264.         __cpufreq_driver_target(policy, dbs_info->requested_freq,
  265.             CPUFREQ_RELATION_H);
  266.         dbs_info->up_ticks = dbs_info->down_ticks = 0;
  267.         return;
  268.     }
  269.  
  270.     /* Check for frequency increase */
  271.     if (load > lf_tuners->up_threshold) {
  272.         /* if we are already at full speed then break out early */
  273.         if (dbs_info->requested_freq == policy->max)
  274.             return;
  275.  
  276.         /* vote to raise the frequency */
  277.         dbs_info->up_ticks += 1;
  278.         dbs_info->down_ticks = 0;
  279.         voted = true;
  280.     }
  281.  
  282.     /* Check for frequency decrease */
  283.     if (load < lf_tuners->down_threshold) {
  284.         /* if we cannot reduce the frequency anymore, break out early */
  285.         if (policy->cur == policy->min)
  286.             return;
  287.  
  288.         /* vote to lower the frequency */
  289.         dbs_info->down_ticks += 1;
  290.         dbs_info->up_ticks = 0;
  291.         voted = true;
  292.     }
  293.  
  294.     if (!voted)
  295.     {
  296.         if (dbs_info->down_ticks) dbs_info->down_ticks -= 1;
  297.         if (dbs_info->up_ticks) dbs_info->up_ticks -= 1;
  298.     }
  299.  
  300.     /* update the frequency if enough votes to change */
  301.     if (dbs_info->up_ticks >= lf_tuners->sampling_up_factor) {
  302.         new_frequency = policy->cur * RAMP_UP_PERCENTAGE / 100;
  303.         if (new_frequency < dbs_info->requested_freq + freq_shift)
  304.             new_frequency = dbs_info->requested_freq + freq_shift;
  305.         if (new_frequency > policy->max)
  306.             new_frequency = policy->max;
  307.  
  308.         dbs_info->requested_freq = new_frequency;
  309.  
  310.         __cpufreq_driver_target(policy, dbs_info->requested_freq,
  311.             CPUFREQ_RELATION_H);
  312.         dbs_info->up_ticks = dbs_info->down_ticks = 0;
  313.     } else if (dbs_info->down_ticks >= lf_tuners->sampling_down_factor) {
  314.         new_frequency = policy->cur * RAMP_DOWN_PERCENTAGE / 100;
  315.         if (new_frequency > dbs_info->requested_freq - freq_shift)
  316.             new_frequency = dbs_info->requested_freq - freq_shift;
  317.         if (new_frequency < policy->min)
  318.             new_frequency = policy->min;
  319.  
  320.         dbs_info->requested_freq = new_frequency;
  321.  
  322.         __cpufreq_driver_target(policy, dbs_info->requested_freq,
  323.             CPUFREQ_RELATION_L);
  324.         dbs_info->up_ticks = dbs_info->down_ticks = 0;
  325.     }
  326. }
  327.  
/*
 * Periodic sampling work.  Runs the load evaluation for the policy CPU
 * (rate-limited by need_load_eval()) and requeues itself after 'delay'.
 */
static void lf_dbs_timer(struct work_struct *work)
{
    struct lf_cpu_dbs_info_s *dbs_info = container_of(work,
            struct lf_cpu_dbs_info_s, cdbs.work.work);
    unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
    struct lf_cpu_dbs_info_s *core_dbs_info = &per_cpu(lf_cpu_dbs_info,
            cpu);
    struct lf_gdbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
    struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
    int delay = delay_for_sampling_rate(lf_tuners->sampling_rate);
    bool modify_all = true;

    mutex_lock(&core_dbs_info->cdbs.timer_mutex);
    /* if we sampled too recently, only requeue the local CPU's work */
    if (!need_load_eval(&core_dbs_info->cdbs, lf_tuners->sampling_rate))
        modify_all = false;
    else
        lf_check_cpu(dbs_data, cpu);

    lf_gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
    mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
  349.  
  350. static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  351.         void *data)
  352. {
  353.     struct cpufreq_freqs *freq = data;
  354.     struct lf_cpu_dbs_info_s *dbs_info =
  355.                     &per_cpu(lf_cpu_dbs_info, freq->cpu);
  356.     struct cpufreq_policy *policy;
  357.  
  358.     if (!dbs_info->enable)
  359.         return 0;
  360.  
  361.     policy = dbs_info->cdbs.cur_policy;
  362.  
  363.     /*
  364.      * we only care if our internally tracked freq moves outside the 'valid'
  365.      * ranges of frequency available to us otherwise we do not change it
  366.     */
  367.     if (dbs_info->requested_freq > policy->max
  368.             || dbs_info->requested_freq < policy->min)
  369.         dbs_info->requested_freq = freq->new;
  370.  
  371.     return 0;
  372. }
  373.  
  374. /*************************** sysfs interface ****************************/
  375. static ssize_t store_sampling_up_factor(struct lf_gdbs_data *dbs_data,
  376.         const char *buf, size_t count)
  377. {
  378.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  379.     unsigned int input;
  380.     int ret;
  381.     ret = sscanf(buf, "%u", &input);
  382.  
  383.     if (ret != 1 || input > MAX_SAMPLING_FACTOR || input < 1)
  384.         return -EINVAL;
  385.  
  386.     lf_tuners->sampling_up_factor = input;
  387.     return count;
  388. }
  389.  
  390. static ssize_t store_sampling_down_factor(struct lf_gdbs_data *dbs_data,
  391.         const char *buf, size_t count)
  392. {
  393.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  394.     unsigned int input;
  395.     int ret;
  396.     ret = sscanf(buf, "%u", &input);
  397.  
  398.     if (ret != 1 || input > MAX_SAMPLING_FACTOR || input < 1)
  399.         return -EINVAL;
  400.  
  401.     lf_tuners->sampling_down_factor = input;
  402.     return count;
  403. }
  404.  
  405. static ssize_t store_sampling_rate(struct lf_gdbs_data *dbs_data, const char *buf,
  406.         size_t count)
  407. {
  408.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  409.     unsigned int input;
  410.     int ret;
  411.     ret = sscanf(buf, "%u", &input);
  412.  
  413.     if (ret != 1)
  414.         return -EINVAL;
  415.  
  416.     lf_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
  417.     return count;
  418. }
  419.  
  420. static ssize_t store_jump_threshold(struct lf_gdbs_data *dbs_data, const char *buf,
  421.         size_t count)
  422. {
  423.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  424.     unsigned int input;
  425.     int ret;
  426.     ret = sscanf(buf, "%u", &input);
  427.  
  428.     if (ret != 1 || input > 100 || input <= lf_tuners->down_threshold)
  429.         return -EINVAL;
  430.  
  431.     lf_tuners->jump_threshold = input;
  432.     return count;
  433. }
  434.  
  435. static ssize_t store_up_threshold(struct lf_gdbs_data *dbs_data, const char *buf,
  436.         size_t count)
  437. {
  438.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  439.     unsigned int input;
  440.     int ret;
  441.     ret = sscanf(buf, "%u", &input);
  442.  
  443.     if (ret != 1 || input > 100 || input <= lf_tuners->down_threshold)
  444.         return -EINVAL;
  445.  
  446.     lf_tuners->up_threshold = input;
  447.     return count;
  448. }
  449.  
  450. static ssize_t store_down_threshold(struct lf_gdbs_data *dbs_data, const char *buf,
  451.         size_t count)
  452. {
  453.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  454.     unsigned int input;
  455.     int ret;
  456.     ret = sscanf(buf, "%u", &input);
  457.  
  458.     /* cannot be lower than 11 otherwise freq will not fall */
  459.     if (ret != 1 || input < 11 || input > 100 ||
  460.             input >= lf_tuners->up_threshold)
  461.         return -EINVAL;
  462.  
  463.     lf_tuners->down_threshold = input;
  464.     return count;
  465. }
  466.  
  467. static ssize_t store_jump_level(struct lf_gdbs_data *dbs_data, const char *buf,
  468.         size_t count)
  469. {
  470.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  471.     unsigned int input;
  472.     int ret;
  473.     ret = sscanf(buf, "%u", &input);
  474.  
  475.     /* jump level is a frequency in kHz */
  476.     if (ret != 1 || input < 80000 || input > 5000000)
  477.         return -EINVAL;
  478.  
  479.     lf_tuners->jump_level = input;
  480.     return count;
  481. }
  482.  
  483. static ssize_t store_ignore_nice_load(struct lf_gdbs_data *dbs_data,
  484.         const char *buf, size_t count)
  485. {
  486.     struct lf_dbs_tuners *lf_tuners = dbs_data->tuners;
  487.     unsigned int input, j;
  488.     int ret;
  489.  
  490.     ret = sscanf(buf, "%u", &input);
  491.     if (ret != 1)
  492.         return -EINVAL;
  493.  
  494.     if (input > 1)
  495.         input = 1;
  496.  
  497.     if (input == lf_tuners->ignore_nice_load) /* nothing to do */
  498.         return count;
  499.  
  500.     lf_tuners->ignore_nice_load = input;
  501.  
  502.     /* we need to re-evaluate prev_cpu_idle */
  503.     for_each_online_cpu(j) {
  504.         struct lf_cpu_dbs_info_s *dbs_info;
  505.         dbs_info = &per_cpu(lf_cpu_dbs_info, j);
  506.         dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
  507.                     &dbs_info->cdbs.prev_cpu_wall, 0);
  508.         if (lf_tuners->ignore_nice_load)
  509.             dbs_info->cdbs.prev_cpu_nice =
  510.                 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
  511.     }
  512.     return count;
  513. }
  514.  
/*
 * macros to autogenerate sysfs interface: one show/store pair per tunable
 * (both _gov_sys and _gov_pol flavors), plus the read-only minimum rate.
 */
lf_show_store_one(lf, sampling_rate);
lf_show_store_one(lf, sampling_up_factor);
lf_show_store_one(lf, sampling_down_factor);
lf_show_store_one(lf, jump_threshold);
lf_show_store_one(lf, up_threshold);
lf_show_store_one(lf, down_threshold);
lf_show_store_one(lf, jump_level);
lf_show_store_one(lf, ignore_nice_load);
lf_declare_show_sampling_rate_min(lf);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_up_factor);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(jump_threshold);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(jump_level);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_ro(sampling_rate_min);
  535.  
  536. /********** globals requiring sysfs functions to be defined *************/
/* sysfs attributes for the system-wide governor instance */
static struct attribute *dbs_attributes_gov_sys[] = {
    &sampling_rate_min_gov_sys.attr,
    &sampling_rate_gov_sys.attr,
    &sampling_up_factor_gov_sys.attr,
    &sampling_down_factor_gov_sys.attr,
    &jump_threshold_gov_sys.attr,
    &up_threshold_gov_sys.attr,
    &down_threshold_gov_sys.attr,
    &jump_level_gov_sys.attr,
    &ignore_nice_load_gov_sys.attr,
    NULL
};

static struct attribute_group lf_attr_group_gov_sys = {
    .attrs = dbs_attributes_gov_sys,
    .name = "lionfish",
};
  554.  
/* sysfs attributes for per-policy governor instances */
static struct attribute *dbs_attributes_gov_pol[] = {
    &sampling_rate_min_gov_pol.attr,
    &sampling_rate_gov_pol.attr,
    &sampling_up_factor_gov_pol.attr,
    &sampling_down_factor_gov_pol.attr,
    &jump_threshold_gov_pol.attr,
    &up_threshold_gov_pol.attr,
    &down_threshold_gov_pol.attr,
    &jump_level_gov_pol.attr,
    &ignore_nice_load_gov_pol.attr,
    NULL
};

static struct attribute_group lf_attr_group_gov_pol = {
    .attrs = dbs_attributes_gov_pol,
    .name = "lionfish",
};
  572.  
/* registered for CPUFREQ_TRANSITION_NOTIFIER events in GOV_POLICY_INIT */
static struct notifier_block lf_cpufreq_notifier_block = {
    .notifier_call = dbs_cpufreq_notifier,
};

/* the common governor data instance (forward-declared above the sysfs code) */
static struct lf_dbs_data lf_dbs_cdata = {
    .attr_group_gov_sys = &lf_attr_group_gov_sys,
    .attr_group_gov_pol = &lf_attr_group_gov_pol,
    .notifier_block = &lf_cpufreq_notifier_block,
};
  582.  
  583. /*************************** boilerplate code ***************************/
  584. static struct attribute_group *get_sysfs_attr(struct lf_gdbs_data *dbs_data)
  585. {
  586.     if (have_governor_per_policy())
  587.         return dbs_data->cdata->attr_group_gov_pol;
  588.     else
  589.         return dbs_data->cdata->attr_group_gov_sys;
  590. }
  591.  
/* (Re)arm one CPU's deferrable sampling work to run after 'delay' jiffies. */
static inline void __lf_gov_queue_work(int cpu, struct lf_gdbs_data *dbs_data,
        unsigned int delay)
{
    struct cpu_dbs_common_info *cdbs = &per_cpu(lf_cpu_dbs_info, cpu).cdbs;

    mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}
  599.  
/*
 * Queue the sampling work after 'delay' jiffies — on every CPU of the
 * policy when all_cpus is set, otherwise only on the local CPU.  No-op if
 * the governor has been disabled for this policy.
 */
static void lf_gov_queue_work(struct lf_gdbs_data *dbs_data,
        struct cpufreq_policy *policy, unsigned int delay, bool all_cpus)
{
    int i;

    if (!policy->governor_enabled)
        return;

    if (!all_cpus) {
        /*
         * Use raw_smp_processor_id() to avoid preemptible warnings.
         * We know that this is only called with all_cpus == false from
         * works that have been queued with *_work_on() functions and
         * those works are canceled during CPU_DOWN_PREPARE so they
         * can't possibly run on any other CPU.
         */
        __lf_gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
    } else {
        for_each_cpu(i, policy->cpus)
            __lf_gov_queue_work(i, dbs_data, delay);
    }
}
  622.  
  623. static inline void lf_gov_cancel_work(struct lf_gdbs_data *dbs_data,
  624.         struct cpufreq_policy *policy)
  625. {
  626.     struct cpu_dbs_common_info *cdbs;
  627.     int i;
  628.  
  629.     for_each_cpu(i, policy->cpus) {
  630.         cdbs = &per_cpu(lf_cpu_dbs_info, i).cdbs;
  631.         cancel_delayed_work_sync(&cdbs->work);
  632.     }
  633. }
  634.  
  635. static int lf_init(struct lf_gdbs_data *dbs_data)
  636. {
  637.     struct lf_dbs_tuners *tuners;
  638.  
  639.     tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
  640.     if (!tuners) {
  641.         pr_err("%s: kzalloc failed\n", __func__);
  642.         return -ENOMEM;
  643.     }
  644.  
  645.     tuners->jump_threshold = DEF_FREQUENCY_JUMP_THRESHOLD;
  646.     tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
  647.     tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
  648.     tuners->jump_level = DEF_FREQUENCY_JUMP_LEVEL;
  649.     tuners->sampling_up_factor = DEF_SAMPLING_UP_FACTOR;
  650.     tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
  651.     tuners->ignore_nice_load = 0;
  652.  
  653.     dbs_data->tuners = tuners;
  654.  
  655.     /*
  656.      * you want at least 8 jiffies between sample intervals for the
  657.      * CPU usage stats to be reasonable
  658.      */
  659.     dbs_data->min_sampling_rate = jiffies_to_usecs(8);
  660.  
  661.     mutex_init(&dbs_data->mutex);
  662.     return 0;
  663. }
  664.  
/* Release the tuners allocated by lf_init(). */
static void lf_exit(struct lf_gdbs_data *dbs_data)
{
    kfree(dbs_data->tuners);
}
  669.  
/*
 * Governor event callback invoked by the cpufreq core.
 *
 * POLICY_INIT allocates per-governor data, tuners and the sysfs group (or
 * bumps the refcount of the shared instance); POLICY_EXIT tears them down
 * when the last user goes away; START initializes per-CPU state and arms
 * the sampling works; STOP cancels them; LIMITS clamps the running
 * frequency into the new [min, max] range.  Returns 0 or a negative errno.
 */
static int lf_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                   unsigned int event)
{
    struct lf_dbs_data *cdata = &lf_dbs_cdata;
    struct lf_gdbs_data *dbs_data;
    struct lf_cpu_dbs_info_s *lf_dbs_info = NULL;
    struct lf_dbs_tuners *lf_tuners = NULL;
    struct cpu_dbs_common_info *cpu_cdbs;
    unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
    int rc;

    if (have_governor_per_policy())
        dbs_data = policy->governor_data;
    else
        dbs_data = cdata->gdbs_data;

    /* dbs_data may only be NULL while the policy is being initialized */
    WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));

    switch (event) {
    case CPUFREQ_GOV_POLICY_INIT:
        if (have_governor_per_policy()) {
            WARN_ON(dbs_data);
        } else if (dbs_data) {
            /* reuse the shared instance for additional policies */
            dbs_data->usage_count++;
            policy->governor_data = dbs_data;
            return 0;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data) {
            pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
            return -ENOMEM;
        }

        dbs_data->cdata = cdata;
        dbs_data->usage_count = 1;
        rc = lf_init(dbs_data);
        if (rc) {
            pr_err("%s: POLICY_INIT: init() failed\n", __func__);
            kfree(dbs_data);
            return rc;
        }

        if (!have_governor_per_policy())
            WARN_ON(cpufreq_get_global_kobject());

        rc = sysfs_create_group(get_governor_parent_kobj(policy),
                get_sysfs_attr(dbs_data));
        if (rc) {
            lf_exit(dbs_data);
            kfree(dbs_data);
            return rc;
        }

        policy->governor_data = dbs_data;

        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
            latency = 1;

        /*
         * The minimum sampling rate should be at least 1000x the
         * CPU frequency transition latency. We compare this
         * requirement with the governor's minimum sampling rate
         * and use whichever is greater. By default, we will sample
         * at the half fastest acceptable rate.
         */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                LATENCY_MULTIPLIER * latency);
        dbs_data->tuners->sampling_rate = dbs_data->min_sampling_rate * 2;

        /* register the transition notifier only once, for the first policy */
        if (!policy->governor->initialized) {
            cpufreq_register_notifier(dbs_data->cdata->notifier_block,
                    CPUFREQ_TRANSITION_NOTIFIER);
        }

        if (!have_governor_per_policy())
            cdata->gdbs_data = dbs_data;

        return 0;

    case CPUFREQ_GOV_POLICY_EXIT:
        /* tear everything down only when the last policy lets go */
        if (!--dbs_data->usage_count) {
            sysfs_remove_group(get_governor_parent_kobj(policy),
                    get_sysfs_attr(dbs_data));

            if (!have_governor_per_policy())
                cpufreq_put_global_kobject();

            if (policy->governor->initialized == 1) {
                cpufreq_unregister_notifier(dbs_data->cdata->notifier_block,
                        CPUFREQ_TRANSITION_NOTIFIER);
            }

            lf_exit(dbs_data);
            kfree(dbs_data);
            cdata->gdbs_data = NULL;
        }

        policy->governor_data = NULL;
        return 0;

    case CPUFREQ_GOV_START:
        if (!policy->cur)
            return -EINVAL;

        cpu_cdbs = &per_cpu(lf_cpu_dbs_info, cpu).cdbs;
        lf_tuners = dbs_data->tuners;
        lf_dbs_info = &per_cpu(lf_cpu_dbs_info, cpu);
        sampling_rate = lf_tuners->sampling_rate;
        ignore_nice = lf_tuners->ignore_nice_load;

        mutex_lock(&dbs_data->mutex);

        /* seed per-CPU accounting state for every CPU in the policy */
        for_each_cpu(j, policy->cpus) {
            struct cpu_dbs_common_info *j_cdbs =
                &per_cpu(lf_cpu_dbs_info, j).cdbs;

            j_cdbs->cpu = j;
            j_cdbs->cur_policy = policy;
            j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
                           &j_cdbs->prev_cpu_wall, 0);
            if (ignore_nice)
                j_cdbs->prev_cpu_nice =
                    kcpustat_cpu(j).cpustat[CPUTIME_NICE];

            mutex_init(&j_cdbs->timer_mutex);
            INIT_DEFERRABLE_WORK(&j_cdbs->work, lf_dbs_timer);
        }

        lf_dbs_info->up_ticks = 0;
        lf_dbs_info->down_ticks = 0;
        lf_dbs_info->enable = 1;
        lf_dbs_info->requested_freq = policy->cur;

        mutex_unlock(&dbs_data->mutex);

        /* Initiate timer time stamp */
        cpu_cdbs->time_stamp = ktime_get();

        lf_gov_queue_work(dbs_data, policy,
                delay_for_sampling_rate(sampling_rate), true);
        break;

    case CPUFREQ_GOV_STOP:
        cpu_cdbs = &per_cpu(lf_cpu_dbs_info, cpu).cdbs;
        lf_dbs_info = &per_cpu(lf_cpu_dbs_info, cpu);
        /* stop the notifier from touching this CPU, then drain the works */
        lf_dbs_info->enable = 0;

        lf_gov_cancel_work(dbs_data, policy);

        mutex_lock(&dbs_data->mutex);
        mutex_destroy(&cpu_cdbs->timer_mutex);
        cpu_cdbs->cur_policy = NULL;

        mutex_unlock(&dbs_data->mutex);

        break;

    case CPUFREQ_GOV_LIMITS:
        /*
         * NOTE(review): cur_policy is dereferenced without a NULL
         * check here; assumes core cpufreq never delivers GOV_LIMITS
         * concurrently with (or after) GOV_STOP — confirm against the
         * core's governor locking for this kernel version.
         */
        cpu_cdbs = &per_cpu(lf_cpu_dbs_info, cpu).cdbs;
        mutex_lock(&cpu_cdbs->timer_mutex);
        if (policy->max < cpu_cdbs->cur_policy->cur)
            __cpufreq_driver_target(cpu_cdbs->cur_policy,
                    policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > cpu_cdbs->cur_policy->cur)
            __cpufreq_driver_target(cpu_cdbs->cur_policy,
                    policy->min, CPUFREQ_RELATION_L);
        lf_check_cpu(dbs_data, cpu);
        mutex_unlock(&cpu_cdbs->timer_mutex);
        break;
    default:
        break;
    }

    return 0;
}
  848.  
/*
 * Governor descriptor.  Kept static unless lionfish is the default
 * governor, in which case the cpufreq core needs the symbol exported.
 */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LIONFISH
static
#endif
struct cpufreq_governor cpufreq_gov_lionfish = {
    .name           = "lionfish",
    .governor       = lf_cpufreq_governor_dbs,
    .max_transition_latency = TRANSITION_LATENCY_LIMIT,
    .owner          = THIS_MODULE,
};
  858.  
/* Module entry point: register the lionfish governor with the cpufreq core. */
static int __init cpufreq_gov_dbs_init(void)
{
    return cpufreq_register_governor(&cpufreq_gov_lionfish);
}

/* Module exit point: unregister the governor. */
static void __exit cpufreq_gov_dbs_exit(void)
{
    cpufreq_unregister_governor(&cpufreq_gov_lionfish);
}

MODULE_AUTHOR("Sultan Qasim Khan <sultanqasim@gmail.com>");
MODULE_DESCRIPTION("'cpufreq_lionfish' - A dynamic cpufreq governor for mobile "
        "devices designed to keep CPU frequencies to a minimum while "
        "still briefly boosting frequencies as required to avoid lag."
);
MODULE_LICENSE("GPL");

/*
 * When lionfish is the default governor it must be available early in boot,
 * so register at fs_initcall time; otherwise ordinary module init suffices.
 */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LIONFISH
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement