/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * Copyright (C) 2017, Alucard24@XDA
 * Author: Alucard24 <dmbaoh2@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"
#include "tune.h"
#include <linux/display_state.h>

#ifdef CONFIG_SCHED_WALT
unsigned long boosted_cpu_util(int cpu);
#endif

/* Stub out fast switch routines present on mainline to reduce the backport
 * overhead. */
#define cpufreq_driver_fast_switch(x, y) 0
#define cpufreq_enable_fast_switch(x)
#define cpufreq_disable_fast_switch(x)
#define LATENCY_MULTIPLIER          (2000)
#define DKGOV_KTHREAD_PRIORITY  50

#define BOOST_PERC                  5
#define DEFAULT_RATE_LIMIT_SUSP_NS ((s64)(80000 * NSEC_PER_USEC))
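
/*
 * Note: boost_perc inflates the reported utilization by a fixed percentage
 * before the capacity-table lookup below, e.g. with boost_perc = 5:
 *
 *     cur_util = util + (util * 5) / 100;    (1000 -> 1050)
 *
 * DEFAULT_RATE_LIMIT_SUSP_NS (80 ms) is the floor applied to both rate
 * limits while the display is off, so the frequency is re-evaluated less
 * often on an idle device. DKGOV_KTHREAD_PRIORITY is currently unused;
 * dkgov_kthread_create() sets MAX_RT_PRIO - 1 directly.
 */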

struct dkgov_tunables {
    struct gov_attr_set attr_set;
    unsigned int up_rate_limit_us;
    unsigned int down_rate_limit_us;
    /*
     * Extra headroom applied to CPU utilization before the
     * frequency lookup, in percent.
     */
    unsigned int boost_perc;
};

struct dkgov_policy {
    struct cpufreq_policy *policy;

    struct dkgov_tunables *tunables;
    struct list_head tunables_hook;

    raw_spinlock_t update_lock;  /* For shared policies */
    u64 last_freq_update_time;
    s64 min_rate_limit_ns;
    s64 up_rate_delay_ns;
    s64 down_rate_delay_ns;
    s64 up_rate_delay_prev_ns;
    s64 down_rate_delay_prev_ns;
    unsigned int next_freq;

    /* The next fields are only needed if fast switch cannot be used. */
    struct irq_work irq_work;
    struct kthread_work work;
    struct mutex work_lock;
    struct kthread_worker worker;
    struct task_struct *thread;
    bool work_in_progress;

    bool need_freq_update;
};

struct dkgov_cpu {
    struct update_util_data update_util;
    struct dkgov_policy *sg_policy;

    unsigned long iowait_boost;
    unsigned long iowait_boost_max;
    u64 last_update;

    /* The fields below are only needed when sharing a policy. */
    unsigned long util;
    unsigned long max;
    unsigned int flags;
};

static DEFINE_PER_CPU(struct dkgov_cpu, dkgov_cpu);
static DEFINE_PER_CPU(struct dkgov_tunables, cached_tunables);

#define LITTLE_NFREQS               19
#define BIG_NFREQS                  28
static unsigned long little_capacity[LITTLE_NFREQS] = {
    0, 12, 17, 21, 27, 31, 37, 42, 47, 52,
    57, 62, 70, 78, 89, 103, 122, 141, 160,
};

static unsigned long big_capacity[BIG_NFREQS] = {
    0, 1736, 1926, 2108, 2284, 2456, 2628, 2804, 2992, 3255,
    3499, 3786, 4128, 4535, 5019, 5583, 6226, 7120, 8628, 9344,
    10030, 10806, 12045, 15686, 25586, 30000, 35000, 40000,
};
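
/*
 * Each entry above gives, for the matching index of the cluster's cpufreq
 * frequency table, the utilization threshold at which that frequency
 * becomes a candidate: resolve_target_freq() below keeps the highest
 * frequency whose threshold is still <= the boosted utilization. The
 * thresholds are evidently tuned for one specific SoC (19 little and 28
 * big OPPs) and would have to be regenerated for any other frequency
 * table.
 */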

/************************ Governor internals ***********************/

static bool dkgov_should_update_freq(struct dkgov_policy *sg_policy, u64 time)
{
    s64 delta_ns;

    if (sg_policy->work_in_progress)
        return false;

    if (unlikely(sg_policy->need_freq_update)) {
        sg_policy->need_freq_update = false;
        /*
         * This happens when limits change, so forget the previous
         * next_freq value and force an update.
         */
        sg_policy->next_freq = UINT_MAX;
        return true;
    }

    delta_ns = time - sg_policy->last_freq_update_time;

    /* No need to recalculate next freq for min_rate_limit_us at least */
    return delta_ns >= sg_policy->min_rate_limit_ns;
}

static bool dkgov_up_down_rate_limit(struct dkgov_policy *sg_policy, u64 time,
                     unsigned int next_freq)
{
    const bool display_on = is_display_on();
    s64 delta_ns;

    delta_ns = time - sg_policy->last_freq_update_time;

    if (display_on) {
        /* Display is on: restore the previously tuned rate limits. */
        if (sg_policy->up_rate_delay_ns != sg_policy->up_rate_delay_prev_ns)
            sg_policy->up_rate_delay_ns = sg_policy->up_rate_delay_prev_ns;
        if (sg_policy->down_rate_delay_ns != sg_policy->down_rate_delay_prev_ns)
            sg_policy->down_rate_delay_ns = sg_policy->down_rate_delay_prev_ns;
    } else {
        /*
         * Display is off: save the tuned values and clamp both rate
         * limits to at least DEFAULT_RATE_LIMIT_SUSP_NS so frequency
         * updates happen less often while suspended.
         */
        if (sg_policy->up_rate_delay_ns != DEFAULT_RATE_LIMIT_SUSP_NS) {
            sg_policy->up_rate_delay_prev_ns = sg_policy->up_rate_delay_ns;
            sg_policy->up_rate_delay_ns
                = max(sg_policy->up_rate_delay_ns,
                    DEFAULT_RATE_LIMIT_SUSP_NS);
        }
        if (sg_policy->down_rate_delay_ns != DEFAULT_RATE_LIMIT_SUSP_NS) {
            sg_policy->down_rate_delay_prev_ns = sg_policy->down_rate_delay_ns;
            sg_policy->down_rate_delay_ns
                = max(sg_policy->down_rate_delay_ns,
                    DEFAULT_RATE_LIMIT_SUSP_NS);
        }
    }

    if (next_freq > sg_policy->next_freq &&
        delta_ns < sg_policy->up_rate_delay_ns)
            return true;

    if (next_freq < sg_policy->next_freq &&
        delta_ns < sg_policy->down_rate_delay_ns)
            return true;

    return false;
}

static void dkgov_update_commit(struct dkgov_policy *sg_policy, u64 time,
                unsigned int next_freq)
{
    struct cpufreq_policy *policy = sg_policy->policy;

    if (dkgov_up_down_rate_limit(sg_policy, time, next_freq))
        return;

    sg_policy->last_freq_update_time = time;

    if (!next_freq)
        return;

    if (policy->fast_switch_enabled) {
        if (policy->cur == next_freq) {
            trace_cpu_frequency(policy->cur, smp_processor_id());
            return;
        }
        sg_policy->next_freq = next_freq;
        next_freq = cpufreq_driver_fast_switch(policy, next_freq);
        if (next_freq == CPUFREQ_ENTRY_INVALID)
            return;

        policy->cur = next_freq;
        trace_cpu_frequency(next_freq, smp_processor_id());
    } else if (policy->cur != next_freq) {
        sg_policy->next_freq = next_freq;
        sg_policy->work_in_progress = true;
        irq_work_queue(&sg_policy->irq_work);
    }
}

static unsigned int resolve_target_freq(struct cpufreq_policy *policy,
                    unsigned long util)
{
    struct cpufreq_frequency_table *table;
    unsigned int target_freq = 0;
    int i = 0;

    if (!policy)
        return 0;

    table = policy->freq_table;
    if (policy->cpu < 4) {
        for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
            if (table[i].frequency == CPUFREQ_ENTRY_INVALID
                || i >= LITTLE_NFREQS)
                continue;
            if (util < little_capacity[i])
                break;
            target_freq = table[i].frequency;
        }
    } else {
        for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
            if (table[i].frequency == CPUFREQ_ENTRY_INVALID
                || i >= BIG_NFREQS)
                continue;
            if (util < big_capacity[i])
                break;
            target_freq = table[i].frequency;
        }
    }
    return target_freq;
}
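
/*
 * Worked example (little cluster, boost_perc = 5, no invalid entries in
 * the driver's table): a raw util of 100 is boosted to 105; the walk
 * stops at index 16 because 105 < little_capacity[16] (122), so the
 * frequency at index 15 (threshold 103) is returned. A util below 12
 * never advances past index 1 and returns the lowest OPP. Note that the
 * mapping assumes a dense frequency table: CPUFREQ_ENTRY_INVALID holes
 * would skew the index-to-threshold pairing, and "policy->cpu < 4" is a
 * hard-coded assumption that CPUs 0-3 form the little cluster.
 */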

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_cpu: darknesssched CPU object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * Look up the target frequency for the boosted utilization in the
 * per-cluster capacity tables. If no frequency has been resolved yet,
 * fall back to the schedutil-style estimate and return the lowest
 * driver-supported frequency equal to or greater than it, subject to
 * policy min/max and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct dkgov_cpu *sg_cpu, unsigned long util,
                  unsigned long max)
{
    struct dkgov_policy *sg_policy = sg_cpu->sg_policy;
    struct cpufreq_policy *policy = sg_policy->policy;
    struct dkgov_tunables *tunables = sg_policy->tunables;
    unsigned int next_freq = 0;
    unsigned long cur_util =
            util + ((util * tunables->boost_perc) / 100);

    next_freq = resolve_target_freq(policy, cur_util);
    if (sg_policy->next_freq == UINT_MAX && !next_freq) {
        next_freq = arch_scale_freq_invariant() ?
                policy->cpuinfo.max_freq : policy->cur;
        next_freq = (next_freq + (next_freq >> 2)) * util / max;
        return cpufreq_driver_resolve_freq(policy, next_freq);
    }
    return next_freq;
}
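
/*
 * The UINT_MAX fallback above mirrors schedutil's estimate,
 * next_freq = 1.25 * max_freq * util / max (the "next_freq >> 2" term
 * adds 25% headroom). For example, util = max/2 on a 2 GHz CPU requests
 * roughly 1.25 GHz, which cpufreq_driver_resolve_freq() then rounds to a
 * supported OPP.
 */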

static void dkgov_get_util(unsigned long *util, unsigned long *max, u64 time)
{
    int cpu = smp_processor_id();
    struct rq *rq = cpu_rq(cpu);
    unsigned long max_cap, rt;
    s64 delta;

    max_cap = arch_scale_cpu_capacity(NULL, cpu);

    sched_avg_update(rq);
    delta = time - rq->age_stamp;
    if (unlikely(delta < 0))
        delta = 0;
    rt = div64_u64(rq->rt_avg, sched_avg_period() + delta);
    rt = (rt * max_cap) >> SCHED_CAPACITY_SHIFT;

    *util = min(rq->cfs.avg.util_avg + rt, max_cap);
#ifdef CONFIG_SCHED_WALT
    if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
        *util = boosted_cpu_util(cpu);
#endif
    *max = max_cap;
}

static void dkgov_set_iowait_boost(struct dkgov_cpu *sg_cpu, u64 time,
                   unsigned int flags)
{
    if (flags & SCHED_CPUFREQ_IOWAIT) {
        sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
    } else if (sg_cpu->iowait_boost) {
        s64 delta_ns = time - sg_cpu->last_update;

        /* Clear iowait_boost if the CPU appears to have been idle. */
        if (delta_ns > TICK_NSEC)
            sg_cpu->iowait_boost = 0;
    }
}

static void dkgov_iowait_boost(struct dkgov_cpu *sg_cpu, unsigned long *util,
                   unsigned long *max)
{
    unsigned long boost_util = sg_cpu->iowait_boost;
    unsigned long boost_max = sg_cpu->iowait_boost_max;

    if (!boost_util)
        return;

    if (*util * boost_max < *max * boost_util) {
        *util = boost_util;
        *max = boost_max;
    }
    sg_cpu->iowait_boost >>= 1;
}
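
/*
 * Consuming the boost halves it, so after one SCHED_CPUFREQ_IOWAIT update
 * the boost decays geometrically (max, max/2, max/4, ...) until it reaches
 * zero or is refreshed. The ratio test above avoids a division:
 * util/max < boost_util/boost_max is evaluated as
 * util * boost_max < max * boost_util.
 */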

static void dkgov_update_single(struct update_util_data *hook, u64 time,
                unsigned int flags)
{
    struct dkgov_cpu *sg_cpu = container_of(hook, struct dkgov_cpu, update_util);
    struct dkgov_policy *sg_policy = sg_cpu->sg_policy;
    struct cpufreq_policy *policy = sg_policy->policy;
    unsigned long util, max;
    unsigned int next_f;

    dkgov_set_iowait_boost(sg_cpu, time, flags);
    sg_cpu->last_update = time;

    if (!dkgov_should_update_freq(sg_policy, time))
        return;

    if (flags & SCHED_CPUFREQ_DL) {
        next_f = policy->cpuinfo.max_freq;
    } else {
        dkgov_get_util(&util, &max, time);
        dkgov_iowait_boost(sg_cpu, &util, &max);
        next_f = get_next_freq(sg_cpu, util, max);
    }
    dkgov_update_commit(sg_policy, time, next_f);
}

static unsigned int dkgov_next_freq_shared(struct dkgov_cpu *sg_cpu,
                       unsigned long util, unsigned long max,
                       unsigned int flags)
{
    struct dkgov_policy *sg_policy = sg_cpu->sg_policy;
    struct cpufreq_policy *policy = sg_policy->policy;
    unsigned int max_f = policy->cpuinfo.max_freq;
    u64 last_freq_update_time = sg_policy->last_freq_update_time;
    unsigned int j;

    if (flags & SCHED_CPUFREQ_DL)
        return max_f;

    dkgov_iowait_boost(sg_cpu, &util, &max);

    for_each_cpu(j, policy->cpus) {
        struct dkgov_cpu *j_sg_cpu;
        unsigned long j_util, j_max;
        s64 delta_ns;

        if (j == smp_processor_id())
            continue;

        j_sg_cpu = &per_cpu(dkgov_cpu, j);
        /*
         * If the CPU utilization was last updated before the previous
         * frequency update and the time elapsed between the last update
         * of the CPU utilization and the last frequency update is long
         * enough, don't take the CPU into account as it probably is
         * idle now (and clear iowait_boost for it).
         */
        delta_ns = last_freq_update_time - j_sg_cpu->last_update;
        if (delta_ns > TICK_NSEC) {
            j_sg_cpu->iowait_boost = 0;
            continue;
        }

        if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
            return max_f;

        j_util = j_sg_cpu->util;
        j_max = j_sg_cpu->max;
        if (j_util * max > j_max * util) {
            util = j_util;
            max = j_max;
        }

        dkgov_iowait_boost(j_sg_cpu, &util, &max);
    }

    return get_next_freq(sg_cpu, util, max);
}
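
/*
 * The shared path selects, across all CPUs of the policy, the (util, max)
 * pair with the largest util/max ratio; the cross-multiplied test
 * "j_util * max > j_max * util" again sidesteps a division. CPUs whose
 * last update predates the previous frequency change by more than a tick
 * are assumed idle and skipped.
 */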

static void dkgov_update_shared(struct update_util_data *hook, u64 time,
                unsigned int flags)
{
    struct dkgov_cpu *sg_cpu = container_of(hook, struct dkgov_cpu, update_util);
    struct dkgov_policy *sg_policy = sg_cpu->sg_policy;
    unsigned long util, max;
    unsigned int next_f;

    dkgov_get_util(&util, &max, time);

    raw_spin_lock(&sg_policy->update_lock);

    sg_cpu->util = util;
    sg_cpu->max = max;
    sg_cpu->flags = flags;

    dkgov_set_iowait_boost(sg_cpu, time, flags);
    sg_cpu->last_update = time;

    if (dkgov_should_update_freq(sg_policy, time)) {
        next_f = dkgov_next_freq_shared(sg_cpu, util, max, flags);
        dkgov_update_commit(sg_policy, time, next_f);
    }

    raw_spin_unlock(&sg_policy->update_lock);
}

static void dkgov_work(struct kthread_work *work)
{
    struct dkgov_policy *sg_policy = container_of(work, struct dkgov_policy, work);

    mutex_lock(&sg_policy->work_lock);
    __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
                CPUFREQ_RELATION_L);
    mutex_unlock(&sg_policy->work_lock);

    sg_policy->work_in_progress = false;
}

static void dkgov_irq_work(struct irq_work *irq_work)
{
    struct dkgov_policy *sg_policy;

    sg_policy = container_of(irq_work, struct dkgov_policy, irq_work);

    /*
     * For RT and deadline tasks, this governor shoots the frequency to
     * maximum, and special care must be taken to ensure that this
     * kthread doesn't result in that.
     *
     * This is (mostly) guaranteed by the work_in_progress flag. The flag
     * is updated only at the end of dkgov_work(), and before that the
     * governor rejects all other frequency scaling requests.
     *
     * There is a very rare case, though, where the RT thread yields right
     * after the work_in_progress flag is cleared. The effects of that are
     * neglected for now.
     */
    queue_kthread_work(&sg_policy->worker, &sg_policy->work);
}
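
/*
 * Slow-path flow: the scheduler's update hook raises the irq_work (safe
 * from hard-IRQ context), the irq_work handler queues the kthread work,
 * and dkgov_work() finally calls __cpufreq_driver_target() from process
 * context, where the driver may sleep. Since the fast-switch stubs above
 * keep fast_switch_enabled false, this is effectively the only path that
 * changes the hardware frequency.
 */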

/************************** sysfs interface ************************/

static struct dkgov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct dkgov_tunables *to_dkgov_tunables(struct gov_attr_set *attr_set)
{
    return container_of(attr_set, struct dkgov_tunables, attr_set);
}

static DEFINE_MUTEX(min_rate_lock);

static void update_min_rate_limit_us(struct dkgov_policy *sg_policy)
{
    mutex_lock(&min_rate_lock);
    sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
                       sg_policy->down_rate_delay_ns);
    mutex_unlock(&min_rate_lock);
}

/* up_rate_limit_us */
static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
    struct dkgov_tunables *tunables = to_dkgov_tunables(attr_set);

    return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
}

/* down_rate_limit_us */
static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
    struct dkgov_tunables *tunables = to_dkgov_tunables(attr_set);

    return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
}

/* boost_perc */
static ssize_t boost_perc_show(struct gov_attr_set *attr_set, char *buf)
{
    struct dkgov_tunables *tunables = to_dkgov_tunables(attr_set);

    return sprintf(buf, "%u\n", tunables->boost_perc);
}

/* up_rate_limit_us */
static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
                      const char *buf, size_t count)
{
    struct dkgov_tunables *tunables = to_dkgov_tunables(attr_set);
    struct dkgov_policy *sg_policy;
    unsigned int rate_limit_us;

    if (kstrtouint(buf, 10, &rate_limit_us))
        return -EINVAL;

    tunables->up_rate_limit_us = rate_limit_us;

    list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
        sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
        sg_policy->up_rate_delay_prev_ns = rate_limit_us * NSEC_PER_USEC;
        update_min_rate_limit_us(sg_policy);
    }

    return count;
}

/* down_rate_limit_us */
static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
                    const char *buf, size_t count)
{
    struct dkgov_tunables *tunables = to_dkgov_tunables(attr_set);
    struct dkgov_policy *sg_policy;
    unsigned int rate_limit_us;

    if (kstrtouint(buf, 10, &rate_limit_us))
        return -EINVAL;

    tunables->down_rate_limit_us = rate_limit_us;

    list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
        sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
        sg_policy->down_rate_delay_prev_ns = rate_limit_us * NSEC_PER_USEC;
        update_min_rate_limit_us(sg_policy);
    }

    return count;
}

/* boost_perc */
static ssize_t boost_perc_store(struct gov_attr_set *attr_set,
                    const char *buf, size_t count)
{
    struct dkgov_tunables *tunables = to_dkgov_tunables(attr_set);
    unsigned int input;

    if (kstrtouint(buf, 10, &input))
        return -EINVAL;

    /* kstrtouint() already rejects negatives; cap the boost at 20%. */
    input = min(input, 20U);

    if (input == tunables->boost_perc)
        return count;

    tunables->boost_perc = input;

    return count;
}

static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
static struct governor_attr boost_perc = __ATTR_RW(boost_perc);

static struct attribute *dkgov_attributes[] = {
    &up_rate_limit_us.attr,
    &down_rate_limit_us.attr,
    &boost_perc.attr,
    NULL
};

static struct kobj_type dkgov_tunables_ktype = {
    .default_attrs = dkgov_attributes,
    .sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor darknesssched_gov;

static struct dkgov_policy *dkgov_policy_alloc(struct cpufreq_policy *policy)
{
    struct dkgov_policy *sg_policy;

    sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
    if (!sg_policy)
        return NULL;

    sg_policy->policy = policy;
    init_irq_work(&sg_policy->irq_work, dkgov_irq_work);
    mutex_init(&sg_policy->work_lock);
    raw_spin_lock_init(&sg_policy->update_lock);
    return sg_policy;
}

static void dkgov_policy_free(struct dkgov_policy *sg_policy)
{
    mutex_destroy(&sg_policy->work_lock);
    kfree(sg_policy);
}

static int dkgov_kthread_create(struct dkgov_policy *sg_policy)
{
    struct task_struct *thread;
    struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
    struct cpufreq_policy *policy = sg_policy->policy;
    int ret;

    /* kthread only required for slow path */
    if (policy->fast_switch_enabled)
        return 0;

    init_kthread_work(&sg_policy->work, dkgov_work);
    init_kthread_worker(&sg_policy->worker);
    thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                "dkgov:%d",
                cpumask_first(policy->related_cpus));
    if (IS_ERR(thread)) {
        pr_err("failed to create dkgov thread: %ld\n", PTR_ERR(thread));
        return PTR_ERR(thread);
    }

    ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
    if (ret) {
        kthread_stop(thread);
        pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
        return ret;
    }

    sg_policy->thread = thread;
    kthread_bind_mask(thread, policy->related_cpus);
    /* NB: wake up so the thread does not look hung to the freezer */
    wake_up_process(thread);

    return 0;
}

static void dkgov_kthread_stop(struct dkgov_policy *sg_policy)
{
    /* kthread only required for slow path */
    if (sg_policy->policy->fast_switch_enabled)
        return;

    flush_kthread_worker(&sg_policy->worker);
    kthread_stop(sg_policy->thread);
}

static struct dkgov_tunables *dkgov_tunables_alloc(struct dkgov_policy *sg_policy)
{
    struct dkgov_tunables *tunables;

    tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
    if (tunables) {
        gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
        if (!have_governor_per_policy())
            global_tunables = tunables;
    }
    return tunables;
}

static void dkgov_tunables_free(struct dkgov_tunables *tunables)
{
    if (!have_governor_per_policy())
        global_tunables = NULL;

    kfree(tunables);
}

static void store_tunables_data(struct dkgov_tunables *tunables,
        struct cpufreq_policy *policy)
{
    struct dkgov_tunables *ptunables;
    unsigned int cpu = cpumask_first(policy->related_cpus);

    ptunables = &per_cpu(cached_tunables, cpu);
    if (!ptunables)
        return;
    ptunables->up_rate_limit_us = tunables->up_rate_limit_us;
    ptunables->down_rate_limit_us = tunables->down_rate_limit_us;
    ptunables->boost_perc = tunables->boost_perc;
    pr_debug("tunables data saved for cpu[%u]\n", cpu);
}

static void get_tunables_data(struct dkgov_tunables *tunables,
        struct cpufreq_policy *policy)
{
    struct dkgov_tunables *ptunables;
    unsigned int lat;
    unsigned int cpu = cpumask_first(policy->related_cpus);

    ptunables = &per_cpu(cached_tunables, cpu);
    if (!ptunables)
        goto initialize;

    if (ptunables->up_rate_limit_us > 0) {
        tunables->up_rate_limit_us = ptunables->up_rate_limit_us;
        tunables->down_rate_limit_us = ptunables->down_rate_limit_us;
        tunables->boost_perc = ptunables->boost_perc;
        pr_debug("tunables data restored for cpu[%u]\n", cpu);
        goto out;
    }

initialize:
    tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
    tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
    lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
    if (lat) {
        tunables->up_rate_limit_us *= lat;
        tunables->down_rate_limit_us *= lat;
    }
    tunables->boost_perc = BOOST_PERC;
    pr_debug("tunables data initialized for cpu[%u]\n", cpu);
out:
    return;
}
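
/*
 * The per-CPU cached_tunables copies preserve a policy's tunables across
 * governor switches: dkgov_exit() stores them via store_tunables_data()
 * and the next dkgov_init() restores them, falling back to the
 * LATENCY_MULTIPLIER-based defaults on first use (up_rate_limit_us == 0
 * marks a cold cache).
 */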

static int dkgov_init(struct cpufreq_policy *policy)
{
    struct dkgov_policy *sg_policy;
    struct dkgov_tunables *tunables;
    int ret = 0;

    /* State should be equivalent to EXIT */
    if (policy->governor_data)
        return -EBUSY;

    sg_policy = dkgov_policy_alloc(policy);
    if (!sg_policy)
        return -ENOMEM;

    ret = dkgov_kthread_create(sg_policy);
    if (ret)
        goto free_sg_policy;

    mutex_lock(&global_tunables_lock);

    if (global_tunables) {
        if (WARN_ON(have_governor_per_policy())) {
            ret = -EINVAL;
            goto stop_kthread;
        }
        policy->governor_data = sg_policy;
        sg_policy->tunables = global_tunables;

        gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
        goto out;
    }

    tunables = dkgov_tunables_alloc(sg_policy);
    if (!tunables) {
        ret = -ENOMEM;
        goto stop_kthread;
    }

    get_tunables_data(tunables, policy);
    policy->governor_data = sg_policy;
    sg_policy->tunables = tunables;

    ret = kobject_init_and_add(&tunables->attr_set.kobj, &dkgov_tunables_ktype,
                   get_governor_parent_kobj(policy), "%s",
                   darknesssched_gov.name);
    if (ret)
        goto fail;

out:
    mutex_unlock(&global_tunables_lock);

    cpufreq_enable_fast_switch(policy);
    return 0;

fail:
    policy->governor_data = NULL;
    dkgov_tunables_free(tunables);

stop_kthread:
    dkgov_kthread_stop(sg_policy);
    /*
     * Unlock here rather than in free_sg_policy: the kthread-creation
     * failure path jumps to free_sg_policy before the lock is taken.
     */
    mutex_unlock(&global_tunables_lock);

free_sg_policy:
    dkgov_policy_free(sg_policy);
    pr_err("initialization failed (error %d)\n", ret);
    return ret;
}

static void dkgov_exit(struct cpufreq_policy *policy)
{
    struct dkgov_policy *sg_policy = policy->governor_data;
    struct dkgov_tunables *tunables = sg_policy->tunables;
    unsigned int count;

    cpufreq_disable_fast_switch(policy);

    mutex_lock(&global_tunables_lock);

    store_tunables_data(sg_policy->tunables, policy);
    count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
    policy->governor_data = NULL;
    if (!count)
        dkgov_tunables_free(tunables);

    mutex_unlock(&global_tunables_lock);

    dkgov_kthread_stop(sg_policy);
    dkgov_policy_free(sg_policy);
}

static int dkgov_start(struct cpufreq_policy *policy)
{
    struct dkgov_policy *sg_policy = policy->governor_data;
    unsigned int cpu;

    sg_policy->up_rate_delay_ns =
        sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
    sg_policy->down_rate_delay_ns =
        sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
    sg_policy->up_rate_delay_prev_ns =
        sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
    sg_policy->down_rate_delay_prev_ns =
        sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
    update_min_rate_limit_us(sg_policy);
    sg_policy->last_freq_update_time = 0;
    sg_policy->next_freq = UINT_MAX;
    sg_policy->work_in_progress = false;
    sg_policy->need_freq_update = false;

    for_each_cpu(cpu, policy->cpus) {
        struct dkgov_cpu *sg_cpu = &per_cpu(dkgov_cpu, cpu);

        sg_cpu->sg_policy = sg_policy;
        if (policy_is_shared(policy)) {
            sg_cpu->util = 0;
            sg_cpu->max = 0;
            sg_cpu->flags = SCHED_CPUFREQ_DL;
            sg_cpu->last_update = 0;
            sg_cpu->iowait_boost = 0;
            sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
            cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                             dkgov_update_shared);
        } else {
            cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                             dkgov_update_single);
        }
    }
    return 0;
}

static void dkgov_stop(struct cpufreq_policy *policy)
{
    struct dkgov_policy *sg_policy = policy->governor_data;
    unsigned int cpu;

    for_each_cpu(cpu, policy->cpus)
        cpufreq_remove_update_util_hook(cpu);

    synchronize_sched();

    irq_work_sync(&sg_policy->irq_work);
    kthread_cancel_work_sync(&sg_policy->work);
}

static void dkgov_limits(struct cpufreq_policy *policy)
{
    struct dkgov_policy *sg_policy = policy->governor_data;

    if (!policy->fast_switch_enabled) {
        mutex_lock(&sg_policy->work_lock);
        cpufreq_policy_apply_limits(policy);
        mutex_unlock(&sg_policy->work_lock);
    }

    sg_policy->need_freq_update = true;
}

static struct cpufreq_governor darknesssched_gov = {
    .name = "darknesssched",
    .owner = THIS_MODULE,
    .init = dkgov_init,
    .exit = dkgov_exit,
    .start = dkgov_start,
    .stop = dkgov_stop,
    .limits = dkgov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_DARKNESSSCHED
struct cpufreq_governor *cpufreq_default_governor(void)
{
    return &darknesssched_gov;
}
#endif

static int __init dkgov_register(void)
{
    return cpufreq_register_governor(&darknesssched_gov);
}
fs_initcall(dkgov_register);
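
/*
 * Usage sketch (assuming the standard cpufreq sysfs layout; with
 * per-policy governors the tunables directory sits under the policy
 * directory instead):
 *
 *   echo darknesssched > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 *   cat /sys/devices/system/cpu/cpu0/cpufreq/darknesssched/boost_perc
 *   echo 10 > /sys/devices/system/cpu/cpu0/cpufreq/darknesssched/boost_perc
 */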