Advertisement
Guest User

Untitled

a guest
Jul 20th, 2018
232
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 23.82 KB | None | 0 0
  1. /*
  2.  * linux/include/linux/cpufreq.h
  3.  *
  4.  * Copyright (C) 2001 Russell King
  5.  *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  6.  *
  7.  * This program is free software; you can redistribute it and/or modify
  8.  * it under the terms of the GNU General Public License version 2 as
  9.  * published by the Free Software Foundation.
  10.  */
  11. #ifndef _LINUX_CPUFREQ_H
  12. #define _LINUX_CPUFREQ_H
  13.  
  14. #include <linux/clk.h>
  15. #include <linux/cpumask.h>
  16. #include <linux/completion.h>
  17. #include <linux/kobject.h>
  18. #include <linux/notifier.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/sysfs.h>
  21.  
  22. /*********************************************************************
  23.  *                        CPUFREQ INTERFACE                          *
  24.  *********************************************************************/
  25. /*
  26.  * Frequency values here are CPU kHz
  27.  *
  28.  * Maximum transition latency is in nanoseconds - if it's unknown,
  29.  * CPUFREQ_ETERNAL shall be used.
  30.  */
  31.  
  32. #define CPUFREQ_ETERNAL         (-1)
  33. #define CPUFREQ_NAME_LEN        16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
  35. #define CPUFREQ_NAME_PLEN       (CPUFREQ_NAME_LEN + 1)
  36.  
  37. struct cpufreq_governor;
  38.  
/* Payload delivered with CPUFREQ_PRECHANGE/CPUFREQ_POSTCHANGE notifications. */
struct cpufreq_freqs {
    unsigned int cpu;   /* cpu nr */
    unsigned int old;   /* frequency before the transition, in kHz */
    unsigned int new;   /* frequency after the transition, in kHz */
    u8 flags;       /* flags of cpufreq_driver, see below. */
};
  45.  
/* Hardware frequency limits and switching latency reported by the driver. */
struct cpufreq_cpuinfo {
    unsigned int        max_freq;   /* in kHz */
    unsigned int        min_freq;   /* in kHz */

    /* in 10^(-9) s = nanoseconds */
    unsigned int        transition_latency;
};
  53.  
/* Frequency bounds requested by the user, kept alongside the active min/max. */
struct cpufreq_user_policy {
    unsigned int        min;    /* in kHz */
    unsigned int        max;    /* in kHz */
};
  58.  
/*
 * Per-clock-domain scaling policy: the frequency limits, the governor
 * driving them, and the bookkeeping the cpufreq core needs for one group
 * of CPUs that share a clock.
 */
struct cpufreq_policy {
    /* CPUs sharing clock, require sw coordination */
    cpumask_var_t       cpus;   /* Online CPUs only */
    cpumask_var_t       related_cpus; /* Online + Offline CPUs */
    cpumask_var_t       real_cpus; /* Related and present */

    unsigned int        shared_type; /* ACPI: ANY or ALL affected CPUs
                        should set cpufreq */
    unsigned int        cpu;    /* cpu managing this policy, must be online */

    struct clk      *clk;
    struct cpufreq_cpuinfo  cpuinfo;/* see above */

    unsigned int        min;    /* in kHz */
    unsigned int        max;    /* in kHz */
    unsigned int        cur;    /* in kHz, only needed if cpufreq
                     * governors are used */
    unsigned int        restore_freq; /* = policy->cur before transition */
    unsigned int        suspend_freq; /* freq to set during suspend */

    unsigned int        policy; /* see above */
    unsigned int        last_policy; /* policy before unplug */
    struct cpufreq_governor *governor; /* see below */
    void            *governor_data;
    bool            governor_enabled; /* governor start/stop flag */
    char            last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

    struct work_struct  update; /* if update_policy() needs to be
                     * called, but you're in IRQ context */

    struct cpufreq_user_policy user_policy;
    struct cpufreq_frequency_table  *freq_table;

    struct list_head        policy_list;
    struct kobject      kobj;
    struct completion   kobj_unregister;

    /*
     * The rules for this semaphore:
     * - Any routine that wants to read from the policy structure will
     *   do a down_read on this semaphore.
     * - Any routine that will write to the policy structure and/or may take away
     *   the policy altogether (eg. CPU hotplug), will hold this lock in write
     *   mode before doing so.
     *
     * Additional rules:
     * - Lock should not be held across
     *     __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
     */
    struct rw_semaphore rwsem;


    /*
     * Fast switch flags:
     * - fast_switch_possible should be set by the driver if it can
     *   guarantee that frequency can be changed on any CPU sharing the
     *   policy and that the change will affect all of the policy CPUs then.
     * - fast_switch_enabled is to be set by governors that support fast
     *   frequency switching with the help of cpufreq_enable_fast_switch().
     */
    bool                    fast_switch_possible;
    bool                    fast_switch_enabled;

    /*
     * Preferred average time interval between consecutive invocations of
     * the driver to set the frequency for this policy.  To be set by the
     * scaling driver (0, which is the default, means no preference).
     */
    unsigned int        up_transition_delay_us;
    unsigned int        down_transition_delay_us;

    /* Boost switch for tasks with p->in_iowait set */
    bool iowait_boost_enable;

    /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
    unsigned int cached_target_freq;
    int cached_resolved_idx;

    /* Synchronization for frequency transitions */
    bool            transition_ongoing; /* Tracks transition status */
    spinlock_t      transition_lock;
    wait_queue_head_t   transition_wait;
    struct task_struct  *transition_task; /* Task which is doing the transition */

    /* cpufreq-stats */
    struct cpufreq_stats    *stats;

    /* For cpufreq driver's internal use */
    void            *driver_data;
};
  149.  
  150. /* Only for ACPI */
  151. #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
  152. #define CPUFREQ_SHARED_TYPE_HW   (1) /* HW does needed coordination */
  153. #define CPUFREQ_SHARED_TYPE_ALL  (2) /* All dependent CPUs should set freq */
  154. #define CPUFREQ_SHARED_TYPE_ANY  (3) /* Freq can be set from any dependent CPU*/
  155.  
  156. #ifdef CONFIG_CPU_FREQ
  157. struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
  158. struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
  159. void cpufreq_cpu_put(struct cpufreq_policy *policy);
  160. extern unsigned int cluster1_first_cpu;
  161. extern bool fp_irq_cnt;
  162. extern void c0_cpufreq_limit_queue(void);
  163. extern void c1_cpufreq_limit_queue(void);
  164. #else
  165. static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
  166. {
  167.     return NULL;
  168. }
  169. static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
  170. {
  171.     return NULL;
  172. }
  173. static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
  174. #endif
  175.  
/* True when this policy spans more than one online CPU (shared clock domain). */
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
    return cpumask_weight(policy->cpus) > 1;
}
  180.  
  181. /* /sys/devices/system/cpu/cpufreq: entry point for global variables */
  182. extern struct kobject *cpufreq_global_kobject;
  183.  
  184. #ifdef CONFIG_CPU_FREQ
  185. unsigned int cpufreq_get(unsigned int cpu);
  186. unsigned int cpufreq_quick_get(unsigned int cpu);
  187. unsigned int cpufreq_quick_get_max(unsigned int cpu);
  188. void disable_cpufreq(void);
  189.  
  190. u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
  191. int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
  192. int cpufreq_update_policy(unsigned int cpu);
  193. bool have_governor_per_policy(void);
  194. bool cpufreq_driver_is_slow(void);
  195. struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
  196. #else
  197. static inline unsigned int cpufreq_get(unsigned int cpu)
  198. {
  199.     return 0;
  200. }
  201. static inline unsigned int cpufreq_quick_get(unsigned int cpu)
  202. {
  203.     return 0;
  204. }
  205. static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
  206. {
  207.     return 0;
  208. }
  209. static inline void disable_cpufreq(void) { }
  210. #endif
  211.  
  212. /*********************************************************************
  213.  *                      CPUFREQ DRIVER INTERFACE                     *
  214.  *********************************************************************/
  215.  
  216. #define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
  217. #define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
  218. #define CPUFREQ_RELATION_C 2  /* closest frequency to target */
  219.  
/* Per-policy sysfs attribute: show/store operate on one cpufreq_policy. */
struct freq_attr {
    struct attribute attr;
    ssize_t (*show)(struct cpufreq_policy *, char *);
    ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
  225.  
  226. #define cpufreq_freq_attr_ro(_name)     \
  227. static struct freq_attr _name =         \
  228. __ATTR(_name, 0444, show_##_name, NULL)
  229.  
  230. #define cpufreq_freq_attr_ro_perm(_name, _perm) \
  231. static struct freq_attr _name =         \
  232. __ATTR(_name, _perm, show_##_name, NULL)
  233.  
  234. #define cpufreq_freq_attr_rw(_name)     \
  235. static struct freq_attr _name =         \
  236. __ATTR(_name, 0644, show_##_name, store_##_name)
  237.  
/* Global sysfs attribute, not tied to a single policy (raw kobject ops). */
struct global_attr {
    struct attribute attr;
    ssize_t (*show)(struct kobject *kobj,
            struct attribute *attr, char *buf);
    ssize_t (*store)(struct kobject *a, struct attribute *b,
             const char *c, size_t count);
};
  245.  
  246. #define define_one_global_ro(_name)     \
  247. static struct global_attr _name =       \
  248. __ATTR(_name, 0444, show_##_name, NULL)
  249.  
  250. #define define_one_global_rw(_name)     \
  251. static struct global_attr _name =       \
  252. __ATTR(_name, 0644, show_##_name, store_##_name)
  253.  
  254.  
/*
 * Callbacks and capabilities a scaling driver hands to the core via
 * cpufreq_register_driver().
 */
struct cpufreq_driver {
    char        name[CPUFREQ_NAME_LEN];
    u8      flags;      /* CPUFREQ_STICKY etc., see "flags" below */
    void        *driver_data;

    /* needed by all drivers */
    int     (*init)(struct cpufreq_policy *policy);
    int     (*verify)(struct cpufreq_policy *policy);

    /* define one out of two */
    int     (*setpolicy)(struct cpufreq_policy *policy);

    /*
     * On failure, should always restore frequency to policy->restore_freq
     * (i.e. old freq).
     */
    int     (*target)(struct cpufreq_policy *policy,
                  unsigned int target_freq,
                  unsigned int relation);   /* Deprecated */
    int     (*target_index)(struct cpufreq_policy *policy,
                    unsigned int index);
    /*
     * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
     * unset.
     *
     * get_intermediate should return a stable intermediate frequency
     * platform wants to switch to and target_intermediate() should set CPU
     * to that frequency, before jumping to the frequency corresponding
     * to 'index'. Core will take care of sending notifications and driver
     * doesn't have to handle them in target_intermediate() or
     * target_index().
     *
     * Drivers can return '0' from get_intermediate() in case they don't
     * wish to switch to intermediate frequency for some target frequency.
     * In that case core will directly call ->target_index().
     */
    unsigned int    (*get_intermediate)(struct cpufreq_policy *policy,
                        unsigned int index);
    int     (*target_intermediate)(struct cpufreq_policy *policy,
                           unsigned int index);

    /* should be defined, if possible */
    unsigned int    (*get)(unsigned int cpu);

    /* optional */
    int     (*bios_limit)(int cpu, unsigned int *limit);

    int     (*exit)(struct cpufreq_policy *policy);
    void        (*stop_cpu)(struct cpufreq_policy *policy);
    int     (*suspend)(struct cpufreq_policy *policy);
    int     (*resume)(struct cpufreq_policy *policy);

    /* Will be called after the driver is fully initialized */
    void        (*ready)(struct cpufreq_policy *policy);

    struct freq_attr **attr;

    /* platform specific boost support code */
    bool        boost_supported;
    bool        boost_enabled;
    int     (*set_boost)(int state);
};
  317.  
  318. /* flags */
  319. #define CPUFREQ_STICKY      (1 << 0)    /* driver isn't removed even if
  320.                            all ->init() calls failed */
  321. #define CPUFREQ_CONST_LOOPS (1 << 1)    /* loops_per_jiffy or other
  322.                            kernel "constants" aren't
  323.                            affected by frequency
  324.                            transitions */
  325. #define CPUFREQ_PM_NO_WARN  (1 << 2)    /* don't warn on suspend/resume
  326.                            speed mismatches */
  327.  
  328. /*
  329.  * This should be set by platforms having multiple clock-domains, i.e.
  330.  * supporting multiple policies. With this sysfs directories of governor would
  331.  * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
  332.  * governor with different tunables for different clusters.
  333.  */
  334. #define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
  335.  
  336. /*
  337.  * Driver will do POSTCHANGE notifications from outside of their ->target()
  338.  * routine and so must set cpufreq_driver->flags with this flag, so that core
  339.  * can handle them specially.
  340.  */
  341. #define CPUFREQ_ASYNC_NOTIFICATION  (1 << 4)
  342.  
  343. /*
  344.  * Set by drivers which want cpufreq core to check if CPU is running at a
  345.  * frequency present in freq-table exposed by the driver. For these drivers if
  346.  * CPU is found running at an out of table freq, we will try to set it to a freq
  347.  * from the table. And if that fails, we will stop further boot process by
  348.  * issuing a BUG_ON().
  349.  */
  350. #define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
  351.  
  352. /*
  353.  * Indicates that it is safe to call cpufreq_driver_target from
  354.  * non-interruptable context in scheduler hot paths.  Drivers must
  355.  * opt-in to this flag, as the safe default is that they might sleep
  356.  * or be too slow for hot path use.
  357.  */
  358. #define CPUFREQ_DRIVER_FAST     (1 << 6)
  359.  
  360. int cpufreq_register_driver(struct cpufreq_driver *driver_data);
  361. int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
  362.  
  363. const char *cpufreq_get_current_driver(void);
  364. void *cpufreq_get_driver_data(void);
  365.  
  366. static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
  367.         unsigned int min, unsigned int max)
  368. {
  369.     if (policy->min < min)
  370.         policy->min = min;
  371.     if (policy->max < min)
  372.         policy->max = min;
  373.     if (policy->min > max)
  374.         policy->min = max;
  375.     if (policy->max > max)
  376.         policy->max = max;
  377.     if (policy->min > policy->max)
  378.         policy->min = policy->max;
  379.     return;
  380. }
  381.  
/* Clamp policy->min/max to the hardware limits recorded in ->cpuinfo. */
static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
            policy->cpuinfo.max_freq);
}
  388.  
  389. #ifdef CONFIG_CPU_FREQ
  390. void cpufreq_suspend(void);
  391. void cpufreq_resume(void);
  392. int cpufreq_generic_suspend(struct cpufreq_policy *policy);
  393. #else
/* !CONFIG_CPU_FREQ stubs: suspend/resume of the cpufreq core is a no-op. */
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
  396. #endif
  397.  
  398. /*********************************************************************
  399.  *                     CPUFREQ NOTIFIER INTERFACE                    *
  400.  *********************************************************************/
  401.  
  402. #define CPUFREQ_TRANSITION_NOTIFIER (0)
  403. #define CPUFREQ_POLICY_NOTIFIER     (1)
  404. #define CPUFREQ_GOVINFO_NOTIFIER    (2)
  405.  
  406. /* Transition notifiers */
  407. #define CPUFREQ_PRECHANGE       (0)
  408. #define CPUFREQ_POSTCHANGE      (1)
  409.  
  410. /* Policy Notifiers  */
  411. #define CPUFREQ_ADJUST          (0)
  412. #define CPUFREQ_NOTIFY          (1)
  413. #define CPUFREQ_START           (2)
  414. #define CPUFREQ_CREATE_POLICY       (3)
  415. #define CPUFREQ_REMOVE_POLICY       (4)
  416.  
  417. /* Govinfo Notifiers */
  418. #define CPUFREQ_LOAD_CHANGE     (0)
  419.  
  420. #ifdef CONFIG_CPU_FREQ
  421. int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
  422. int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
  423.  
  424. void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
  425.         struct cpufreq_freqs *freqs);
  426. void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
  427.         struct cpufreq_freqs *freqs, int transition_failed);
  428. /*
  429.  * Governor specific info that can be passed to modules that subscribe
  430.  * to CPUFREQ_GOVINFO_NOTIFIER
  431.  */
struct cpufreq_govinfo {
    unsigned int cpu;           /* CPU the sample refers to */
    unsigned int load;          /* load value reported by the governor */
    unsigned int sampling_rate_us;  /* governor sampling period, in microseconds */
};
  437. extern struct atomic_notifier_head cpufreq_govinfo_notifier_list;
  438.  
  439. #else /* CONFIG_CPU_FREQ */
/* !CONFIG_CPU_FREQ stub: accept the registration and do nothing. */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
                        unsigned int list)
{
    return 0;
}
/* !CONFIG_CPU_FREQ stub: nothing was registered, report success. */
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
                        unsigned int list)
{
    return 0;
}
  450. #endif /* !CONFIG_CPU_FREQ */
  451.  
  452. /**
  453.  * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
  454.  * safe)
  455.  * @old:   old value
  456.  * @div:   divisor
  457.  * @mult:  multiplier
  458.  *
  459.  *
  460.  * new = old * mult / div
  461.  */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
        u_int mult)
{
#if BITS_PER_LONG == 32
    /* Widen to 64 bits first so old * mult cannot overflow 32 bits. */
    u64 result = ((u64) old) * ((u64) mult);
    do_div(result, div);    /* 64-by-32 divide helper; plain '/' on u64 is unavailable here */
    return (unsigned long) result;

#elif BITS_PER_LONG == 64
    /* unsigned long is already 64-bit; the u64 cast widens the product. */
    unsigned long result = old * ((u64) mult);
    result /= div;
    return result;
#endif
}
  476.  
  477. /*********************************************************************
  478.  *                          CPUFREQ GOVERNORS                        *
  479.  *********************************************************************/
  480.  
  481. /*
  482.  * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
  484.  * two generic policies are available:
  485.  */
  486. #define CPUFREQ_POLICY_POWERSAVE    (1)
  487. #define CPUFREQ_POLICY_PERFORMANCE  (2)
  488.  
  489. /* Governor Events */
  490. #define CPUFREQ_GOV_START   1
  491. #define CPUFREQ_GOV_STOP    2
  492. #define CPUFREQ_GOV_LIMITS  3
  493. #define CPUFREQ_GOV_POLICY_INIT 4
  494. #define CPUFREQ_GOV_POLICY_EXIT 5
  495.  
/*
 * A scaling governor.  The single (*governor)() callback multiplexes all
 * governor events (CPUFREQ_GOV_START/STOP/LIMITS/POLICY_INIT/POLICY_EXIT).
 */
struct cpufreq_governor {
    char    name[CPUFREQ_NAME_LEN];
    int initialized;
    int (*governor) (struct cpufreq_policy *policy,
                 unsigned int event);
    ssize_t (*show_setspeed)    (struct cpufreq_policy *policy,
                     char *buf);
    int (*store_setspeed)   (struct cpufreq_policy *policy,
                     unsigned int freq);
    unsigned int max_transition_latency; /* HW must be able to switch to
            next freq faster than this value in nano secs or we
            will fallback to performance governor */
    struct list_head    governor_list;
    struct module       *owner;
};
  511.  
  512. /* Pass a target to the cpufreq driver */
  513. int cpufreq_driver_target(struct cpufreq_policy *policy,
  514.                  unsigned int target_freq,
  515.                  unsigned int relation);
  516. int __cpufreq_driver_target(struct cpufreq_policy *policy,
  517.                    unsigned int target_freq,
  518.                    unsigned int relation);
  519. unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
  520.                      unsigned int target_freq);
  521. int cpufreq_register_governor(struct cpufreq_governor *governor);
  522. void cpufreq_unregister_governor(struct cpufreq_governor *governor);
  523.  
  524. /* CPUFREQ DEFAULT GOVERNOR */
  525. /*
  526.  * Performance governor is fallback governor if any other gov failed to auto
  527.  * load due latency restrictions
  528.  */
  529. #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
  530. extern struct cpufreq_governor cpufreq_gov_performance;
  531. #endif
  532. #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
  533. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_performance)
  534. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
  535. extern struct cpufreq_governor cpufreq_gov_powersave;
  536. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_powersave)
  537. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
  538. extern struct cpufreq_governor cpufreq_gov_userspace;
  539. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_userspace)
  540. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
  541. extern struct cpufreq_governor cpufreq_gov_ondemand;
  542. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_ondemand)
  543. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
  544. extern struct cpufreq_governor cpufreq_gov_conservative;
  545. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_conservative)
  546. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
  547. extern struct cpufreq_governor cpufreq_gov_interactive;
  548. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_interactive)
  549. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
  550. extern struct cpufreq_governor cpufreq_gov_sched;
  551. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_sched)
  552. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL)
  553. extern struct cpufreq_governor cpufreq_gov_schedutil;
  554. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_schedutil)
  555. #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_DARKNESSSCHED)
  556. extern struct cpufreq_governor cpufreq_gov_darknesssched;
  557. #define CPUFREQ_DEFAULT_GOVERNOR    (&cpufreq_gov_darknesssched)
  558. #endif
  559.  
/* Pull the current frequency back inside [policy->min, policy->max]. */
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
    if (policy->max < policy->cur)
        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
    else if (policy->min > policy->cur)
        __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}
  567.  
  568. /* Governor attribute set */
/* Attribute set shared between the policies using one governor instance. */
struct gov_attr_set {
    struct kobject kobj;
    struct list_head policy_list;   /* nodes attached via gov_attr_set_get() */
    struct mutex update_lock;
    int usage_count;                /* attachments; see gov_attr_set_get()/put() */
};
  575.  
  576. /* sysfs ops for cpufreq governors */
  577. extern const struct sysfs_ops governor_sysfs_ops;
  578.  
  579. void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
  580. void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
  581. unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
  582.  
  583. /* Governor sysfs attribute */
/* Governor tunable exposed through sysfs; show/store operate on a gov_attr_set. */
struct governor_attr {
    struct attribute attr;
    ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
    ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
             size_t count);
};
  590.  
  591. /*********************************************************************
  592.  *                     FREQUENCY TABLE HELPERS                       *
  593.  *********************************************************************/
  594.  
  595. /* Special Values of .frequency field */
  596. #define CPUFREQ_ENTRY_INVALID   ~0u
  597. #define CPUFREQ_TABLE_END   ~1u
  598. /* Special Values of .flags field */
  599. #define CPUFREQ_BOOST_FREQ  (1 << 0)
  600.  
/*
 * One entry of a driver-provided frequency table.  A table ends with an
 * entry whose .frequency is CPUFREQ_TABLE_END; individual entries can be
 * disabled by setting .frequency to CPUFREQ_ENTRY_INVALID.
 */
struct cpufreq_frequency_table {
    unsigned int    flags;      /* e.g. CPUFREQ_BOOST_FREQ */
    unsigned int    driver_data; /* driver specific data, not used by core */
    unsigned int    frequency; /* kHz - doesn't need to be in ascending
                    * order */
};
  607.  
  608. #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
  609. int dev_pm_opp_init_cpufreq_table(struct device *dev,
  610.                   struct cpufreq_frequency_table **table);
  611. void dev_pm_opp_free_cpufreq_table(struct device *dev,
  612.                    struct cpufreq_frequency_table **table);
  613. #else
/* Stub when OPP/cpufreq support is compiled out: no table can be built. */
static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
                        struct cpufreq_frequency_table
                        **table)
{
    return -EINVAL;
}
  620.  
/* Stub: nothing was allocated by the init stub, so nothing to free. */
static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
                         struct cpufreq_frequency_table
                         **table)
{
}
  626. #endif
  627.  
  628. static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
  629. {
  630.     while ((*pos)->frequency != CPUFREQ_TABLE_END)
  631.         if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID)
  632.             return true;
  633.         else
  634.             (*pos)++;
  635.     return false;
  636. }
  637.  
  638. /*
  639.  * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
  640.  * @pos:    the cpufreq_frequency_table * to use as a loop cursor.
  641.  * @table:  the cpufreq_frequency_table * to iterate over.
  642.  */
  643.  
  644. #define cpufreq_for_each_entry(pos, table)  \
  645.     for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
  646.  
  647. /*
  648.  * cpufreq_for_each_valid_entry -     iterate over a cpufreq_frequency_table
  649.  *  excluding CPUFREQ_ENTRY_INVALID frequencies.
  650.  * @pos:        the cpufreq_frequency_table * to use as a loop cursor.
  651.  * @table:      the cpufreq_frequency_table * to iterate over.
  652.  */
  653.  
  654. #define cpufreq_for_each_valid_entry(pos, table)    \
  655.     for (pos = table; cpufreq_next_valid(&pos); pos++)
  656.  
  657. int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
  658.                     struct cpufreq_frequency_table *table);
  659.  
  660. int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
  661.                    struct cpufreq_frequency_table *table);
  662. int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
  663.  
  664. int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
  665.                    struct cpufreq_frequency_table *table,
  666.                    unsigned int target_freq,
  667.                    unsigned int relation,
  668.                    unsigned int *index);
  669. int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
  670.         unsigned int freq);
  671.  
  672. ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
  673.  
  674. #ifdef CONFIG_CPU_FREQ
  675. int cpufreq_boost_trigger_state(int state);
  676. int cpufreq_boost_supported(void);
  677. int cpufreq_boost_enabled(void);
  678. int cpufreq_enable_boost_support(void);
  679. bool policy_has_boost_freq(struct cpufreq_policy *policy);
  680. #else
  681. static inline int cpufreq_boost_trigger_state(int state)
  682. {
  683.     return 0;
  684. }
  685. static inline int cpufreq_boost_supported(void)
  686. {
  687.     return 0;
  688. }
  689. static inline int cpufreq_boost_enabled(void)
  690. {
  691.     return 0;
  692. }
  693.  
  694. static inline int cpufreq_enable_boost_support(void)
  695. {
  696.     return -EINVAL;
  697. }
  698.  
  699. static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
  700. {
  701.     return false;
  702. }
  703. #endif
/* the following function is for cpufreq core use only */
  705. struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
  706.  
  707. /* the following are really really optional */
  708. extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
  709. extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
  710. extern struct freq_attr *cpufreq_generic_attr[];
  711. int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
  712.                       struct cpufreq_frequency_table *table);
  713.  
  714. unsigned int cpufreq_generic_get(unsigned int cpu);
  715. int cpufreq_generic_init(struct cpufreq_policy *policy,
  716.         struct cpufreq_frequency_table *table,
  717.         unsigned int transition_latency);
  718.  
  719. struct sched_domain;
  720. unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
  721. unsigned long cpufreq_scale_max_freq_capacity(struct sched_domain *sd, int cpu);
  722. unsigned long cpufreq_scale_min_freq_capacity(struct sched_domain *sd, int cpu);
  723. #endif /* _LINUX_CPUFREQ_H */
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement