msm_hotplug powersuspend (exp)

  1. /*
  2.  * MSM Hotplug Driver
  3.  *
  4.  * Copyright (c) 2013-2014, Fluxi <linflux@arcor.de>
  5.  * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  6.  *
  7.  * This program is free software; you can redistribute it and/or modify
  8.  * it under the terms of the GNU General Public License version 2 as
  9.  * published by the Free Software Foundation.
  10.  *
  11.  */
  12.  
  13. #include <linux/module.h>
  14. #include <linux/kernel.h>
  15. #include <linux/cpu.h>
  16. #include <linux/init.h>
  17. #include <linux/workqueue.h>
  18. #include <linux/sched.h>
  19. #include <linux/platform_device.h>
  20. #include <linux/device.h>
  21. #include <linux/slab.h>
  22. #include <linux/cpufreq.h>
  23. #include <linux/input.h>
  24. #include <linux/math64.h>
  25. #include <linux/kernel_stat.h>
  26. #include <linux/tick.h>
  27.  
  28. #ifdef CONFIG_POWERSUSPEND
  29. #include <linux/powersuspend.h>
  30. #else
  31. #include <linux/fb.h>
  32. #endif
  33.  
  34. #define MSM_HOTPLUG     "msm_hotplug"
  35. #define HOTPLUG_ENABLED     1
  36. #define DEFAULT_UPDATE_RATE HZ / 10
  37. #define START_DELAY     HZ * 20
  38. #define MIN_INPUT_INTERVAL  150 * 1000L
#define DEFAULT_HISTORY_SIZE    10
#define MAX_HISTORY_SIZE    20  /* largest history the sysfs node accepts */
  40. #define DEFAULT_DOWN_LOCK_DUR   1000
  41. #define DEFAULT_BOOST_LOCK_DUR  4000 * 1000L
  42. #define DEFAULT_NR_CPUS_BOOSTED 2
  43. #define DEFAULT_MIN_CPUS_ONLINE 1
  44. #define DEFAULT_MAX_CPUS_ONLINE NR_CPUS
  45. #define DEFAULT_FAST_LANE_LOAD  99
  46.  
  47. static unsigned int debug = 0;
  48. module_param_named(debug_mask, debug, uint, 0644);
  49.  
  50. #define dprintk(msg...)     \
  51. do {                \
  52.     if (debug)      \
  53.         pr_info(msg);   \
  54. } while (0)
  55.  
  56. static struct cpu_hotplug {
  57.     unsigned int enabled;
  58.     unsigned int target_cpus;
  59.     unsigned int min_cpus_online;
  60.     unsigned int max_cpus_online;
  61.     unsigned int suspend_max_freq;
  62.     unsigned int suspend_max_cpus;
  63.     unsigned int cpus_boosted;
  64.     unsigned int offline_load;
  65.     unsigned int down_lock_dur;
  66.     u64 boost_lock_dur;
  67.     u64 last_input;
  68.     unsigned int fast_lane_load;
  69.     struct work_struct up_work;
  70.     struct work_struct down_work;
  71.     struct work_struct suspend_work;
  72.     struct work_struct resume_work;
  73. #ifndef CONFIG_POWERSUSPEND
  74.     struct notifier_block notif;
  75. #endif
  76. } hotplug = {
  77.     .enabled = HOTPLUG_ENABLED,
  78.     .min_cpus_online = DEFAULT_MIN_CPUS_ONLINE,
  79.     .max_cpus_online = DEFAULT_MAX_CPUS_ONLINE,
  80.     .cpus_boosted = DEFAULT_NR_CPUS_BOOSTED,
  81.     .down_lock_dur = DEFAULT_DOWN_LOCK_DUR,
  82.     .boost_lock_dur = DEFAULT_BOOST_LOCK_DUR,
  83.     .fast_lane_load = DEFAULT_FAST_LANE_LOAD
  84. };
  85.  
  86. static struct workqueue_struct *hotplug_wq;
  87. static struct delayed_work hotplug_work;
  88.  
  89. static u64 last_boost_time;
  90.  
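/*
 * Polling intervals in "rate [load:rate ...]" form (the same syntax the
 * interactive governor uses for target_loads): entry 0 is the default
 * sampling interval in milliseconds, and each following load:rate pair
 * switches to a different interval once the averaged load reaches that
 * load value (see load_to_update_rate()).
 */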
  91. static unsigned int default_update_rates[] = { DEFAULT_UPDATE_RATE };
  92.  
  93. static struct cpu_stats {
  94.     unsigned int *update_rates;
  95.     int nupdate_rates;
  96.     spinlock_t update_rates_lock;
  97.     unsigned int *load_hist;
  98.     unsigned int hist_size;
  99.     unsigned int hist_cnt;
  100.     unsigned int min_cpus;
  101.     unsigned int total_cpus;
  102.     unsigned int online_cpus;
  103.     unsigned int cur_avg_load;
  104.     unsigned int cur_max_load;
  105.     struct mutex stats_mutex;
  106. } stats = {
  107.     .update_rates = default_update_rates,
  108.     .nupdate_rates = ARRAY_SIZE(default_update_rates),
  109.     .hist_size = DEFAULT_HISTORY_SIZE,
  110.     .min_cpus = 1,
  111.     .total_cpus = NR_CPUS
  112. };
  113.  
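/*
 * "Down lock": a per-CPU grace period that keeps a freshly onlined core
 * from being taken straight back offline. apply_down_lock() sets the flag
 * and schedules remove_down_lock() to clear it after down_lock_dur ms.
 */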
  114. struct down_lock {
  115.     unsigned int locked;
  116.     struct delayed_work lock_rem;
  117. };
  118.  
  119. static DEFINE_PER_CPU(struct down_lock, lock_info);
  120.  
  121. struct cpu_load_data {
  122.     u64 prev_cpu_idle;
  123.     u64 prev_cpu_wall;
  124.     unsigned int avg_load_maxfreq;
  125.     unsigned int cur_load_maxfreq;
  126.     unsigned int samples;
  127.     unsigned int window_size;
  128.     cpumask_var_t related_cpus;
  129. };
  130.  
  131. static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
  132.  
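/*
 * When set, time spent waiting on I/O is treated as busy time in the
 * per-CPU load calculation (same meaning as the cpufreq governors' knob).
 */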
  133. static bool io_is_busy;
  134.  
  135. static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
  136. {
  137.     u64 idle_time;
  138.     u64 cur_wall_time;
  139.     u64 busy_time;
  140.  
  141.     cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
  142.  
  143.     busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
  144.     busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
  145.     busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
  146.     busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
  147.     busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
  148.     busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
  149.  
  150.     idle_time = cur_wall_time - busy_time;
  151.     if (wall)
  152.         *wall = jiffies_to_usecs(cur_wall_time);
  153.  
  154.     return jiffies_to_usecs(idle_time);
  155. }
  156.  
  157. static inline u64 get_cpu_idle_time(unsigned int cpu, u64 *wall)
  158. {
  159.     u64 idle_time = get_cpu_idle_time_us(cpu, io_is_busy ? wall : NULL);
  160.  
  161.     if (idle_time == -1ULL)
  162.         return get_cpu_idle_time_jiffy(cpu, wall);
  163.     else if (!io_is_busy)
  164.         idle_time += get_cpu_iowait_time_us(cpu, wall);
  165.  
  166.     return idle_time;
  167. }
  168.  
  169. static int update_average_load(unsigned int cpu)
  170. {
  171.     int ret;
  172.     unsigned int idle_time, wall_time;
  173.     unsigned int cur_load, load_max_freq;
  174.     u64 cur_wall_time, cur_idle_time;
  175.     struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
  176.     struct cpufreq_policy policy;
  177.  
  178.     ret = cpufreq_get_policy(&policy, cpu);
  179.     if (ret)
  180.         return -EINVAL;
  181.  
  182.     cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
  183.  
  184.     wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
  185.     pcpu->prev_cpu_wall = cur_wall_time;
  186.  
  187.     idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
  188.     pcpu->prev_cpu_idle = cur_idle_time;
  189.  
  190.     if (unlikely(!wall_time || wall_time < idle_time))
  191.         return 0;
  192.  
  193.     cur_load = 100 * (wall_time - idle_time) / wall_time;
  194.  
  195.     /* Calculate the scaled load across cpu */
  196.     load_max_freq = (cur_load * policy.cur) / policy.max;
  197.  
  198.     if (!pcpu->avg_load_maxfreq) {
  199.         /* This is the first sample in this window */
  200.         pcpu->avg_load_maxfreq = load_max_freq;
  201.         pcpu->window_size = wall_time;
  202.     } else {
        /*
         * There is already a sample available in this window.
         * Compute a weighted average with the previous entry so that
         * we get the precise weighted load.
         */
  208.         pcpu->avg_load_maxfreq =
  209.             ((pcpu->avg_load_maxfreq * pcpu->window_size) +
  210.             (load_max_freq * wall_time)) /
  211.             (wall_time + pcpu->window_size);
  212.  
  213.         pcpu->window_size += wall_time;
  214.     }
  215.  
  216.     return 0;
  217. }
  218.  
  219. static unsigned int load_at_max_freq(void)
  220. {
  221.     int cpu;
  222.     unsigned int total_load = 0, max_load = 0;
  223.     struct cpu_load_data *pcpu;
  224.  
  225.     for_each_online_cpu(cpu) {
  226.         pcpu = &per_cpu(cpuload, cpu);
  227.         update_average_load(cpu);
  228.         total_load += pcpu->avg_load_maxfreq;
  229.         pcpu->cur_load_maxfreq = pcpu->avg_load_maxfreq;
  230.         max_load = max(max_load, pcpu->avg_load_maxfreq);
  231.         pcpu->avg_load_maxfreq = 0;
  232.     }
  233.     stats.cur_max_load = max_load;
  234.  
  235.     return total_load;
  236. }
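
/*
 * Sample load_at_max_freq() into a small ring buffer and publish
 * cur_avg_load as the mean of the last hist_size samples (or the raw
 * sample when hist_size is 1).
 */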
  237. static void update_load_stats(void)
  238. {
  239.     unsigned int i, j;
  240.     unsigned int load = 0;
  241.  
  242.     mutex_lock(&stats.stats_mutex);
  243.     stats.online_cpus = num_online_cpus();
  244.  
  245.     if (stats.hist_size > 1) {
  246.         stats.load_hist[stats.hist_cnt] = load_at_max_freq();
  247.     } else {
  248.         stats.cur_avg_load = load_at_max_freq();
  249.         mutex_unlock(&stats.stats_mutex);
  250.         return;
  251.     }
  252.  
  253.     for (i = 0, j = stats.hist_cnt; i < stats.hist_size; i++, j--) {
  254.         load += stats.load_hist[j];
  255.  
  256.         if (j == 0)
  257.             j = stats.hist_size;
  258.     }
  259.  
  260.     if (++stats.hist_cnt == stats.hist_size)
  261.         stats.hist_cnt = 0;
  262.  
  263.     stats.cur_avg_load = load / stats.hist_size;
  264.     mutex_unlock(&stats.stats_mutex);
  265. }
  266.  
  267. struct loads_tbl {
  268.     unsigned int up_threshold;
  269.     unsigned int down_threshold;
  270. };
  271.  
  272. #define LOAD_SCALE(u, d)     \
  273. {                            \
  274.     .up_threshold = u,   \
  275.     .down_threshold = d, \
  276. }
  277.  
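/*
 * Load bands indexed by target core count: entry N holds the averaged
 * load window (scaled to max frequency, so up to 100 per online core)
 * for which N cores should stay online. msm_hotplug_work() walks the
 * table and picks the first band containing the current average load;
 * the all-zero entry terminates the table.
 */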
  278. static struct loads_tbl loads[] = {
  279.     LOAD_SCALE(400, 0),
  280.     LOAD_SCALE(65, 0),
  281.     LOAD_SCALE(120, 50),
  282.     LOAD_SCALE(190, 100),
  283.     LOAD_SCALE(410, 170),
  284.     LOAD_SCALE(0, 0),
  285. };
  286.  
  287. static void apply_down_lock(unsigned int cpu)
  288. {
  289.     struct down_lock *dl = &per_cpu(lock_info, cpu);
  290.  
  291.     dl->locked = 1;
  292.     queue_delayed_work_on(0, hotplug_wq, &dl->lock_rem,
  293.                   msecs_to_jiffies(hotplug.down_lock_dur));
  294. }
  295.  
  296. static void remove_down_lock(struct work_struct *work)
  297. {
  298.     struct down_lock *dl = container_of(work, struct down_lock,
  299.                         lock_rem.work);
  300.     dl->locked = 0;
  301. }
  302.  
  303. static int check_down_lock(unsigned int cpu)
  304. {
  305.     struct down_lock *dl = &per_cpu(lock_info, cpu);
  306.  
  307.     return dl->locked;
  308. }
  309.  
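/*
 * Pick the least-loaded non-boot CPU as the offline candidate. Refuse
 * (-EPERM) if removing it would push the projected load over the current
 * up_threshold, or if its load is still at or above offline_load.
 */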
  310. static int get_lowest_load_cpu(void)
  311. {
  312.     int cpu, lowest_cpu = 0;
  313.     unsigned int lowest_load = UINT_MAX;
  314.     unsigned int cpu_load[NR_CPUS];
  315.     unsigned int proj_load;
  316.     struct cpu_load_data *pcpu;
  317.  
  318.     for_each_online_cpu(cpu) {
  319.         if (cpu == 0)
  320.             continue;
  321.         pcpu = &per_cpu(cpuload, cpu);
  322.         cpu_load[cpu] = pcpu->cur_load_maxfreq;
  323.         if (cpu_load[cpu] < lowest_load) {
  324.             lowest_load = cpu_load[cpu];
  325.             lowest_cpu = cpu;
  326.         }
  327.     }
  328.  
  329.     proj_load = stats.cur_avg_load - lowest_load;
  330.     if (proj_load > loads[stats.online_cpus - 1].up_threshold)
  331.         return -EPERM;
  332.  
  333.     if (hotplug.offline_load && lowest_load >= hotplug.offline_load)
  334.         return -EPERM;
  335.  
  336.     return lowest_cpu;
  337. }
  338.  
  339. static void __ref cpu_up_work(struct work_struct *work)
  340. {
  341.     int cpu;
  342.     unsigned int target;
  343.  
  344.     target = hotplug.target_cpus;
  345.  
  346.     for_each_cpu_not(cpu, cpu_online_mask) {
  347.         if (target == num_online_cpus())
  348.             break;
  349.         if (cpu == 0)
  350.             continue;
  351.         cpu_up(cpu);
  352.         apply_down_lock(cpu);
  353.     }
  354. }
  355.  
  356. static void cpu_down_work(struct work_struct *work)
  357. {
  358.     int cpu, lowest_cpu;
  359.     unsigned int target;
  360.  
  361.     target = hotplug.target_cpus;
  362.  
  363.     for_each_online_cpu(cpu) {
  364.         if (cpu == 0)
  365.             continue;
  366.         lowest_cpu = get_lowest_load_cpu();
  367.         if (lowest_cpu > 0) {
  368.             if (check_down_lock(lowest_cpu))
  369.                 break;
  370.             cpu_down(lowest_cpu);
  371.         }
  372.         if (target == num_online_cpus())
  373.             break;
  374.     }
  375. }
  376.  
  377. static void online_cpu(unsigned int target)
  378. {
  379.     if (!hotplug.enabled)
  380.         return;
  381.  
  382.     if (stats.total_cpus == num_online_cpus())
  383.         return;
  384.  
  385.     hotplug.target_cpus = target;
  386.     queue_work_on(0, hotplug_wq, &hotplug.up_work);
  387. }
  388.  
  389. static void offline_cpu(unsigned int target)
  390. {
  391.     unsigned int online_cpus = num_online_cpus();
  392.     u64 now;
  393.  
  394.     if (!hotplug.enabled)
  395.         return;
  396.  
  397.     if (online_cpus == stats.min_cpus)
  398.         return;
  399.  
  400.     now = ktime_to_us(ktime_get());
  401.     if (online_cpus <= hotplug.cpus_boosted &&
  402.         (now - hotplug.last_input < hotplug.boost_lock_dur))
  403.         return;
  404.  
  405.     hotplug.target_cpus = target;
  406.     queue_work_on(0, hotplug_wq, &hotplug.down_work);
  407. }
  408.  
  409. static unsigned int load_to_update_rate(unsigned int load)
  410. {
  411.     int i, ret;
  412.     unsigned long flags;
  413.  
  414.     spin_lock_irqsave(&stats.update_rates_lock, flags);
  415.  
  416.     for (i = 0; i < stats.nupdate_rates - 1 &&
  417.             load >= stats.update_rates[i+1]; i += 2)
  418.         ;
  419.  
  420.     ret = stats.update_rates[i];
  421.     spin_unlock_irqrestore(&stats.update_rates_lock, flags);
  422.     return ret;
  423. }
  424.  
  425. static void reschedule_hotplug_work(void)
  426. {
  427.     unsigned int delay;
  428.  
  429.     delay = load_to_update_rate(stats.cur_avg_load);
  430.     queue_delayed_work_on(0, hotplug_wq, &hotplug_work,
  431.                   msecs_to_jiffies(delay));
  432. }
  433.  
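/*
 * Main decision loop: refresh the load statistics, jump straight to
 * max_cpus_online when any single core exceeds fast_lane_load, honour
 * the min/max online limits, otherwise map the averaged load onto the
 * loads[] bands, then reschedule itself.
 */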
  434. static void msm_hotplug_work(struct work_struct *work)
  435. {
  436.     unsigned int i, target = 0;
  437.  
  438.     update_load_stats();
  439.  
  440.     if (stats.cur_max_load >= hotplug.fast_lane_load) {
  441.         /* Enter the fast lane */
  442.         online_cpu(hotplug.max_cpus_online);
  443.         goto reschedule;
  444.     }
  445.  
  446.     /* If number of cpus locked, break out early */
  447.     if (hotplug.min_cpus_online == num_possible_cpus()) {
  448.         if (stats.online_cpus != hotplug.min_cpus_online)
  449.             online_cpu(hotplug.min_cpus_online);
  450.         goto reschedule;
  451.     } else if (hotplug.max_cpus_online == stats.min_cpus) {
  452.         if (stats.online_cpus != hotplug.max_cpus_online)
  453.             offline_cpu(hotplug.max_cpus_online);
  454.         goto reschedule;
  455.     }
  456.  
  457.     for (i = stats.min_cpus; loads[i].up_threshold; i++) {
  458.         if (stats.cur_avg_load <= loads[i].up_threshold
  459.             && stats.cur_avg_load > loads[i].down_threshold) {
  460.             target = i;
  461.             break;
  462.         }
  463.     }
  464.  
  465.     if (target > hotplug.max_cpus_online)
  466.         target = hotplug.max_cpus_online;
  467.     else if (target < hotplug.min_cpus_online)
  468.         target = hotplug.min_cpus_online;
  469.  
  470.     if (stats.online_cpus != target) {
  471.         if (target > stats.online_cpus)
  472.             online_cpu(target);
  473.         else if (target < stats.online_cpus)
  474.             offline_cpu(target);
  475.     }
  476.  
  477. reschedule:
  478.     dprintk("%s: cur_load: %3u online_cpus: %u target: %u\n", MSM_HOTPLUG,
  479.         stats.cur_avg_load, stats.online_cpus, target);
  480.  
  481.     reschedule_hotplug_work();
  482. }
  483.  
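/*
 * Input events boost the online core count up to cpus_boosted, rate
 * limited by MIN_INPUT_INTERVAL; offline_cpu() then refuses to drop
 * below that count for boost_lock_dur microseconds after the last event.
 */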
  484. static void hotplug_input_event(struct input_handle *handle, unsigned int type,
  485.                 unsigned int code, int value)
  486. {
  487.     u64 now = ktime_to_us(ktime_get());
  488.  
  489.     hotplug.last_input = now;
  490.     if (now - last_boost_time < MIN_INPUT_INTERVAL)
  491.         return;
  492.  
  493.     if (num_online_cpus() >= hotplug.cpus_boosted)
  494.         return;
  495.  
  496.     dprintk("%s: online_cpus: %u boosted\n", MSM_HOTPLUG,
  497.         stats.online_cpus);
  498.  
  499.     online_cpu(hotplug.cpus_boosted);
  500.     last_boost_time = ktime_to_us(ktime_get());
  501. }
  502.  
  503. static int hotplug_input_connect(struct input_handler *handler,
  504.                  struct input_dev *dev,
  505.                  const struct input_device_id *id)
  506. {
  507.     struct input_handle *handle;
  508.     int err;
  509.  
  510.     handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
  511.     if (!handle)
  512.         return -ENOMEM;
  513.  
  514.     handle->dev = dev;
  515.     handle->handler = handler;
  516.     handle->name = handler->name;
  517.  
  518.     err = input_register_handle(handle);
  519.     if (err)
  520.         goto err_register;
  521.  
  522.     err = input_open_device(handle);
  523.     if (err)
  524.         goto err_open;
  525.  
  526.     return 0;
err_open:
    input_unregister_handle(handle);
err_register:
    kfree(handle);
    return err;
  532. }
  533.  
  534. static void hotplug_input_disconnect(struct input_handle *handle)
  535. {
  536.     input_close_device(handle);
  537.     input_unregister_handle(handle);
  538.     kfree(handle);
  539. }
  540.  
  541. static const struct input_device_id hotplug_ids[] = {
  542.     { .driver_info = 1 },
  543.     { },
  544. };
  545.  
  546. static struct input_handler hotplug_input_handler = {
  547.     .event      = hotplug_input_event,
  548.     .connect    = hotplug_input_connect,
  549.     .disconnect = hotplug_input_disconnect,
  550.     .name       = MSM_HOTPLUG,
  551.     .id_table   = hotplug_ids,
  552. };
  553.  
  554. static void msm_hotplug_suspend(struct work_struct *work)
  555. {
  556.     int ret;
  557.     unsigned int cpu, max_freq = 0;
  558.     struct cpufreq_policy policy;
  559.  
  560.     for_each_possible_cpu(cpu) {
  561.         ret = cpufreq_get_policy(&policy, cpu);
  562.         if (ret)
  563.             continue;
  564.         if (!cpu)
  565.             max_freq = policy.max;
  566.         cpufreq_verify_within_limits(&policy, policy.min,
  567.                          hotplug.suspend_max_freq);
  568.         if (hotplug.suspend_max_cpus == num_online_cpus())
  569.             break;
  570.         if (cpu && cpu_online(cpu))
  571.             cpu_down(cpu);
  572.     }
  573.  
  574.     if (hotplug.suspend_max_cpus == 1) {
  575.         flush_workqueue(hotplug_wq);
  576.         cancel_delayed_work_sync(&hotplug_work);
  577.     }
  578.  
  579.     dprintk("%s: Suspending %u cpus max to %uMHz\n", MSM_HOTPLUG,
  580.         hotplug.suspend_max_cpus > 0 ?
  581.         hotplug.suspend_max_cpus : stats.total_cpus,
  582.         (hotplug.suspend_max_freq > 0 ?
  583.         hotplug.suspend_max_freq : max_freq) / 1000);
  584. }
  585.  
  586. static void msm_hotplug_resume(struct work_struct *work)
  587. {
  588.     int ret;
  589.     unsigned int cpu, max_freq = 0;
  590.     struct cpufreq_policy policy;
  591.  
  592.     online_cpu(stats.total_cpus);
  593.  
  594.     for_each_possible_cpu(cpu) {
  595.         ret = cpufreq_get_policy(&policy, cpu);
  596.         if (ret)
  597.             continue;
  598.         cpufreq_verify_within_cpu_limits(&policy);
  599.         if (!cpu)
  600.             max_freq = policy.max;
  601.     }
  602.  
  603.     reschedule_hotplug_work();
  604.  
  605.     dprintk("%s: Resuming cpus to %uMHz\n", MSM_HOTPLUG, max_freq / 1000);
  606. }
  607.  
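/*
 * Editor's note: the callbacks below reference __msm_hotplug_suspend()
 * and __msm_hotplug_resume(), but their definitions are missing from
 * this paste. A minimal sketch is restored here, assuming (as in other
 * forks of this driver) that they simply queue the suspend/resume work
 * items initialized in msm_hotplug_probe().
 */
#ifdef CONFIG_POWERSUSPEND
static void __msm_hotplug_suspend(struct power_suspend *handler)
#else
static void __msm_hotplug_suspend(void)
#endif
{
    queue_work_on(0, hotplug_wq, &hotplug.suspend_work);
}

#ifdef CONFIG_POWERSUSPEND
static void __msm_hotplug_resume(struct power_suspend *handler)
#else
static void __msm_hotplug_resume(void)
#endif
{
    queue_work_on(0, hotplug_wq, &hotplug.resume_work);
}
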
  608. #ifdef CONFIG_POWERSUSPEND
  609. static struct power_suspend msm_hotplug_power_suspend_driver = {
  610.     .suspend = __msm_hotplug_suspend,
  611.     .resume = __msm_hotplug_resume,
  612. };
  613. #else
  614. static int prev_fb = FB_BLANK_UNBLANK;
  615.  
  616. static int fb_notifier_callback(struct notifier_block *self,
  617.                 unsigned long event, void *data)
  618. {
  619.     struct fb_event *evdata = data;
  620.     int *blank;
  621.  
  622.     if (evdata && evdata->data && event == FB_EVENT_BLANK) {
  623.         blank = evdata->data;
  624.         switch (*blank) {
  625.             case FB_BLANK_UNBLANK:
  626.                 if (prev_fb == FB_BLANK_POWERDOWN) {
  627.                     /* display on */
  628.                     __msm_hotplug_resume();
  629.                     prev_fb = FB_BLANK_UNBLANK;
  630.                 }
  631.                 break;
  632.             case FB_BLANK_POWERDOWN:
  633.                 if (prev_fb == FB_BLANK_UNBLANK) {
  634.                     /* display off */
  635.                     __msm_hotplug_suspend();
  636.                     prev_fb = FB_BLANK_POWERDOWN;
  637.                 }
  638.                 break;
  639.         }
  640.     }
  641.  
  642.     return NOTIFY_OK;
  643. }
  644. #endif
  645.  
  646. static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
  647. {
  648.     const char *cp;
  649.     int i;
  650.     int ntokens = 1;
  651.     int *tokenized_data;
  652.     int err = -EINVAL;
  653.  
  654.     cp = buf;
  655.     while ((cp = strpbrk(cp + 1, " :")))
  656.         ntokens++;
  657.  
  658.     if (!(ntokens & 0x1))
  659.         goto err;
  660.  
  661.     tokenized_data = kmalloc(ntokens * sizeof(int), GFP_KERNEL);
  662.     if (!tokenized_data) {
  663.         err = -ENOMEM;
  664.         goto err;
  665.     }
  666.  
  667.     cp = buf;
  668.     i = 0;
  669.     while (i < ntokens) {
  670.         if (sscanf(cp, "%d", &tokenized_data[i++]) != 1)
  671.             goto err_kfree;
  672.  
  673.         cp = strpbrk(cp, " :");
  674.         if (!cp)
  675.             break;
  676.         cp++;
  677.     }
  678.  
  679.     if (i != ntokens)
  680.         goto err_kfree;
  681.  
  682.     *num_tokens = ntokens;
  683.     return tokenized_data;
  684.  
  685. err_kfree:
  686.     kfree(tokenized_data);
  687. err:
  688.     return ERR_PTR(err);
  689. }
  690.  
  691. /************************** sysfs interface ************************/
  692.  
  693. static ssize_t show_enable_hotplug(struct device *dev,
  694.                    struct device_attribute *msm_hotplug_attrs,
  695.                    char *buf)
  696. {
  697.     return sprintf(buf, "%u\n", hotplug.enabled);
  698. }
  699.  
  700. static ssize_t store_enable_hotplug(struct device *dev,
  701.                     struct device_attribute *msm_hotplug_attrs,
  702.                     const char *buf, size_t count)
  703. {
  704.     int ret, cpu;
  705.     unsigned int val;
  706.  
  707.     ret = sscanf(buf, "%u", &val);
    if (ret != 1 || val > 1)
  709.         return -EINVAL;
  710.  
  711.     hotplug.enabled = val;
  712.  
  713.     if (hotplug.enabled) {
  714.         reschedule_hotplug_work();
  715.     } else {
  716.         flush_workqueue(hotplug_wq);
  717.         cancel_delayed_work_sync(&hotplug_work);
  718.         for_each_online_cpu(cpu) {
  719.             if (cpu == 0)
  720.                 continue;
  721.             cpu_down(cpu);
  722.         }
  723.     }
  724.  
  725.     return count;
  726. }
  727.  
  728. static ssize_t show_down_lock_duration(struct device *dev,
  729.                        struct device_attribute
  730.                        *msm_hotplug_attrs, char *buf)
  731. {
  732.     return sprintf(buf, "%u\n", hotplug.down_lock_dur);
  733. }
  734.  
  735. static ssize_t store_down_lock_duration(struct device *dev,
  736.                     struct device_attribute
  737.                     *msm_hotplug_attrs, const char *buf,
  738.                     size_t count)
  739. {
  740.     int ret;
  741.     unsigned int val;
  742.  
  743.     ret = sscanf(buf, "%u", &val);
  744.     if (ret != 1)
  745.         return -EINVAL;
  746.  
  747.     hotplug.down_lock_dur = val;
  748.  
  749.     return count;
  750. }
  751.  
  752. static ssize_t show_boost_lock_duration(struct device *dev,
  753.                         struct device_attribute
  754.                         *msm_hotplug_attrs, char *buf)
  755. {
  756.     return sprintf(buf, "%llu\n", div_u64(hotplug.boost_lock_dur, 1000));
  757. }
  758.  
  759. static ssize_t store_boost_lock_duration(struct device *dev,
  760.                      struct device_attribute
  761.                      *msm_hotplug_attrs, const char *buf,
  762.                      size_t count)
  763. {
  764.     int ret;
  765.     u64 val;
  766.  
  767.     ret = sscanf(buf, "%llu", &val);
  768.     if (ret != 1)
  769.         return -EINVAL;
  770.  
  771.     hotplug.boost_lock_dur = val * 1000;
  772.  
  773.     return count;
  774. }
  775.  
  776. static ssize_t show_update_rates(struct device *dev,
  777.                 struct device_attribute *msm_hotplug_attrs,
  778.                 char *buf)
  779. {
  780.     int i;
  781.     ssize_t ret = 0;
  782.     unsigned long flags;
  783.  
  784.     spin_lock_irqsave(&stats.update_rates_lock, flags);
  785.  
  786.     for (i = 0; i < stats.nupdate_rates; i++)
  787.         ret += sprintf(buf + ret, "%u%s", stats.update_rates[i],
  788.                    i & 0x1 ? ":" : " ");
  789.  
  790.     sprintf(buf + ret - 1, "\n");
  791.     spin_unlock_irqrestore(&stats.update_rates_lock, flags);
  792.     return ret;
  793. }
  794.  
  795. static ssize_t store_update_rates(struct device *dev,
  796.                  struct device_attribute *msm_hotplug_attrs,
  797.                  const char *buf, size_t count)
  798. {
  799.     int ntokens;
  800.     unsigned int *new_update_rates = NULL;
  801.     unsigned long flags;
  802.  
  803.     new_update_rates = get_tokenized_data(buf, &ntokens);
  804.     if (IS_ERR(new_update_rates))
  805.         return PTR_RET(new_update_rates);
  806.  
  807.     spin_lock_irqsave(&stats.update_rates_lock, flags);
  808.     if (stats.update_rates != default_update_rates)
  809.         kfree(stats.update_rates);
  810.     stats.update_rates = new_update_rates;
  811.     stats.nupdate_rates = ntokens;
  812.     spin_unlock_irqrestore(&stats.update_rates_lock, flags);
  813.     return count;
  814. }
  815.  
  816. static ssize_t show_load_levels(struct device *dev,
  817.                 struct device_attribute *msm_hotplug_attrs,
  818.                 char *buf)
  819. {
  820.     int i, len = 0;
  821.  
  822.     if (!buf)
  823.         return -EINVAL;
  824.  
  825.     for (i = 0; loads[i].up_threshold; i++) {
  826.         len += sprintf(buf + len, "%u ", i);
  827.         len += sprintf(buf + len, "%u ", loads[i].up_threshold);
  828.         len += sprintf(buf + len, "%u\n", loads[i].down_threshold);
  829.     }
  830.  
  831.     return len;
  832. }
  833.  
  834. static ssize_t store_load_levels(struct device *dev,
  835.                  struct device_attribute *msm_hotplug_attrs,
  836.                  const char *buf, size_t count)
  837. {
  838.     int ret;
  839.     unsigned int val[3];
  840.  
  841.     ret = sscanf(buf, "%u %u %u", &val[0], &val[1], &val[2]);
    if (ret != ARRAY_SIZE(val) || val[0] >= ARRAY_SIZE(loads) || val[2] > val[1])
  843.         return -EINVAL;
  844.  
  845.     loads[val[0]].up_threshold = val[1];
  846.     loads[val[0]].down_threshold = val[2];
  847.  
  848.     return count;
  849. }
  850.  
  851. static ssize_t show_history_size(struct device *dev,
  852.                  struct device_attribute *msm_hotplug_attrs,
  853.                  char *buf)
  854. {
  855.     return sprintf(buf, "%u\n", stats.hist_size);
  856. }
  857.  
  858. static ssize_t store_history_size(struct device *dev,
  859.                   struct device_attribute *msm_hotplug_attrs,
  860.                   const char *buf, size_t count)
  861. {
  862.     int ret;
  863.     unsigned int val;
  864.  
  865.     ret = sscanf(buf, "%u", &val);
    if (ret != 1 || val < 1 || val > MAX_HISTORY_SIZE)
  867.         return -EINVAL;
  868.  
  869.     flush_workqueue(hotplug_wq);
  870.     cancel_delayed_work_sync(&hotplug_work);
  871.  
    /* restart the history so stale samples don't skew the new average */
    memset(stats.load_hist, 0, MAX_HISTORY_SIZE * sizeof(*stats.load_hist));
    stats.hist_cnt = 0;
    stats.hist_size = val;
  874.  
  875.     reschedule_hotplug_work();
  876.  
  877.     return count;
  878. }
  879.  
  880. static ssize_t show_min_cpus_online(struct device *dev,
  881.                     struct device_attribute *msm_hotplug_attrs,
  882.                     char *buf)
  883. {
  884.     return sprintf(buf, "%u\n", hotplug.min_cpus_online);
  885. }
  886.  
  887. static ssize_t store_min_cpus_online(struct device *dev,
  888.                      struct device_attribute *msm_hotplug_attrs,
  889.                      const char *buf, size_t count)
  890. {
  891.     int ret;
  892.     unsigned int val;
  893.  
  894.     ret = sscanf(buf, "%u", &val);
  895.     if (ret != 1 || val < 1 || val > stats.total_cpus)
  896.         return -EINVAL;
  897.  
  898.     if (hotplug.max_cpus_online < val)
  899.         hotplug.max_cpus_online = val;
  900.  
  901.     hotplug.min_cpus_online = val;
  902.  
  903.     return count;
  904. }
  905.  
  906. static ssize_t show_max_cpus_online(struct device *dev,
  907.                     struct device_attribute *msm_hotplug_attrs,
  908.                     char *buf)
  909. {
  910.     return sprintf(buf, "%u\n",hotplug.max_cpus_online);
  911. }
  912.  
  913. static ssize_t store_max_cpus_online(struct device *dev,
  914.                      struct device_attribute *msm_hotplug_attrs,
  915.                      const char *buf, size_t count)
  916. {
  917.     int ret;
  918.     unsigned int val;
  919.  
  920.     ret = sscanf(buf, "%u", &val);
  921.     if (ret != 1 || val < 1 || val > stats.total_cpus)
  922.         return -EINVAL;
  923.  
  924.     if (hotplug.min_cpus_online > val)
  925.         hotplug.min_cpus_online = val;
  926.  
  927.     hotplug.max_cpus_online = val;
  928.  
  929.     return count;
  930. }
  931.  
  932. static ssize_t show_suspend_max_freq(struct device *dev,
  933.                      struct device_attribute *msm_hotplug_attrs,
  934.                      char *buf)
  935. {
  936.     return sprintf(buf, "%u\n", hotplug.suspend_max_freq);
  937. }
  938.  
  939. static ssize_t store_suspend_max_freq(struct device *dev,
  940.                       struct device_attribute *msm_hotplug_attrs,
  941.                       const char *buf, size_t count)
  942. {
  943.     int ret;
  944.     unsigned int val;
    struct cpufreq_policy *policy = cpufreq_cpu_get(0);

    if (!policy)
        return -EINVAL;

    ret = sscanf(buf, "%u", &val);
    if (ret != 1) {
        cpufreq_cpu_put(policy);
        return -EINVAL;
    }

    /* 0 disables the suspend frequency cap; otherwise clamp to policy limits */
    if (val == 0)
        goto out;

    if (val < policy->min)
        val = policy->min;
    else if (val > policy->max)
        val = policy->max;
out:
    cpufreq_cpu_put(policy);
    hotplug.suspend_max_freq = val;

    return count;
}
  963.  
  964. static ssize_t show_suspend_max_cpus(struct device *dev,
  965.                      struct device_attribute *msm_hotplug_attrs,
  966.                      char *buf)
  967. {
  968.     return sprintf(buf, "%u\n", hotplug.suspend_max_cpus);
  969. }
  970.  
  971. static ssize_t store_suspend_max_cpus(struct device *dev,
  972.                       struct device_attribute *msm_hotplug_attrs,
  973.                       const char *buf, size_t count)
  974. {
  975.     int ret;
  976.     unsigned int val;
  977.  
  978.     ret = sscanf(buf, "%u", &val);
    if (ret != 1 || val > stats.total_cpus)
  980.         return -EINVAL;
  981.  
  982.     hotplug.suspend_max_cpus = val;
  983.  
  984.     return count;
  985. }
  986.  
  987. static ssize_t show_cpus_boosted(struct device *dev,
  988.                  struct device_attribute *msm_hotplug_attrs,
  989.                  char *buf)
  990. {
  991.     return sprintf(buf, "%u\n", hotplug.cpus_boosted);
  992. }
  993.  
  994. static ssize_t store_cpus_boosted(struct device *dev,
  995.                   struct device_attribute *msm_hotplug_attrs,
  996.                   const char *buf, size_t count)
  997. {
  998.     int ret;
  999.     unsigned int val;
  1000.  
  1001.     ret = sscanf(buf, "%u", &val);
  1002.     if (ret != 1 || val < 1)
  1003.         return -EINVAL;
  1004.  
  1005.     hotplug.cpus_boosted = val;
  1006.  
  1007.     return count;
  1008. }
  1009.  
  1010. static ssize_t show_offline_load(struct device *dev,
  1011.                  struct device_attribute *msm_hotplug_attrs,
  1012.                  char *buf)
  1013. {
  1014.     return sprintf(buf, "%u\n", hotplug.offline_load);
  1015. }
  1016.  
  1017. static ssize_t store_offline_load(struct device *dev,
  1018.                   struct device_attribute *msm_hotplug_attrs,
  1019.                   const char *buf, size_t count)
  1020. {
  1021.     int ret;
  1022.     unsigned int val;
  1023.  
  1024.     ret = sscanf(buf, "%u", &val);
  1025.     if (ret != 1)
  1026.         return -EINVAL;
  1027.  
  1028.     hotplug.offline_load = val;
  1029.  
  1030.     return count;
  1031. }
  1032.  
  1033. static ssize_t show_fast_lane_load(struct device *dev,
  1034.                    struct device_attribute *msm_hotplug_attrs,
  1035.                    char *buf)
  1036. {
  1037.     return sprintf(buf, "%u\n", hotplug.fast_lane_load);
  1038. }
  1039.  
  1040. static ssize_t store_fast_lane_load(struct device *dev,
  1041.                     struct device_attribute *msm_hotplug_attrs,
  1042.                     const char *buf, size_t count)
  1043. {
  1044.     int ret;
  1045.     unsigned int val;
  1046.  
  1047.     ret = sscanf(buf, "%u", &val);
  1048.     if (ret != 1)
  1049.         return -EINVAL;
  1050.  
  1051.     hotplug.fast_lane_load = val;
  1052.  
  1053.     return count;
  1054. }
  1055.  
  1056. static ssize_t show_io_is_busy(struct device *dev,
  1057.                    struct device_attribute *msm_hotplug_attrs,
  1058.                    char *buf)
  1059. {
  1060.     return sprintf(buf, "%u\n", io_is_busy);
  1061. }
  1062.  
  1063. static ssize_t store_io_is_busy(struct device *dev,
  1064.                     struct device_attribute *msm_hotplug_attrs,
  1065.                     const char *buf, size_t count)
  1066. {
  1067.     int ret;
  1068.     unsigned int val;
  1069.  
  1070.     ret = sscanf(buf, "%u", &val);
    if (ret != 1 || val > 1)
  1072.         return -EINVAL;
  1073.  
  1074.     io_is_busy = val ? true : false;
  1075.  
  1076.     return count;
  1077. }
  1078.  
  1079. static ssize_t show_current_load(struct device *dev,
  1080.                  struct device_attribute *msm_hotplug_attrs,
  1081.                  char *buf)
  1082. {
  1083.     return sprintf(buf, "%u\n", stats.cur_avg_load);
  1084. }
  1085.  
static DEVICE_ATTR(enabled, 0644, show_enable_hotplug, store_enable_hotplug);
static DEVICE_ATTR(down_lock_duration, 0644, show_down_lock_duration,
           store_down_lock_duration);
static DEVICE_ATTR(boost_lock_duration, 0644, show_boost_lock_duration,
           store_boost_lock_duration);
static DEVICE_ATTR(update_rates, 0644, show_update_rates, store_update_rates);
static DEVICE_ATTR(load_levels, 0644, show_load_levels, store_load_levels);
static DEVICE_ATTR(history_size, 0644, show_history_size, store_history_size);
static DEVICE_ATTR(min_cpus_online, 0644, show_min_cpus_online,
           store_min_cpus_online);
static DEVICE_ATTR(max_cpus_online, 0644, show_max_cpus_online,
           store_max_cpus_online);
static DEVICE_ATTR(suspend_max_freq, 0644, show_suspend_max_freq,
           store_suspend_max_freq);
static DEVICE_ATTR(suspend_max_cpus, 0644, show_suspend_max_cpus,
           store_suspend_max_cpus);
static DEVICE_ATTR(cpus_boosted, 0644, show_cpus_boosted, store_cpus_boosted);
static DEVICE_ATTR(offline_load, 0644, show_offline_load, store_offline_load);
static DEVICE_ATTR(fast_lane_load, 0644, show_fast_lane_load,
           store_fast_lane_load);
static DEVICE_ATTR(io_is_busy, 0644, show_io_is_busy, store_io_is_busy);
static DEVICE_ATTR(current_load, 0444, show_current_load, NULL);
  1108.  
  1109. static struct attribute *msm_hotplug_attrs[] = {
  1110.     &dev_attr_enabled.attr,
  1111.     &dev_attr_down_lock_duration.attr,
  1112.     &dev_attr_boost_lock_duration.attr,
  1113.     &dev_attr_update_rates.attr,
  1114.     &dev_attr_load_levels.attr,
  1115.     &dev_attr_history_size.attr,
  1116.     &dev_attr_min_cpus_online.attr,
  1117.     &dev_attr_max_cpus_online.attr,
  1118.     &dev_attr_suspend_max_freq.attr,
  1119.     &dev_attr_suspend_max_cpus.attr,
  1120.     &dev_attr_cpus_boosted.attr,
  1121.     &dev_attr_offline_load.attr,
  1122.     &dev_attr_fast_lane_load.attr,
  1123.     &dev_attr_io_is_busy.attr,
  1124.     &dev_attr_current_load.attr,
  1125.     NULL,
  1126. };
  1127.  
  1128. static struct attribute_group attr_group = {
  1129.     .attrs = msm_hotplug_attrs,
  1130. };
  1131.  
  1132. /************************** sysfs end ************************/
  1133.  
  1134. static int __devinit msm_hotplug_probe(struct platform_device *pdev)
  1135. {
  1136.     int cpu, ret = 0;
  1137.     struct kobject *module_kobj;
  1138.     struct down_lock *dl;
  1139.  
  1140.     hotplug_wq =
  1141.         alloc_workqueue("msm_hotplug_wq", WQ_HIGHPRI | WQ_FREEZABLE, 0);
  1142.     if (!hotplug_wq) {
  1143.         pr_err("%s: Failed to allocate hotplug workqueue\n",
  1144.                MSM_HOTPLUG);
  1145.         ret = -ENOMEM;
  1146.         goto err_out;
  1147.     }
  1148.  
  1149.     module_kobj = kset_find_obj(module_kset, MSM_HOTPLUG);
    if (!module_kobj) {
        pr_err("%s: Cannot find kobject for module\n", MSM_HOTPLUG);
        ret = -ENOENT;
        goto err_dev;
    }
  1154.  
  1155.     ret = sysfs_create_group(module_kobj, &attr_group);
  1156.     if (ret) {
  1157.         pr_err("%s: Failed to create sysfs: %d\n", MSM_HOTPLUG, ret);
  1158.         goto err_dev;
  1159.     }
  1160.  
  1161.  
  1162. #ifdef CONFIG_POWERSUSPEND
  1163.     register_power_suspend(&msm_hotplug_power_suspend_driver);
  1164. #else
  1165.     hotplug.notif.notifier_call = fb_notifier_callback;
  1166.     if (fb_register_client(&hotplug.notif)) {
  1167.         pr_err("%s: Failed to register FB notifier callback\n",
  1168.             MSM_HOTPLUG);
  1169.         goto err_dev;
  1170.     }
  1171. #endif
  1172.  
  1173.     ret = input_register_handler(&hotplug_input_handler);
  1174.     if (ret) {
  1175.         pr_err("%s: Failed to register input handler: %d\n",
  1176.                MSM_HOTPLUG, ret);
  1177.         goto err_dev;
  1178.     }
  1179.  
    /* allocate room for the largest history the sysfs interface allows */
    stats.load_hist = kmalloc(MAX_HISTORY_SIZE * sizeof(*stats.load_hist),
                  GFP_KERNEL);
  1181.     if (!stats.load_hist) {
  1182.         pr_err("%s: Failed to allocate memory\n", MSM_HOTPLUG);
  1183.         ret = -ENOMEM;
  1184.         goto err_dev;
  1185.     }
  1186.  
  1187.     mutex_init(&stats.stats_mutex);
  1188.  
  1189.     INIT_DELAYED_WORK(&hotplug_work, msm_hotplug_work);
  1190.     INIT_WORK(&hotplug.up_work, cpu_up_work);
  1191.     INIT_WORK(&hotplug.down_work, cpu_down_work);
  1192.     INIT_WORK(&hotplug.resume_work, msm_hotplug_resume);
  1193.     INIT_WORK(&hotplug.suspend_work, msm_hotplug_suspend);
  1194.  
  1195.     for_each_possible_cpu(cpu) {
  1196.         dl = &per_cpu(lock_info, cpu);
  1197.         INIT_DELAYED_WORK(&dl->lock_rem, remove_down_lock);
  1198.     }
  1199.  
  1200.     if (hotplug.enabled)
  1201.         queue_delayed_work_on(0, hotplug_wq, &hotplug_work,
  1202.                       START_DELAY);
  1203.  
  1204.     return ret;
  1205. err_dev:
  1206.     module_kobj = NULL;
  1207.     destroy_workqueue(hotplug_wq);
  1208. err_out:
  1209.     return ret;
  1210. }
  1211.  
  1212. static struct platform_device msm_hotplug_device = {
  1213.     .name = MSM_HOTPLUG,
  1214.     .id = -1,
  1215. };
  1216.  
  1217. static int msm_hotplug_remove(struct platform_device *pdev)
  1218. {
  1219.     destroy_workqueue(hotplug_wq);
  1220.     input_unregister_handler(&hotplug_input_handler);
  1221.     kfree(stats.load_hist);
  1222.  
  1223.     return 0;
  1224. }
  1225.  
  1226. static struct platform_driver msm_hotplug_driver = {
  1227.     .probe = msm_hotplug_probe,
  1228.     .remove = msm_hotplug_remove,
  1229.     .driver = {
  1230.         .name = MSM_HOTPLUG,
  1231.         .owner = THIS_MODULE,
  1232.     },
  1233. };
  1234.  
  1235. static int __init msm_hotplug_init(void)
  1236. {
  1237.     int ret;
  1238.  
  1239.     ret = platform_driver_register(&msm_hotplug_driver);
  1240.     if (ret) {
  1241.         pr_err("%s: Driver register failed: %d\n", MSM_HOTPLUG, ret);
  1242.         return ret;
  1243.     }
  1244.  
  1245.     ret = platform_device_register(&msm_hotplug_device);
  1246.     if (ret) {
  1247.         pr_err("%s: Device register failed: %d\n", MSM_HOTPLUG, ret);
  1248.         return ret;
  1249.     }
  1250.  
  1251.     pr_info("%s: Device init\n", MSM_HOTPLUG);
  1252.  
  1253.     return ret;
  1254. }
  1255.  
  1256. static void __exit msm_hotplug_exit(void)
  1257. {
  1258.     platform_device_unregister(&msm_hotplug_device);
  1259.     platform_driver_unregister(&msm_hotplug_driver);
  1260. }
  1261.  
  1262. late_initcall(msm_hotplug_init);
  1263. module_exit(msm_hotplug_exit);
  1264.  
  1265. MODULE_AUTHOR("Fluxi <linflux@arcor.de>");
  1266. MODULE_DESCRIPTION("MSM Hotplug Driver");
MODULE_LICENSE("GPL v2");