Advertisement
Guest User

intelli_plug

a guest
Jan 17th, 2018
339
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 15.01 KB | None | 0 0
  1. /*
  2.  * Author: Paul Reioux aka Faux123 <reioux@gmail.com>
  3.  *
  4.  * Copyright 2012~2014 Paul Reioux
  5.  *
  6.  * This software is licensed under the terms of the GNU General Public
  7.  * License version 2, as published by the Free Software Foundation, and
  8.  * may be copied, distributed, and modified under those terms.
  9.  *
  10.  * This program is distributed in the hope that it will be useful,
  11.  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13.  * GNU General Public License for more details.
  14.  *
  15.  */
  16. #include <linux/workqueue.h>
  17. #include <linux/cpu.h>
  18. #include <linux/sched.h>
  19. #include <linux/mutex.h>
  20. #include <linux/module.h>
  21. #include <linux/slab.h>
  22. #include <linux/input.h>
  23. #include <linux/cpufreq.h>
  24.  
  25. #ifdef CONFIG_POWERSUSPEND
  26. #include <linux/powersuspend.h>
  27. #endif
  28.  
  29. #ifdef CONFIG_HAS_EARLYSUSPEND
  30. #include <linux/earlysuspend.h>
  31. #endif
  32.  
  33. //#define DEBUG_INTELLI_PLUG
  34. #undef DEBUG_INTELLI_PLUG
  35.  
  36. #define INTELLI_PLUG_MAJOR_VERSION  3
  37. #define INTELLI_PLUG_MINOR_VERSION  8
  38.  
  39. #define DEF_SAMPLING_MS         (268)
  40.  
  41. #define DUAL_PERSISTENCE        (2500 / DEF_SAMPLING_MS)
  42. #define TRI_PERSISTENCE         (1700 / DEF_SAMPLING_MS)
  43. #define QUAD_PERSISTENCE        (1000 / DEF_SAMPLING_MS)
  44.  
  45. #define BUSY_PERSISTENCE        (3500 / DEF_SAMPLING_MS)
  46.  
  47. static DEFINE_MUTEX(intelli_plug_mutex);
  48.  
  49. static struct delayed_work intelli_plug_work;
  50. static struct delayed_work intelli_plug_boost;
  51.  
  52. static struct workqueue_struct *intelliplug_wq;
  53. static struct workqueue_struct *intelliplug_boost_wq;
  54.  
  55. static unsigned int intelli_plug_active = 0;
  56. module_param(intelli_plug_active, uint, 0644);
  57.  
  58. static unsigned int touch_boost_active = 1;
  59. module_param(touch_boost_active, uint, 0644);
  60.  
  61. static unsigned int nr_run_profile_sel = 0;
  62. module_param(nr_run_profile_sel, uint, 0644);
  63.  
  64. //default to something sane rather than zero
  65. static unsigned int sampling_time = DEF_SAMPLING_MS;
  66.  
  67. static int persist_count = 0;
  68.  
  69. static bool suspended = false;
  70.  
/* Per-CPU bookkeeping for saved frequency limits and run-queue load. */
struct ip_cpu_info {
	unsigned int sys_max;		/* stock cpuinfo.max_freq, captured at init */
	unsigned int cur_max;		/* policy->max saved before screen-off capping */
	unsigned long cpu_nr_running;	/* last sampled avg_cpu_nr_running() for this CPU */
};

static DEFINE_PER_CPU(struct ip_cpu_info, ip_info);
  78.  
  79. static unsigned int screen_off_max = UINT_MAX;
  80. module_param(screen_off_max, uint, 0644);
  81.  
  82. #define CAPACITY_RESERVE    50
  83.  
  84. #if defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_APQ8064) || \
  85. defined(CONFIG_ARCH_MSM8974)
  86. #define THREAD_CAPACITY (339 - CAPACITY_RESERVE)
  87. #elif defined(CONFIG_ARCH_MSM8226) || defined (CONFIG_ARCH_MSM8926) || \
  88. defined (CONFIG_ARCH_MSM8610) || defined (CONFIG_ARCH_MSM8228)
  89. #define THREAD_CAPACITY (190 - CAPACITY_RESERVE)
  90. #else
  91. #define THREAD_CAPACITY (250 - CAPACITY_RESERVE)
  92. #endif
  93.  
  94. #define MULT_FACTOR 4
  95. #define DIV_FACTOR  100000
  96. #define NR_FSHIFT   3
  97.  
  98. static unsigned int nr_fshift = NR_FSHIFT;
  99.  
/*
 * Run-queue depth thresholds (fixed point, scaled by THREAD_CAPACITY).
 * calculate_thread_stats() walks a table until the averaged nr_running
 * falls below entry N, yielding a target of N+1 online cores; UINT_MAX
 * terminates each table so the scan always stops.
 */
static unsigned int nr_run_thresholds_balance[] = {
	(THREAD_CAPACITY * 625 * MULT_FACTOR) / DIV_FACTOR,
	(THREAD_CAPACITY * 875 * MULT_FACTOR) / DIV_FACTOR,
	(THREAD_CAPACITY * 1125 * MULT_FACTOR) / DIV_FACTOR,
	UINT_MAX
};

static unsigned int nr_run_thresholds_performance[] = {
	(THREAD_CAPACITY * 380 * MULT_FACTOR) / DIV_FACTOR,
	(THREAD_CAPACITY * 625 * MULT_FACTOR) / DIV_FACTOR,
	(THREAD_CAPACITY * 875 * MULT_FACTOR) / DIV_FACTOR,
	UINT_MAX
};

static unsigned int nr_run_thresholds_conservative[] = {
	(THREAD_CAPACITY * 875 * MULT_FACTOR) / DIV_FACTOR,
	(THREAD_CAPACITY * 1625 * MULT_FACTOR) / DIV_FACTOR,
	(THREAD_CAPACITY * 2125 * MULT_FACTOR) / DIV_FACTOR,
	UINT_MAX
};

/* eco profiles: single threshold, max two cores */
static unsigned int nr_run_thresholds_eco[] = {
	(THREAD_CAPACITY * 380 * MULT_FACTOR) / DIV_FACTOR,
	UINT_MAX
};

static unsigned int nr_run_thresholds_eco_extreme[] = {
	(THREAD_CAPACITY * 750 * MULT_FACTOR) / DIV_FACTOR,
	UINT_MAX
};

/* all-zero thresholds: always satisfied, effectively pins target at 1 */
static unsigned int nr_run_thresholds_disable[] = {
	0,  0,  0,  UINT_MAX
};

/* indexed by the nr_run_profile_sel module parameter */
static unsigned int *nr_run_profiles[] = {
	nr_run_thresholds_balance,
	nr_run_thresholds_performance,
	nr_run_thresholds_conservative,
	nr_run_thresholds_eco,
	nr_run_thresholds_eco_extreme,
	nr_run_thresholds_disable,
};
  143.  
  144. #define NR_RUN_ECO_MODE_PROFILE 3
  145. #define NR_RUN_HYSTERESIS_QUAD  8
  146. #define NR_RUN_HYSTERESIS_DUAL  4
  147.  
  148. #define CPU_NR_THRESHOLD    ((THREAD_CAPACITY << 1) + (THREAD_CAPACITY / 2))
  149.  
  150. static unsigned int nr_possible_cores;
  151. module_param(nr_possible_cores, uint, 0444);
  152.  
  153. static unsigned int cpu_nr_run_threshold = CPU_NR_THRESHOLD;
  154. module_param(cpu_nr_run_threshold, uint, 0644);
  155.  
  156. static unsigned int nr_run_hysteresis = NR_RUN_HYSTERESIS_QUAD;
  157. module_param(nr_run_hysteresis, uint, 0644);
  158.  
  159. static unsigned int nr_run_last;
  160.  
  161. extern unsigned long avg_nr_running(void);
  162. extern unsigned long avg_cpu_nr_running(unsigned int cpu);
  163.  
  164. static unsigned int calculate_thread_stats(void)
  165. {
  166.     unsigned int avg_nr_run = avg_nr_running();
  167.     unsigned int nr_run;
  168.     unsigned int threshold_size;
  169.     unsigned int *current_profile;
  170.  
  171.     current_profile = nr_run_profiles[nr_run_profile_sel];
  172.     if (num_possible_cpus() > 2) {
  173.         if (nr_run_profile_sel >= NR_RUN_ECO_MODE_PROFILE)
  174.             threshold_size =
  175.                 ARRAY_SIZE(nr_run_thresholds_eco);
  176.         else
  177.             threshold_size =
  178.                 ARRAY_SIZE(nr_run_thresholds_balance);
  179.     } else
  180.         threshold_size =
  181.             ARRAY_SIZE(nr_run_thresholds_eco);
  182.  
  183.     if (nr_run_profile_sel >= NR_RUN_ECO_MODE_PROFILE)
  184.         nr_fshift = 1;
  185.     else
  186.         nr_fshift = num_possible_cpus() - 1;
  187.  
  188.     for (nr_run = 1; nr_run < threshold_size; nr_run++) {
  189.         unsigned int nr_threshold;
  190.         nr_threshold = current_profile[nr_run - 1];
  191.  
  192.         if (nr_run_last <= nr_run)
  193.             nr_threshold += nr_run_hysteresis;
  194.         if (avg_nr_run <= (nr_threshold << (FSHIFT - nr_fshift)))
  195.             break;
  196.     }
  197.     nr_run_last = nr_run;
  198.  
  199.     return nr_run;
  200. }
  201.  
  202. static void __cpuinit intelli_plug_boost_fn(struct work_struct *work)
  203. {
  204.  
  205.     int nr_cpus = num_online_cpus();
  206.  
  207.     if (intelli_plug_active)
  208.         if (touch_boost_active)
  209.             if (nr_cpus < 2)
  210.                 cpu_up(1);
  211. }
  212.  
  213. /*
  214. static int cmp_nr_running(const void *a, const void *b)
  215. {
  216.     return *(unsigned long *)a - *(unsigned long *)b;
  217. }
  218. */
  219.  
  220. static void update_per_cpu_stat(void)
  221. {
  222.     unsigned int cpu;
  223.     struct ip_cpu_info *l_ip_info;
  224.  
  225.     for_each_online_cpu(cpu) {
  226.         l_ip_info = &per_cpu(ip_info, cpu);
  227.         l_ip_info->cpu_nr_running = avg_cpu_nr_running(cpu);
  228. #ifdef DEBUG_INTELLI_PLUG
  229.         pr_info("cpu %u nr_running => %lu\n", cpu,
  230.             l_ip_info->cpu_nr_running);
  231. #endif
  232.     }
  233. }
  234.  
  235. static void unplug_cpu(int min_active_cpu)
  236. {
  237.     unsigned int cpu;
  238.     struct ip_cpu_info *l_ip_info;
  239.     int l_nr_threshold;
  240.  
  241.     for_each_online_cpu(cpu) {
  242.         l_nr_threshold =
  243.             cpu_nr_run_threshold << 1 / (num_online_cpus());
  244.         if (cpu == 0)
  245.             continue;
  246.         l_ip_info = &per_cpu(ip_info, cpu);
  247.         if (cpu > min_active_cpu)
  248.             if (l_ip_info->cpu_nr_running < l_nr_threshold)
  249.                 cpu_down(cpu);
  250.     }
  251. }
  252.  
  253. static void __cpuinit intelli_plug_work_fn(struct work_struct *work)
  254. {
  255.     unsigned int nr_run_stat;
  256.     unsigned int cpu_count = 0;
  257.     unsigned int nr_cpus = 0;
  258.  
  259.     int i;
  260.  
  261.     if (intelli_plug_active) {
  262.         nr_run_stat = calculate_thread_stats();
  263.         update_per_cpu_stat();
  264. #ifdef DEBUG_INTELLI_PLUG
  265.         pr_info("nr_run_stat: %u\n", nr_run_stat);
  266. #endif
  267.         cpu_count = nr_run_stat;
  268.         nr_cpus = num_online_cpus();
  269.  
  270.         if (!suspended) {
  271.  
  272.             if (persist_count > 0)
  273.                 persist_count--;
  274.  
  275.             switch (cpu_count) {
  276.             case 1:
  277.                 if (persist_count == 0) {
  278.                     //take down everyone
  279.                     unplug_cpu(0);
  280.                 }
  281. #ifdef DEBUG_INTELLI_PLUG
  282.                 pr_info("case 1: %u\n", persist_count);
  283. #endif
  284.                 break;
  285.             case 2:
  286.                 if (persist_count == 0)
  287.                     persist_count = DUAL_PERSISTENCE;
  288.                 if (nr_cpus < 2) {
  289.                     for (i = 1; i < cpu_count; i++)
  290.                         cpu_up(i);
  291.                 } else {
  292.                     unplug_cpu(1);
  293.                 }
  294. #ifdef DEBUG_INTELLI_PLUG
  295.                 pr_info("case 2: %u\n", persist_count);
  296. #endif
  297.                 break;
  298.             case 3:
  299.                 if (persist_count == 0)
  300.                     persist_count = TRI_PERSISTENCE;
  301.                 if (nr_cpus < 3) {
  302.                     for (i = 1; i < cpu_count; i++)
  303.                         cpu_up(i);
  304.                 } else {
  305.                     unplug_cpu(2);
  306.                 }
  307. #ifdef DEBUG_INTELLI_PLUG
  308.                 pr_info("case 3: %u\n", persist_count);
  309. #endif
  310.                 break;
  311.             case 4:
  312.                 if (persist_count == 0)
  313.                     persist_count = QUAD_PERSISTENCE;
  314.                 if (nr_cpus < 4)
  315.                     for (i = 1; i < cpu_count; i++)
  316.                         cpu_up(i);
  317. #ifdef DEBUG_INTELLI_PLUG
  318.                 pr_info("case 4: %u\n", persist_count);
  319. #endif
  320.                 break;
  321.             default:
  322.                 pr_err("Run Stat Error: Bad value %u\n", nr_run_stat);
  323.                 break;
  324.             }
  325.         }
  326. #ifdef DEBUG_INTELLI_PLUG
  327.         else
  328.             pr_info("intelli_plug is suspened!\n");
  329. #endif
  330.     }
  331.     queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
  332.         msecs_to_jiffies(sampling_time));
  333. }
  334.  
  335. #if defined(CONFIG_POWERSUSPEND) || defined(CONFIG_HAS_EARLYSUSPEND)
  336. static void screen_off_limit(bool on)
  337. {
  338.     unsigned int cpu;
  339.     struct cpufreq_policy *policy;
  340.     struct ip_cpu_info *l_ip_info;
  341.  
  342.     /* not active, so exit */
  343.     if (screen_off_max == UINT_MAX)
  344.         return;
  345.  
  346.     for_each_online_cpu(cpu) {
  347.         l_ip_info = &per_cpu(ip_info, cpu);
  348.         policy = cpufreq_cpu_get(0);
  349.  
  350.         if (on) {
  351.             /* save current instance */
  352.             l_ip_info->cur_max = policy->max;
  353.             policy->max = screen_off_max;
  354.             policy->cpuinfo.max_freq = screen_off_max;
  355. #ifdef DEBUG_INTELLI_PLUG
  356.             pr_info("cpuinfo max is (on): %u %u\n",
  357.                 policy->cpuinfo.max_freq, l_ip_info->sys_max);
  358. #endif
  359.         } else {
  360.             /* restore */
  361.             if (cpu != 0) {
  362.                 l_ip_info = &per_cpu(ip_info, 0);
  363.             }
  364.             policy->cpuinfo.max_freq = l_ip_info->sys_max;
  365.             policy->max = l_ip_info->cur_max;
  366. #ifdef DEBUG_INTELLI_PLUG
  367.             pr_info("cpuinfo max is (off): %u %u\n",
  368.                 policy->cpuinfo.max_freq, l_ip_info->sys_max);
  369. #endif
  370.         }
  371.         cpufreq_update_policy(cpu);
  372.     }
  373. }
  374.  
/*
 * Suspend hook (powersuspend or earlysuspend, depending on kernel
 * config).  Marks the driver suspended, applies the screen-off
 * frequency cap, and forces all secondary CPUs offline.
 */
#ifdef CONFIG_POWERSUSPEND
static void intelli_plug_suspend(struct power_suspend *handler)
#else
static void intelli_plug_suspend(struct early_suspend *handler)
#endif
{
	if (intelli_plug_active) {
		int cpu;

		/* let any in-flight hotplug decision finish first */
		flush_workqueue(intelliplug_wq);

		mutex_lock(&intelli_plug_mutex);
		suspended = true;
		screen_off_limit(true);
		mutex_unlock(&intelli_plug_mutex);

		// put rest of the cores to sleep unconditionally!
		for_each_online_cpu(cpu) {
			if (cpu != 0)
				cpu_down(cpu);
		}
	}
}
  398.  
  399. static void wakeup_boost(void)
  400. {
  401.     unsigned int cpu;
  402.     struct cpufreq_policy *policy;
  403.     struct ip_cpu_info *l_ip_info;
  404.  
  405.     for_each_online_cpu(cpu) {
  406.         policy = cpufreq_cpu_get(0);
  407.         l_ip_info = &per_cpu(ip_info, 0);
  408.         policy->cur = l_ip_info->cur_max;
  409.         cpufreq_update_policy(cpu);
  410.     }
  411. }
  412.  
/*
 * Resume hook (powersuspend or earlysuspend, depending on kernel
 * config).  Brings every possible secondary CPU online, holds them up
 * for BUSY_PERSISTENCE sampling periods, restores frequency limits,
 * and restarts the sampling worker.
 */
#ifdef CONFIG_POWERSUSPEND
static void __cpuinit intelli_plug_resume(struct power_suspend *handler)
#else
static void __cpuinit intelli_plug_resume(struct early_suspend *handler)
#endif
{

	if (intelli_plug_active) {
		int cpu;

		mutex_lock(&intelli_plug_mutex);
		/* keep cores awake long enough for faster wake up */
		persist_count = BUSY_PERSISTENCE;
		suspended = false;
		mutex_unlock(&intelli_plug_mutex);

		for_each_possible_cpu(cpu) {
			if (cpu == 0)
				continue;
			cpu_up(cpu);
		}

		wakeup_boost();
		screen_off_limit(false);
	}
	queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
		msecs_to_jiffies(10));
}
  441. #endif
  442.  
  443. #ifdef CONFIG_POWERSUSPEND
/* Registers the suspend/resume hooks with the powersuspend framework. */
static struct power_suspend intelli_plug_power_suspend_driver = {
	.suspend = intelli_plug_suspend,
	.resume = intelli_plug_resume,
};
  448. #endif  /* CONFIG_POWERSUSPEND */
  449.  
  450. #ifdef CONFIG_HAS_EARLYSUSPEND
/* Registers the suspend/resume hooks with the earlysuspend framework. */
static struct early_suspend intelli_plug_early_suspend_driver = {
	.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 10,
	.suspend = intelli_plug_suspend,
	.resume = intelli_plug_resume,
};
  456. #endif  /* CONFIG_HAS_EARLYSUSPEND */
  457.  
  458. static void intelli_plug_input_event(struct input_handle *handle,
  459.         unsigned int type, unsigned int code, int value)
  460. {
  461. #ifdef DEBUG_INTELLI_PLUG
  462.     pr_info("intelli_plug touched!\n");
  463. #endif
  464.     queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_boost,
  465.         msecs_to_jiffies(10));
  466. }
  467.  
  468. static int intelli_plug_input_connect(struct input_handler *handler,
  469.         struct input_dev *dev, const struct input_device_id *id)
  470. {
  471.     struct input_handle *handle;
  472.     int error;
  473.  
  474.     handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
  475.     if (!handle)
  476.         return -ENOMEM;
  477.  
  478.     handle->dev = dev;
  479.     handle->handler = handler;
  480.     handle->name = "intelliplug";
  481.  
  482.     error = input_register_handle(handle);
  483.     if (error)
  484.         goto err2;
  485.  
  486.     error = input_open_device(handle);
  487.     if (error)
  488.         goto err1;
  489.     pr_info("%s found and connected!\n", dev->name);
  490.     return 0;
  491. err1:
  492.     input_unregister_handle(handle);
  493. err2:
  494.     kfree(handle);
  495.     return error;
  496. }
  497.  
/*
 * Detach from an input device: close and unregister the handle created
 * in intelli_plug_input_connect(), then free it.
 */
static void intelli_plug_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
  504.  
/*
 * Devices whose input events trigger touch boosting: multi-touch
 * touchscreens and BTN_TOUCH-style touchpads.
 */
static const struct input_device_id intelli_plug_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
				BIT_MASK(ABS_MT_POSITION_X) |
				BIT_MASK(ABS_MT_POSITION_Y) },
	}, /* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
				BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	}, /* touchpad */
	{ },
};
  523.  
/* Wires the event/connect/disconnect callbacks to the device matches above. */
static struct input_handler intelli_plug_input_handler = {
	.event          = intelli_plug_input_event,
	.connect        = intelli_plug_input_connect,
	.disconnect     = intelli_plug_input_disconnect,
	.name           = "intelliplug_handler",
	.id_table       = intelli_plug_ids,
};
  531.  
  532. int __init intelli_plug_init(void)
  533. {
  534.     int rc;
  535. #if defined (CONFIG_POWERSUSPEND) || defined(CONFIG_HAS_EARLYSUSPEND)
  536.     struct cpufreq_policy *policy;
  537.     struct ip_cpu_info *l_ip_info;
  538. #endif
  539.  
  540.     nr_possible_cores = num_possible_cpus();
  541.  
  542.     pr_info("intelli_plug: version %d.%d by faux123\n",
  543.          INTELLI_PLUG_MAJOR_VERSION,
  544.          INTELLI_PLUG_MINOR_VERSION);
  545.  
  546.     if (nr_possible_cores > 2) {
  547.         nr_run_hysteresis = NR_RUN_HYSTERESIS_QUAD;
  548.         nr_run_profile_sel = 0;
  549.     } else {
  550.         nr_run_hysteresis = NR_RUN_HYSTERESIS_DUAL;
  551.         nr_run_profile_sel = NR_RUN_ECO_MODE_PROFILE;
  552.     }
  553.  
  554. #if defined (CONFIG_POWERSUSPEND) || defined(CONFIG_HAS_EARLYSUSPEND)
  555.     l_ip_info = &per_cpu(ip_info, 0);
  556.     policy = cpufreq_cpu_get(0);
  557.     l_ip_info->sys_max = policy->cpuinfo.max_freq;
  558.     l_ip_info->cur_max = policy->max;
  559. #endif
  560.  
  561.     rc = input_register_handler(&intelli_plug_input_handler);
  562. #ifdef CONFIG_POWERSUSPEND
  563.     register_power_suspend(&intelli_plug_power_suspend_driver);
  564. #endif
  565. #ifdef CONFIG_HAS_EARLYSUSPEND
  566.     register_early_suspend(&intelli_plug_early_suspend_driver);
  567. #endif
  568.     intelliplug_wq = alloc_workqueue("intelliplug",
  569.                 WQ_HIGHPRI | WQ_UNBOUND, 1);
  570.     intelliplug_boost_wq = alloc_workqueue("iplug_boost",
  571.                 WQ_HIGHPRI | WQ_UNBOUND, 1);
  572.     INIT_DELAYED_WORK(&intelli_plug_work, intelli_plug_work_fn);
  573.     INIT_DELAYED_WORK(&intelli_plug_boost, intelli_plug_boost_fn);
  574.     queue_delayed_work_on(0, intelliplug_wq, &intelli_plug_work,
  575.         msecs_to_jiffies(10));
  576.  
  577.     return 0;
  578. }
  579.  
  580. MODULE_AUTHOR("Paul Reioux <reioux@gmail.com>");
  581. MODULE_DESCRIPTION("'intell_plug' - An intelligent cpu hotplug driver for "
  582.     "Low Latency Frequency Transition capable processors");
  583. MODULE_LICENSE("GPL");
  584.  
  585. late_initcall(intelli_plug_init);
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement