/*
 * arch/arm/mach-msm/msm_mpdecision.c
 *
 * This program features:
 * -cpu auto-hotplug/unplug based on system load for MSM multicore cpus
 * -single core while screen is off
 * -extensive sysfs tuneables
 *
 * Copyright (c) 2012-2013, Dennis Rassmann <showp1984@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "msm_mpdecision.h"
#ifndef CONFIG_HAS_EARLYSUSPEND
#include <linux/lcd_notify.h>
#else
#include <linux/earlysuspend.h>
#endif
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <asm-generic/cputime.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/export.h>
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
#include <linux/input.h>
#include <linux/slab.h>
#endif
#include "acpuclock.h"

#define DEBUG 0

DEFINE_PER_CPU(struct msm_mpdec_cpudata_t, msm_mpdec_cpudata);
EXPORT_PER_CPU_SYMBOL_GPL(msm_mpdec_cpudata);

static bool mpdec_suspended = false;

#ifndef CONFIG_HAS_EARLYSUSPEND
static struct notifier_block msm_mpdec_lcd_notif;
#endif
static struct delayed_work msm_mpdec_work;
static struct workqueue_struct *msm_mpdec_workq;
static DEFINE_MUTEX(mpdec_msm_cpu_lock);
static DEFINE_MUTEX(mpdec_msm_susres_lock);
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
static struct workqueue_struct *mpdec_input_wq;
static DEFINE_PER_CPU(struct work_struct, mpdec_input_work);
static struct workqueue_struct *msm_mpdec_revib_workq;
static DEFINE_PER_CPU(struct delayed_work, msm_mpdec_revib_work);
#endif

static struct msm_mpdec_tuners {
        unsigned int startdelay;
        unsigned int delay;
        unsigned int pause;
        bool scroff_single_core;
        unsigned long int idle_freq;
        unsigned int max_cpus;
        unsigned int min_cpus;
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        bool boost_enabled;
        unsigned int boost_time;
        unsigned long int boost_freq[4];
#endif
} msm_mpdec_tuners_ins = {
        .startdelay = MSM_MPDEC_STARTDELAY,
        .delay = MSM_MPDEC_DELAY,
        .pause = MSM_MPDEC_PAUSE,
        .scroff_single_core = true,
        .idle_freq = MSM_MPDEC_IDLE_FREQ,
        .max_cpus = CONFIG_NR_CPUS,
        .min_cpus = 1,
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        .boost_enabled = true,
        .boost_time = MSM_MPDEC_BOOSTTIME,
        .boost_freq = {
                MSM_MPDEC_BOOSTFREQ_CPU0,
                MSM_MPDEC_BOOSTFREQ_CPU1,
                MSM_MPDEC_BOOSTFREQ_CPU2,
                MSM_MPDEC_BOOSTFREQ_CPU3
        },
#endif
};

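/*
 * Hotplug thresholds, indexed by (num_online_cpus() - 1) * 2:
 * NwNs_Threshold[index]     = run queue depth needed to bring another core up,
 * NwNs_Threshold[index + 1] = run queue depth below which a core may go down,
 * TwTs_Threshold[...]       = how long (in ms) the condition must persist.
 */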
static unsigned int NwNs_Threshold[8] = {12, 0, 20, 7, 25, 10, 0, 18};
static unsigned int TwTs_Threshold[8] = {140, 0, 140, 190, 140, 190, 0, 190};

extern unsigned int get_rq_info(void);
extern unsigned long acpuclk_get_rate(int);

unsigned int state = MSM_MPDEC_IDLE;
bool was_paused = false;
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
bool is_screen_on = true;
static int update_cpu_min_freq(struct cpufreq_policy *cpu_policy,
                               int cpu, int new_freq);
static void unboost_cpu(int cpu);
#endif
static cputime64_t mpdec_paused_until = 0;

static unsigned long get_rate(int cpu) {
        return acpuclk_get_rate(cpu);
}

static int get_slowest_cpu(void) {
        int i, cpu = 0;
        unsigned long rate, slow_rate = 0;

        for (i = 1; i < CONFIG_NR_CPUS; i++) {
                if (!cpu_online(i))
                        continue;
                rate = get_rate(i);
                if (slow_rate == 0) {
                        cpu = i;
                        slow_rate = rate;
                        continue;
                }
                if ((rate <= slow_rate) && (slow_rate != 0)) {
                        cpu = i;
                        slow_rate = rate;
                }
        }

        return cpu;
}

static unsigned long get_slowest_cpu_rate(void) {
        int i = 0;
        unsigned long rate, slow_rate = 0;

        for (i = 0; i < CONFIG_NR_CPUS; i++) {
                if (!cpu_online(i))
                        continue;
                rate = get_rate(i);
                if ((rate < slow_rate) && (slow_rate != 0)) {
                        slow_rate = rate;
                        continue;
                }
                if (slow_rate == 0) {
                        slow_rate = rate;
                }
        }

        return slow_rate;
}

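/*
 * Bring a core online / take it offline and keep the per-cpu bookkeeping
 * (online flag, uptime, hotplug counters) in sync. Both helpers are no-ops
 * if the core is already in the requested state.
 */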
static void mpdec_cpu_up(int cpu) {
        if (!cpu_online(cpu)) {
                mutex_lock(&per_cpu(msm_mpdec_cpudata, cpu).hotplug_mutex);
                cpu_up(cpu);
                per_cpu(msm_mpdec_cpudata, cpu).on_time = ktime_to_ms(ktime_get());
                per_cpu(msm_mpdec_cpudata, cpu).online = true;
                per_cpu(msm_mpdec_cpudata, cpu).times_cpu_hotplugged += 1;
                pr_info(MPDEC_TAG"CPU[%d] off->on | Mask=[%d%d%d%d]\n",
                        cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
                mutex_unlock(&per_cpu(msm_mpdec_cpudata, cpu).hotplug_mutex);
        }
}
EXPORT_SYMBOL_GPL(mpdec_cpu_up);

static void mpdec_cpu_down(int cpu) {
        cputime64_t on_time = 0;
        if (cpu_online(cpu)) {
                mutex_lock(&per_cpu(msm_mpdec_cpudata, cpu).hotplug_mutex);
                cpu_down(cpu);
                on_time = (ktime_to_ms(ktime_get()) - per_cpu(msm_mpdec_cpudata, cpu).on_time);
                per_cpu(msm_mpdec_cpudata, cpu).online = false;
                per_cpu(msm_mpdec_cpudata, cpu).on_time_total += on_time;
                per_cpu(msm_mpdec_cpudata, cpu).times_cpu_unplugged += 1;
                pr_info(MPDEC_TAG"CPU[%d] on->off | Mask=[%d%d%d%d] | time online: %llu\n",
                        cpu, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3), on_time);
                mutex_unlock(&per_cpu(msm_mpdec_cpudata, cpu).hotplug_mutex);
        }
}
EXPORT_SYMBOL_GPL(mpdec_cpu_down);

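/*
 * Core of the hotplug policy: compare the current run queue depth against the
 * NwNs thresholds for the number of cores currently online and check how long
 * that situation has persisted (TwTs). Returns MSM_MPDEC_UP, MSM_MPDEC_DOWN or
 * MSM_MPDEC_IDLE. An extra core is only requested while the slowest online
 * core runs above idle_freq, and a core is only released once the slowest core
 * has dropped to idle_freq or below, which avoids hotplug ping-ponging.
 */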
static int mp_decision(void) {
        static bool first_call = true;
        int new_state = MSM_MPDEC_IDLE;
        int nr_cpu_online;
        int index;
        unsigned int rq_depth;
        static cputime64_t total_time = 0;
        static cputime64_t last_time;
        cputime64_t current_time;
        cputime64_t this_time = 0;

        if (state == MSM_MPDEC_DISABLED)
                return MSM_MPDEC_DISABLED;

        current_time = ktime_to_ms(ktime_get());

        if (first_call) {
                first_call = false;
        } else {
                this_time = current_time - last_time;
        }
        total_time += this_time;

        rq_depth = get_rq_info();
        nr_cpu_online = num_online_cpus();

        if (nr_cpu_online) {
                index = (nr_cpu_online - 1) * 2;
                if ((nr_cpu_online < CONFIG_NR_CPUS) && (rq_depth >= NwNs_Threshold[index])) {
                        if ((total_time >= TwTs_Threshold[index]) &&
                            (nr_cpu_online < msm_mpdec_tuners_ins.max_cpus)) {
                                new_state = MSM_MPDEC_UP;
                                if (get_slowest_cpu_rate() <= msm_mpdec_tuners_ins.idle_freq)
                                        new_state = MSM_MPDEC_IDLE;
                        }
                } else if ((nr_cpu_online > 1) && (rq_depth <= NwNs_Threshold[index+1])) {
                        if ((total_time >= TwTs_Threshold[index+1]) &&
                            (nr_cpu_online > msm_mpdec_tuners_ins.min_cpus)) {
                                new_state = MSM_MPDEC_DOWN;
                                if (get_slowest_cpu_rate() > msm_mpdec_tuners_ins.idle_freq)
                                        new_state = MSM_MPDEC_IDLE;
                        }
                } else {
                        new_state = MSM_MPDEC_IDLE;
                        total_time = 0;
                }
        } else {
                total_time = 0;
        }

        if (new_state != MSM_MPDEC_IDLE) {
                total_time = 0;
        }

        last_time = ktime_to_ms(ktime_get());
#if DEBUG
        pr_info(MPDEC_TAG"[DEBUG] rq: %u, new_state: %i | Mask=[%d%d%d%d]\n",
                rq_depth, new_state, cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
#endif
        return new_state;
}

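/*
 * Periodic worker: runs every "delay" ms, asks mp_decision() what to do and
 * either unplugs the slowest online core or brings the next offline core up.
 * If a core was hotplugged from outside mpdecision, the driver pauses itself
 * for "pause" ms and resyncs its per-cpu online flags before acting again.
 */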
static void msm_mpdec_work_thread(struct work_struct *work) {
        unsigned int cpu = nr_cpu_ids;

        /* Check if we are paused */
        if (mpdec_paused_until >= ktime_to_ms(ktime_get()))
                goto out;

        if (mpdec_suspended == true)
                goto out;

        if (!mutex_trylock(&mpdec_msm_cpu_lock))
                goto out;

        /* if sth messed with the cpus, update the check vars so we can proceed */
        if (was_paused) {
                for_each_possible_cpu(cpu) {
                        if (cpu_online(cpu))
                                per_cpu(msm_mpdec_cpudata, cpu).online = true;
                        else if (!cpu_online(cpu))
                                per_cpu(msm_mpdec_cpudata, cpu).online = false;
                }
                was_paused = false;
        }

        state = mp_decision();
        switch (state) {
        case MSM_MPDEC_DISABLED:
        case MSM_MPDEC_IDLE:
                break;
        case MSM_MPDEC_DOWN:
                cpu = get_slowest_cpu();
                if (cpu < nr_cpu_ids) {
                        if ((per_cpu(msm_mpdec_cpudata, cpu).online == true) && (cpu_online(cpu))) {
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
                                unboost_cpu(cpu);
#endif
                                mpdec_cpu_down(cpu);
                        } else if (per_cpu(msm_mpdec_cpudata, cpu).online != cpu_online(cpu)) {
                                pr_info(MPDEC_TAG"CPU[%d] was controlled outside of mpdecision! | pausing [%d]ms\n",
                                        cpu, msm_mpdec_tuners_ins.pause);
                                mpdec_paused_until = ktime_to_ms(ktime_get()) + msm_mpdec_tuners_ins.pause;
                                was_paused = true;
                        }
                }
                break;
        case MSM_MPDEC_UP:
                cpu = cpumask_next_zero(0, cpu_online_mask);
                if (cpu < nr_cpu_ids) {
                        if ((per_cpu(msm_mpdec_cpudata, cpu).online == false) && (!cpu_online(cpu))) {
                                mpdec_cpu_up(cpu);
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
                                unboost_cpu(cpu);
#endif
                        } else if (per_cpu(msm_mpdec_cpudata, cpu).online != cpu_online(cpu)) {
                                pr_info(MPDEC_TAG"CPU[%d] was controlled outside of mpdecision! | pausing [%d]ms\n",
                                        cpu, msm_mpdec_tuners_ins.pause);
                                mpdec_paused_until = ktime_to_ms(ktime_get()) + msm_mpdec_tuners_ins.pause;
                                was_paused = true;
                        }
                }
                break;
        default:
                pr_err(MPDEC_TAG"%s: invalid mpdec hotplug state %d\n",
                       __func__, state);
        }
        mutex_unlock(&mpdec_msm_cpu_lock);

out:
        if (state != MSM_MPDEC_DISABLED)
                queue_delayed_work(msm_mpdec_workq, &msm_mpdec_work,
                                   msecs_to_jiffies(msm_mpdec_tuners_ins.delay));
        return;
}

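/*
 * Input boost: on every touch/key event the minimum frequency of each online
 * core is raised to its boost_freq for boost_time ms and then restored by the
 * "revib" worker. The previous minimum is remembered in norm_min_freq.
 */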
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
static int update_cpu_min_freq(struct cpufreq_policy *cpu_policy,
                               int cpu, int new_freq) {
        int ret = 0;

        if (!cpu_policy)
                return -EINVAL;

        cpufreq_verify_within_limits(cpu_policy, new_freq, cpu_policy->max);
        cpu_policy->user_policy.min = new_freq;

        ret = cpufreq_update_policy(cpu);
        if (!ret) {
                pr_debug(MPDEC_TAG"Touch event! Setting CPU%d min frequency to %d\n",
                         cpu, new_freq);
        }
        return ret;
}

static void unboost_cpu(int cpu) {
        struct cpufreq_policy *cpu_policy = NULL;

        if (cpu_online(cpu)) {
                if (per_cpu(msm_mpdec_cpudata, cpu).is_boosted) {
                        if (mutex_trylock(&per_cpu(msm_mpdec_cpudata, cpu).unboost_mutex)) {
                                cpu_policy = cpufreq_cpu_get(cpu);
                                if (!cpu_policy) {
                                        pr_debug(MPDEC_TAG"NULL policy on cpu %d\n", cpu);
                                        /* drop the trylocked mutex before bailing out */
                                        mutex_unlock(&per_cpu(msm_mpdec_cpudata, cpu).unboost_mutex);
                                        return;
                                }
#if DEBUG
                                pr_info(MPDEC_TAG"unboosted cpu%i to %lu\n", cpu, per_cpu(msm_mpdec_cpudata, cpu).norm_min_freq);
#endif
                                per_cpu(msm_mpdec_cpudata, cpu).is_boosted = false;
                                per_cpu(msm_mpdec_cpudata, cpu).revib_wq_running = false;
                                if ((cpu_policy->min != per_cpu(msm_mpdec_cpudata, cpu).boost_freq) &&
                                    (cpu_policy->min != per_cpu(msm_mpdec_cpudata, cpu).norm_min_freq)) {
                                        pr_info(MPDEC_TAG"cpu%u min was changed while boosted (%lu->%u), using new min\n",
                                                cpu, per_cpu(msm_mpdec_cpudata, cpu).norm_min_freq, cpu_policy->min);
                                        per_cpu(msm_mpdec_cpudata, cpu).norm_min_freq = cpu_policy->min;
                                }
                                update_cpu_min_freq(cpu_policy, cpu, per_cpu(msm_mpdec_cpudata, cpu).norm_min_freq);
                                cpufreq_cpu_put(cpu_policy);
                                mutex_unlock(&per_cpu(msm_mpdec_cpudata, cpu).unboost_mutex);
                        }
                }
        }

        return;
}

static void msm_mpdec_revib_work_thread(struct work_struct *work) {
        int cpu = smp_processor_id();

        if (per_cpu(msm_mpdec_cpudata, cpu).is_boosted) {
                per_cpu(msm_mpdec_cpudata, cpu).revib_wq_running = true;
                if (ktime_to_ms(ktime_get()) > per_cpu(msm_mpdec_cpudata, cpu).boost_until) {
                        unboost_cpu(cpu);
                } else {
                        queue_delayed_work_on(
                                cpu,
                                msm_mpdec_revib_workq,
                                &per_cpu(msm_mpdec_revib_work, cpu),
                                msecs_to_jiffies((per_cpu(msm_mpdec_cpudata, cpu).boost_until - ktime_to_ms(ktime_get())))
                        );
                }
        } else {
                per_cpu(msm_mpdec_cpudata, cpu).revib_wq_running = false;
        }
        return;
}

static void mpdec_input_callback(struct work_struct *unused) {
        struct cpufreq_policy *cpu_policy = NULL;
        int cpu = smp_processor_id();
        bool boosted = false;

        if (!per_cpu(msm_mpdec_cpudata, cpu).is_boosted) {
                if (mutex_trylock(&per_cpu(msm_mpdec_cpudata, cpu).boost_mutex)) {
                        cpu_policy = cpufreq_cpu_get(cpu);
                        if (!cpu_policy) {
                                pr_debug(MPDEC_TAG"NULL policy on cpu %d\n", cpu);
                                /* drop the trylocked mutex before bailing out */
                                mutex_unlock(&per_cpu(msm_mpdec_cpudata, cpu).boost_mutex);
                                return;
                        }
                        per_cpu(msm_mpdec_cpudata, cpu).norm_min_freq = cpu_policy->min;

                        /* check if boost freq is > minfreq */
                        cpufreq_verify_within_limits(cpu_policy, cpu_policy->min, per_cpu(msm_mpdec_cpudata, cpu).boost_freq);

                        update_cpu_min_freq(cpu_policy, cpu, per_cpu(msm_mpdec_cpudata, cpu).boost_freq);
#if DEBUG
                        pr_info(MPDEC_TAG"boosted cpu%i to %lu\n", cpu, per_cpu(msm_mpdec_cpudata, cpu).boost_freq);
#endif
                        per_cpu(msm_mpdec_cpudata, cpu).is_boosted = true;
                        per_cpu(msm_mpdec_cpudata, cpu).boost_until = ktime_to_ms(ktime_get()) + msm_mpdec_tuners_ins.boost_time;
                        boosted = true;
                        cpufreq_cpu_put(cpu_policy);
                        mutex_unlock(&per_cpu(msm_mpdec_cpudata, cpu).boost_mutex);
                }
        } else {
                boosted = true;
        }
        if (boosted && !per_cpu(msm_mpdec_cpudata, cpu).revib_wq_running) {
                per_cpu(msm_mpdec_cpudata, cpu).revib_wq_running = true;
                queue_delayed_work_on(
                        cpu,
                        msm_mpdec_revib_workq,
                        &per_cpu(msm_mpdec_revib_work, cpu),
                        msecs_to_jiffies(msm_mpdec_tuners_ins.boost_time)
                );
        } else if (boosted && per_cpu(msm_mpdec_cpudata, cpu).revib_wq_running) {
                per_cpu(msm_mpdec_cpudata, cpu).boost_until = ktime_to_ms(ktime_get()) + msm_mpdec_tuners_ins.boost_time;
        }

        return;
}

#ifdef CONFIG_BRICKED_THERMAL
extern int bricked_thermal_throttled;
#endif

static void mpdec_input_event(struct input_handle *handle, unsigned int type,
                              unsigned int code, int value) {
        int i = 0;

#ifdef CONFIG_BRICKED_THERMAL
        if (bricked_thermal_throttled > 0)
                return;
#endif

        if (!msm_mpdec_tuners_ins.boost_enabled)
                return;

        if (!is_screen_on)
                return;

        for_each_online_cpu(i) {
                queue_work_on(i, mpdec_input_wq, &per_cpu(mpdec_input_work, i));
        }
}

static int input_dev_filter(const char *input_dev_name) {
        if (strstr(input_dev_name, "touch") ||
            strstr(input_dev_name, "key") ||
            strstr(input_dev_name, "power") ||
            strstr(input_dev_name, "pwr") ||
            strstr(input_dev_name, "lid")) {
                return 0;
        } else {
                return 1;
        }
}

static int mpdec_input_connect(struct input_handler *handler,
                               struct input_dev *dev, const struct input_device_id *id) {
        struct input_handle *handle;
        int error;

        if (input_dev_filter(dev->name))
                return -ENODEV;

        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "mpdec";

        error = input_register_handle(handle);
        if (error)
                goto err2;

        error = input_open_device(handle);
        if (error)
                goto err1;

        return 0;
err1:
        input_unregister_handle(handle);
err2:
        kfree(handle);
        return error;
}

static void mpdec_input_disconnect(struct input_handle *handle) {
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

static const struct input_device_id mpdec_ids[] = {
        { .driver_info = 1 },
        { },
};

static struct input_handler mpdec_input_handler = {
        .event = mpdec_input_event,
        .connect = mpdec_input_connect,
        .disconnect = mpdec_input_disconnect,
        .name = "mpdec_inputreq",
        .id_table = mpdec_ids,
};
#endif

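/*
 * Screen-off handling: when scroff_single_core is set, all secondary cores
 * are taken offline on suspend and the hotplug worker is stopped; on resume
 * the worker is restarted and the min/max_cpus limits are re-applied.
 */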
static void msm_mpdec_suspend(struct work_struct *msm_mpdec_suspend_work) {
        int cpu = nr_cpu_ids;
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        is_screen_on = false;
#endif

        if (!msm_mpdec_tuners_ins.scroff_single_core) {
                pr_info(MPDEC_TAG"Screen -> off\n");
                return;
        }

        /* main work thread can sleep now */
        cancel_delayed_work_sync(&msm_mpdec_work);

        for_each_possible_cpu(cpu) {
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
                unboost_cpu(cpu);
#endif
                if ((cpu >= 1) && (cpu_online(cpu))) {
                        mpdec_cpu_down(cpu);
                }
        }
        mpdec_suspended = true;

        pr_info(MPDEC_TAG"Screen -> off. Deactivated mpdecision.\n");
}
static DECLARE_WORK(msm_mpdec_suspend_work, msm_mpdec_suspend);

static void msm_mpdec_resume(struct work_struct *msm_mpdec_suspend_work) {
        int cpu = nr_cpu_ids;
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        is_screen_on = true;
#endif

        if (!mpdec_suspended)
                return;

        mpdec_suspended = false;

        if (msm_mpdec_tuners_ins.scroff_single_core) {
                /* wake up main work thread */
                was_paused = true;
                queue_delayed_work(msm_mpdec_workq, &msm_mpdec_work, 0);
                /* restore min/max cpus limits */
                for (cpu = 1; cpu < CONFIG_NR_CPUS; cpu++) {
                        if (cpu < msm_mpdec_tuners_ins.min_cpus) {
                                if (!cpu_online(cpu))
                                        mpdec_cpu_up(cpu);
                        } else if (cpu > msm_mpdec_tuners_ins.max_cpus) {
                                if (cpu_online(cpu))
                                        mpdec_cpu_down(cpu);
                        }
                }
                pr_info(MPDEC_TAG"Screen -> on. Activated mpdecision. | Mask=[%d%d%d%d]\n",
                        cpu_online(0), cpu_online(1), cpu_online(2), cpu_online(3));
        } else {
                pr_info(MPDEC_TAG"Screen -> on\n");
        }
}
static DECLARE_WORK(msm_mpdec_resume_work, msm_mpdec_resume);

#ifndef CONFIG_HAS_EARLYSUSPEND
static int msm_mpdec_lcd_notifier_callback(struct notifier_block *this,
                                           unsigned long event, void *data) {
        pr_debug("%s: event = %lu\n", __func__, event);

        switch (event) {
        case LCD_EVENT_OFF_START:
                mutex_lock(&mpdec_msm_susres_lock);
                schedule_work(&msm_mpdec_suspend_work);
                break;
        case LCD_EVENT_ON_START:
                mutex_lock(&mpdec_msm_susres_lock);
                schedule_work(&msm_mpdec_resume_work);
                break;
        case LCD_EVENT_OFF_END:
                mutex_unlock(&mpdec_msm_susres_lock);
                break;
        case LCD_EVENT_ON_END:
                mutex_unlock(&mpdec_msm_susres_lock);
                break;
        default:
                break;
        }

        return 0;
}
#else
static void msm_mpdec_early_suspend(struct early_suspend *h) {
        mutex_lock(&mpdec_msm_susres_lock);
        schedule_work(&msm_mpdec_suspend_work);
        mutex_unlock(&mpdec_msm_susres_lock);
}

static void msm_mpdec_late_resume(struct early_suspend *h) {
        mutex_lock(&mpdec_msm_susres_lock);
        schedule_work(&msm_mpdec_resume_work);
        mutex_unlock(&mpdec_msm_susres_lock);
}

static struct early_suspend msm_mpdec_early_suspend_handler = {
        .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN,
        .suspend = msm_mpdec_early_suspend,
        .resume = msm_mpdec_late_resume,
};
#endif

/**************************** SYSFS START ****************************/
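/*
 * The kobject below is created under kernel_kobj, so the tuneables end up in
 * /sys/kernel/msm_mpdecision/conf/ and the runtime statistics in
 * /sys/kernel/msm_mpdecision/stats/, e.g.:
 *   echo 1 > /sys/kernel/msm_mpdecision/conf/scroff_single_core
 *   cat /sys/kernel/msm_mpdecision/stats/time_cpus_on
 */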
struct kobject *msm_mpdec_kobject;

#define show_one(file_name, object)                                     \
static ssize_t show_##file_name                                         \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return sprintf(buf, "%u\n", msm_mpdec_tuners_ins.object);       \
}

show_one(startdelay, startdelay);
show_one(delay, delay);
show_one(pause, pause);
show_one(scroff_single_core, scroff_single_core);
show_one(min_cpus, min_cpus);
show_one(max_cpus, max_cpus);
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
show_one(boost_enabled, boost_enabled);
show_one(boost_time, boost_time);
#endif

#define show_one_twts(file_name, arraypos)                              \
static ssize_t show_##file_name                                         \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return sprintf(buf, "%u\n", TwTs_Threshold[arraypos]);          \
}
show_one_twts(twts_threshold_0, 0);
show_one_twts(twts_threshold_1, 1);
show_one_twts(twts_threshold_2, 2);
show_one_twts(twts_threshold_3, 3);
show_one_twts(twts_threshold_4, 4);
show_one_twts(twts_threshold_5, 5);
show_one_twts(twts_threshold_6, 6);
show_one_twts(twts_threshold_7, 7);

#define store_one_twts(file_name, arraypos)                              \
static ssize_t store_##file_name                                         \
(struct kobject *a, struct attribute *b, const char *buf, size_t count)  \
{                                                                        \
        unsigned int input;                                              \
        int ret;                                                         \
        ret = sscanf(buf, "%u", &input);                                 \
        if (ret != 1)                                                    \
                return -EINVAL;                                          \
        TwTs_Threshold[arraypos] = input;                                \
        return count;                                                    \
}                                                                        \
define_one_global_rw(file_name);
store_one_twts(twts_threshold_0, 0);
store_one_twts(twts_threshold_1, 1);
store_one_twts(twts_threshold_2, 2);
store_one_twts(twts_threshold_3, 3);
store_one_twts(twts_threshold_4, 4);
store_one_twts(twts_threshold_5, 5);
store_one_twts(twts_threshold_6, 6);
store_one_twts(twts_threshold_7, 7);

#define show_one_nwns(file_name, arraypos)                              \
static ssize_t show_##file_name                                         \
(struct kobject *kobj, struct attribute *attr, char *buf)               \
{                                                                       \
        return sprintf(buf, "%u\n", NwNs_Threshold[arraypos]);          \
}
show_one_nwns(nwns_threshold_0, 0);
show_one_nwns(nwns_threshold_1, 1);
show_one_nwns(nwns_threshold_2, 2);
show_one_nwns(nwns_threshold_3, 3);
show_one_nwns(nwns_threshold_4, 4);
show_one_nwns(nwns_threshold_5, 5);
show_one_nwns(nwns_threshold_6, 6);
show_one_nwns(nwns_threshold_7, 7);

#define store_one_nwns(file_name, arraypos)                              \
static ssize_t store_##file_name                                         \
(struct kobject *a, struct attribute *b, const char *buf, size_t count)  \
{                                                                        \
        unsigned int input;                                              \
        int ret;                                                         \
        ret = sscanf(buf, "%u", &input);                                 \
        if (ret != 1)                                                    \
                return -EINVAL;                                          \
        NwNs_Threshold[arraypos] = input;                                \
        return count;                                                    \
}                                                                        \
define_one_global_rw(file_name);
store_one_nwns(nwns_threshold_0, 0);
store_one_nwns(nwns_threshold_1, 1);
store_one_nwns(nwns_threshold_2, 2);
store_one_nwns(nwns_threshold_3, 3);
store_one_nwns(nwns_threshold_4, 4);
store_one_nwns(nwns_threshold_5, 5);
store_one_nwns(nwns_threshold_6, 6);
store_one_nwns(nwns_threshold_7, 7);

static ssize_t show_idle_freq(struct kobject *kobj, struct attribute *attr,
                              char *buf)
{
        return sprintf(buf, "%lu\n", msm_mpdec_tuners_ins.idle_freq);
}

static ssize_t show_enabled(struct kobject *a, struct attribute *b,
                            char *buf)
{
        unsigned int enabled;
        switch (state) {
        case MSM_MPDEC_DISABLED:
                enabled = 0;
                break;
        case MSM_MPDEC_IDLE:
        case MSM_MPDEC_DOWN:
        case MSM_MPDEC_UP:
                enabled = 1;
                break;
        default:
                enabled = 333;
        }
        return sprintf(buf, "%u\n", enabled);
}

static ssize_t store_startdelay(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        msm_mpdec_tuners_ins.startdelay = input;

        return count;
}

static ssize_t store_delay(struct kobject *a, struct attribute *b,
                           const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        msm_mpdec_tuners_ins.delay = input;

        return count;
}

static ssize_t store_pause(struct kobject *a, struct attribute *b,
                           const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        msm_mpdec_tuners_ins.pause = input;

        return count;
}

static ssize_t store_idle_freq(struct kobject *a, struct attribute *b,
                               const char *buf, size_t count)
{
        long unsigned int input;
        int ret;
        ret = sscanf(buf, "%lu", &input);
        if (ret != 1)
                return -EINVAL;

        msm_mpdec_tuners_ins.idle_freq = input;

        return count;
}

static ssize_t store_scroff_single_core(struct kobject *a, struct attribute *b,
                                        const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        switch (buf[0]) {
        case '0':
        case '1':
                msm_mpdec_tuners_ins.scroff_single_core = input;
                break;
        default:
                ret = -EINVAL;
        }
        return count;
}

static ssize_t store_max_cpus(struct kobject *a, struct attribute *b,
                              const char *buf, size_t count)
{
        unsigned int input;
        int ret, cpu;
        ret = sscanf(buf, "%u", &input);
        if ((ret != 1) || input > CONFIG_NR_CPUS || input < msm_mpdec_tuners_ins.min_cpus)
                return -EINVAL;

        msm_mpdec_tuners_ins.max_cpus = input;
        if (num_online_cpus() > input) {
                /* highest valid cpu id is CONFIG_NR_CPUS - 1 */
                for (cpu = (CONFIG_NR_CPUS - 1); cpu > 0; cpu--) {
                        if (num_online_cpus() <= input)
                                break;
                        if (!cpu_online(cpu))
                                continue;
                        mpdec_cpu_down(cpu);
                }
                pr_info(MPDEC_TAG"max_cpus set to %u. Affected CPUs were unplugged!\n", input);
        }

        return count;
}

static ssize_t store_min_cpus(struct kobject *a, struct attribute *b,
                              const char *buf, size_t count)
{
        unsigned int input;
        int ret, cpu;
        ret = sscanf(buf, "%u", &input);
        if ((ret != 1) || input < 1 || input > msm_mpdec_tuners_ins.max_cpus)
                return -EINVAL;

        msm_mpdec_tuners_ins.min_cpus = input;
        if (num_online_cpus() < input) {
                for (cpu = 1; cpu < CONFIG_NR_CPUS; cpu++) {
                        if (num_online_cpus() >= input)
                                break;
                        if (cpu_online(cpu))
                                continue;
                        mpdec_cpu_up(cpu);
                }
                pr_info(MPDEC_TAG"min_cpus set to %u. Affected CPUs were hotplugged!\n", input);
        }

        return count;
}

static ssize_t store_enabled(struct kobject *a, struct attribute *b,
                             const char *buf, size_t count)
{
        unsigned int cpu, input, enabled;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        switch (state) {
        case MSM_MPDEC_DISABLED:
                enabled = 0;
                break;
        case MSM_MPDEC_IDLE:
        case MSM_MPDEC_DOWN:
        case MSM_MPDEC_UP:
                enabled = 1;
                break;
        default:
                enabled = 333;
        }

        /* compare the requested state as a number, not as its ASCII code */
        if ((buf[0] - '0') == enabled)
                return -EINVAL;

        switch (buf[0]) {
        case '0':
                state = MSM_MPDEC_DISABLED;
                pr_info(MPDEC_TAG"nap time... Hot plugging offline CPUs...\n");
                for (cpu = 1; cpu < CONFIG_NR_CPUS; cpu++)
                        if (!cpu_online(cpu))
                                mpdec_cpu_up(cpu);
                break;
        case '1':
                state = MSM_MPDEC_IDLE;
                was_paused = true;
                queue_delayed_work(msm_mpdec_workq, &msm_mpdec_work,
                                   msecs_to_jiffies(msm_mpdec_tuners_ins.delay));
                pr_info(MPDEC_TAG"firing up mpdecision...\n");
                break;
        default:
                ret = -EINVAL;
        }
        return count;
}

#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
static ssize_t store_boost_enabled(struct kobject *a, struct attribute *b,
                                   const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        msm_mpdec_tuners_ins.boost_enabled = input;

        return count;
}

static ssize_t store_boost_time(struct kobject *a, struct attribute *b,
                                const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        msm_mpdec_tuners_ins.boost_time = input;

        return count;
}

static ssize_t show_boost_freqs(struct kobject *a, struct attribute *b,
                                char *buf)
{
        ssize_t len = 0;
        int cpu = 0;

        for_each_present_cpu(cpu) {
                len += sprintf(buf + len, "%lu\n", per_cpu(msm_mpdec_cpudata, cpu).boost_freq);
        }
        return len;
}

static ssize_t store_boost_freqs(struct kobject *a, struct attribute *b,
                                 const char *buf, size_t count)
{
        int i = 0;
        unsigned int cpu = 0;
        long unsigned int hz = 0;
        const char *chz = NULL;

        for (i = 0; i < count; i++) {
                if (buf[i] == ' ') {
                        sscanf(&buf[(i-1)], "%u", &cpu);
                        chz = &buf[(i+1)];
                }
        }
        /* reject input that does not contain a "<cpu> <freq>" pair */
        if (!chz)
                return -EINVAL;
        sscanf(chz, "%lu", &hz);

        /* if this cpu is currently boosted, unboost */
        unboost_cpu(cpu);

        /* update boost freq */
        per_cpu(msm_mpdec_cpudata, cpu).boost_freq = hz;

        return count;
}
define_one_global_rw(boost_freqs);
#endif

define_one_global_rw(startdelay);
define_one_global_rw(delay);
define_one_global_rw(pause);
define_one_global_rw(scroff_single_core);
define_one_global_rw(idle_freq);
define_one_global_rw(min_cpus);
define_one_global_rw(max_cpus);
define_one_global_rw(enabled);
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
define_one_global_rw(boost_enabled);
define_one_global_rw(boost_time);
#endif

static struct attribute *msm_mpdec_attributes[] = {
        &startdelay.attr,
        &delay.attr,
        &pause.attr,
        &scroff_single_core.attr,
        &idle_freq.attr,
        &min_cpus.attr,
        &max_cpus.attr,
        &enabled.attr,
        &twts_threshold_0.attr,
        &twts_threshold_1.attr,
        &twts_threshold_2.attr,
        &twts_threshold_3.attr,
        &twts_threshold_4.attr,
        &twts_threshold_5.attr,
        &twts_threshold_6.attr,
        &twts_threshold_7.attr,
        &nwns_threshold_0.attr,
        &nwns_threshold_1.attr,
        &nwns_threshold_2.attr,
        &nwns_threshold_3.attr,
        &nwns_threshold_4.attr,
        &nwns_threshold_5.attr,
        &nwns_threshold_6.attr,
        &nwns_threshold_7.attr,
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        &boost_freqs.attr,
        &boost_enabled.attr,
        &boost_time.attr,
#endif
        NULL
};

static struct attribute_group msm_mpdec_attr_group = {
        .attrs = msm_mpdec_attributes,
        .name = "conf",
};

/********* STATS START *********/

static ssize_t show_time_cpus_on(struct kobject *a, struct attribute *b,
                                 char *buf)
{
        ssize_t len = 0;
        int cpu = 0;

        for_each_possible_cpu(cpu) {
                if (cpu_online(cpu)) {
                        len += sprintf(
                                buf + len, "%i %llu\n", cpu,
                                (per_cpu(msm_mpdec_cpudata, cpu).on_time_total +
                                 (ktime_to_ms(ktime_get()) -
                                  per_cpu(msm_mpdec_cpudata, cpu).on_time))
                        );
                } else
                        len += sprintf(buf + len, "%i %llu\n", cpu, per_cpu(msm_mpdec_cpudata, cpu).on_time_total);
        }

        return len;
}
define_one_global_ro(time_cpus_on);

static ssize_t show_times_cpus_hotplugged(struct kobject *a, struct attribute *b,
                                          char *buf)
{
        ssize_t len = 0;
        int cpu = 0;

        for_each_possible_cpu(cpu) {
                len += sprintf(buf + len, "%i %llu\n", cpu, per_cpu(msm_mpdec_cpudata, cpu).times_cpu_hotplugged);
        }

        return len;
}
define_one_global_ro(times_cpus_hotplugged);

static ssize_t show_times_cpus_unplugged(struct kobject *a, struct attribute *b,
                                         char *buf)
{
        ssize_t len = 0;
        int cpu = 0;

        for_each_possible_cpu(cpu) {
                len += sprintf(buf + len, "%i %llu\n", cpu, per_cpu(msm_mpdec_cpudata, cpu).times_cpu_unplugged);
        }

        return len;
}
define_one_global_ro(times_cpus_unplugged);

static struct attribute *msm_mpdec_stats_attributes[] = {
        &time_cpus_on.attr,
        &times_cpus_hotplugged.attr,
        &times_cpus_unplugged.attr,
        NULL
};

static struct attribute_group msm_mpdec_stats_attr_group = {
        .attrs = msm_mpdec_stats_attributes,
        .name = "stats",
};
/**************************** SYSFS END ****************************/

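/*
 * Module init: set up the per-cpu bookkeeping, the hotplug workqueue (and,
 * if enabled, the input boost workqueues and input handler), register the
 * sysfs groups and hook into the LCD notifier or earlysuspend for screen
 * on/off events.
 */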
static int __init msm_mpdec_init(void) {
        int cpu, rc, err = 0;
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        int i;
        unsigned long int boost_freq = 0;
#endif

        mpdec_suspended = false;
        for_each_possible_cpu(cpu) {
                mutex_init(&(per_cpu(msm_mpdec_cpudata, cpu).hotplug_mutex));
                per_cpu(msm_mpdec_cpudata, cpu).online = true;
                per_cpu(msm_mpdec_cpudata, cpu).on_time_total = 0;
                per_cpu(msm_mpdec_cpudata, cpu).times_cpu_unplugged = 0;
                per_cpu(msm_mpdec_cpudata, cpu).times_cpu_hotplugged = 0;
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
                per_cpu(msm_mpdec_cpudata, cpu).norm_min_freq = CONFIG_MSM_CPU_FREQ_MIN;
                switch (cpu) {
                case 0:
                case 1:
                case 2:
                        boost_freq = msm_mpdec_tuners_ins.boost_freq[cpu];
                        break;
                default:
                        boost_freq = msm_mpdec_tuners_ins.boost_freq[3];
                        break;
                }
                per_cpu(msm_mpdec_cpudata, cpu).boost_freq = boost_freq;
                per_cpu(msm_mpdec_cpudata, cpu).is_boosted = false;
                per_cpu(msm_mpdec_cpudata, cpu).revib_wq_running = false;
                per_cpu(msm_mpdec_cpudata, cpu).boost_until = 0;
                mutex_init(&(per_cpu(msm_mpdec_cpudata, cpu).boost_mutex));
                mutex_init(&(per_cpu(msm_mpdec_cpudata, cpu).unboost_mutex));
#endif
        }

        was_paused = true;

        msm_mpdec_workq = alloc_workqueue("mpdec",
                                          WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE,
                                          1);
        if (!msm_mpdec_workq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&msm_mpdec_work, msm_mpdec_work_thread);

#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        mpdec_input_wq = create_workqueue("mpdeciwq");
        if (!mpdec_input_wq) {
                printk(KERN_ERR "%s: Failed to create mpdeciwq workqueue\n", __func__);
                return -EFAULT;
        }
        msm_mpdec_revib_workq = create_workqueue("mpdecribwq");
        if (!msm_mpdec_revib_workq) {
                printk(KERN_ERR "%s: Failed to create mpdecrevibwq workqueue\n", __func__);
                return -EFAULT;
        }
        for_each_possible_cpu(i) {
                INIT_WORK(&per_cpu(mpdec_input_work, i), mpdec_input_callback);
                INIT_DELAYED_WORK(&per_cpu(msm_mpdec_revib_work, i), msm_mpdec_revib_work_thread);
        }
        rc = input_register_handler(&mpdec_input_handler);
#endif

        if (state != MSM_MPDEC_DISABLED)
                queue_delayed_work(msm_mpdec_workq, &msm_mpdec_work,
                                   msecs_to_jiffies(msm_mpdec_tuners_ins.startdelay));

        msm_mpdec_kobject = kobject_create_and_add("msm_mpdecision", kernel_kobj);
        if (msm_mpdec_kobject) {
                rc = sysfs_create_group(msm_mpdec_kobject,
                                        &msm_mpdec_attr_group);
                if (rc) {
                        pr_warn(MPDEC_TAG"sysfs: ERROR, could not create sysfs group");
                }
                rc = sysfs_create_group(msm_mpdec_kobject,
                                        &msm_mpdec_stats_attr_group);
                if (rc) {
                        pr_warn(MPDEC_TAG"sysfs: ERROR, could not create sysfs stats group");
                }
        } else
                pr_warn(MPDEC_TAG"sysfs: ERROR, could not create sysfs kobj");

        pr_info(MPDEC_TAG"%s init complete.", __func__);

#ifndef CONFIG_HAS_EARLYSUSPEND
        msm_mpdec_lcd_notif.notifier_call = msm_mpdec_lcd_notifier_callback;
        if (lcd_register_client(&msm_mpdec_lcd_notif) != 0) {
                pr_err("%s: Failed to register lcd callback\n", __func__);
                err = -EINVAL;
                lcd_unregister_client(&msm_mpdec_lcd_notif);
        }
#else
        register_early_suspend(&msm_mpdec_early_suspend_handler);
#endif

        return err;
}
late_initcall(msm_mpdec_init);

void msm_mpdec_exit(void) {
#ifndef CONFIG_HAS_EARLYSUSPEND
        lcd_unregister_client(&msm_mpdec_lcd_notif);
#else
#ifdef CONFIG_MSM_MPDEC_INPUTBOOST_CPUMIN
        input_unregister_handler(&mpdec_input_handler);
        destroy_workqueue(msm_mpdec_revib_workq);
        destroy_workqueue(mpdec_input_wq);
#endif
        destroy_workqueue(msm_mpdec_workq);
#endif
}