/* arch/arm/mach-msm/acpuclock.c
 *
 * MSM architecture clock driver
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2007 QUALCOMM Incorporated
 * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
 * Author: San Mehat <san@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/sort.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
#include <linux/debugfs.h>
#include <linux/poll.h>

#include "proc_comm.h"
#include "acpuclock.h"

#define PERF_SWITCH_DEBUG 0
#define PERF_SWITCH_STEP_DEBUG 0

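/*
 * Overclock target for PLL2 in kHz. acpuclk_init() rounds this down to a
 * multiple of the 19.2 MHz reference, reprograms PLL2 through the register
 * at MSM_CLK_CTL_BASE + 0x33c and rescales the PLL2 entries of the
 * frequency table accordingly.
 */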
static int oc_freq_khz = 550000;

struct clock_state
{
	struct clkctl_acpu_speed *current_speed;
	struct mutex lock;
	uint32_t acpu_switch_time_us;
	uint32_t max_speed_delta_khz;
	uint32_t vdd_switch_time_us;
	unsigned long power_collapse_khz;
	unsigned long wait_for_irq_khz;
	unsigned int max_axi_khz;
};

struct clkctl_acpu_speed {
	unsigned int use_for_scaling;
	unsigned int a11clk_khz;
	int pll;
	unsigned int a11clk_src_sel;
	unsigned int a11clk_src_div;
	unsigned int ahbclk_khz;
	unsigned int ahbclk_div;
	unsigned int axiclk_khz;
	int vdd;
	unsigned long lpj; /* loops_per_jiffy */
	/* Index in acpu_freq_tbl[] for steppings. */
	short down;
	short up;
};

static struct clk *ebi1_clk;
static struct clock_state drv_state = { 0 };
static struct clkctl_acpu_speed *acpu_freq_tbl;

static void __init acpuclk_init(void);

/* MSM7201A Levels 3-6 all correspond to 1.2V, level 7 corresponds to 1.325V. */

/*
 * ACPU freq tables used for different PLLs frequency combinations. The
 * correct table is selected during init.
 *
 * Table stepping up/down is calculated during boot to choose the largest
 * frequency jump that's less than max_speed_delta_khz and preferably on the
 * same PLL. If no frequencies using the same PLL are within
 * max_speed_delta_khz, then the farthest frequency that is within
 * max_speed_delta_khz is chosen.
 */

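/*
 * Each row below follows the field order of struct clkctl_acpu_speed:
 * use_for_scaling, a11clk_khz, pll, a11clk_src_sel, a11clk_src_div,
 * ahbclk_khz, ahbclk_div, axiclk_khz, vdd. The lpj value and the up/down
 * stepping indices are filled in at boot.
 */
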
/* 7x01/7x25 normal with GSM capable modem */
static struct clkctl_acpu_speed pll0_245_pll1_768_pll2_1056[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 30720, 0 },
	{ 0, 122880, ACPU_PLL_0, 4, 1, 61440, 1, 61440, 0 },
	{ 1, 128000, ACPU_PLL_1, 1, 5, 64000, 1, 61440, 0 },
	{ 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 61440, 3 },
	{ 0, 245760, ACPU_PLL_0, 4, 0, 81920, 2, 61440, 4 },
	{ 1, 256000, ACPU_PLL_1, 1, 2, 128000, 1, 128000, 5 },
	{ 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 128000, 5 },
	{ 1, 384000, ACPU_PLL_1, 1, 1, 128000, 2, 128000, 6 },
	{ 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 128000, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};

/* 7x01/7x25 normal with CDMA-only modem */
static struct clkctl_acpu_speed pll0_196_pll1_768_pll2_1056[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 24576, 0 },
	{ 0, 98304, ACPU_PLL_0, 4, 1, 49152, 1, 24576, 0 },
	{ 1, 128000, ACPU_PLL_1, 1, 5, 64000, 1, 24576, 0 },
	{ 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 24576, 3 },
	{ 0, 196608, ACPU_PLL_0, 4, 0, 65536, 2, 24576, 4 },
	{ 1, 256000, ACPU_PLL_1, 1, 2, 128000, 1, 128000, 5 },
	{ 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 128000, 5 },
	{ 1, 384000, ACPU_PLL_1, 1, 1, 128000, 2, 128000, 6 },
	{ 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 128000, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};

/* 7x01/7x25 turbo with GSM capable modem */
static struct clkctl_acpu_speed pll0_245_pll1_960_pll2_1056[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 30720, 0 },
	{ 0, 120000, ACPU_PLL_1, 1, 7, 60000, 1, 61440, 0 },
	{ 1, 122880, ACPU_PLL_0, 4, 1, 61440, 1, 61440, 0 },
	{ 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 61440, 3 },
	{ 1, 245760, ACPU_PLL_0, 4, 0, 81920, 2, 61440, 4 },
	{ 1, 320000, ACPU_PLL_1, 1, 2, 107000, 2, 120000, 5 },
	{ 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 120000, 5 },
	{ 1, 480000, ACPU_PLL_1, 1, 1, 120000, 3, 120000, 6 },
	{ 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 122880, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};

/* 7x01/7x25 turbo with CDMA-only modem */
#if defined(CONFIG_TURBO_MODE)
static struct clkctl_acpu_speed pll0_196_pll1_960_pll2_1056[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 30720, 0 },
	{ 1, 122880, ACPU_PLL_0, 4, 1, 61440, 1, 61440, 0 },
	{ 0, 160000, ACPU_PLL_1, 1, 7, 64000, 1, 61440, 0 },
	{ 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 61440, 3 },
	{ 1, 245760, ACPU_PLL_0, 4, 0, 81920, 2, 61440, 4 },
	{ 1, 320000, ACPU_PLL_1, 1, 2, 107000, 2, 120000, 5 },
	{ 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 128000, 5 },
	{ 1, 480000, ACPU_PLL_1, 1, 1, 120000, 3, 120000, 6 },
	{ 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 160000, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};
#endif

/* 7x27 normal with GSM capable modem */
static struct clkctl_acpu_speed pll0_245_pll1_960_pll2_1200[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 30720, 0 },
	{ 0, 120000, ACPU_PLL_1, 1, 7, 60000, 1, 61440, 3 },
	{ 1, 122880, ACPU_PLL_0, 4, 1, 61440, 1, 61440, 3 },
	{ 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 61440, 4 },
	{ 1, 245760, ACPU_PLL_0, 4, 0, 122880, 1, 122880, 4 },
	{ 1, 320000, ACPU_PLL_1, 1, 2, 160000, 1, 122880, 5 },
	{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 122880, 5 },
	{ 1, 480000, ACPU_PLL_1, 1, 1, 160000, 2, 122880, 6 },
	{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 122880, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};

/* 7x27 normal with CDMA-only modem */
static struct clkctl_acpu_speed pll0_196_pll1_960_pll2_1200[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 24576, 0 },
	{ 1, 98304, ACPU_PLL_0, 4, 1, 98304, 0, 49152, 3 },
	{ 0, 120000, ACPU_PLL_1, 1, 7, 60000, 1, 49152, 3 },
	{ 1, 196608, ACPU_PLL_0, 4, 0, 65536, 2, 98304, 4 },
	{ 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 98304, 4 },
	{ 1, 320000, ACPU_PLL_1, 1, 2, 160000, 1, 120000, 5 },
	{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 120000, 5 },
	{ 1, 480000, ACPU_PLL_1, 1, 1, 160000, 2, 120000, 6 },
	{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 120000, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};

/* 7x27 normal with GSM capable modem - PLL0 and PLL1 swapped */
static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 30720, 0 },
	{ 0, 120000, ACPU_PLL_0, 4, 7, 60000, 1, 61440, 3 },
	{ 1, 122880, ACPU_PLL_1, 1, 1, 61440, 1, 61440, 3 },
	{ 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 61440, 4 },
	{ 1, 245760, ACPU_PLL_1, 1, 0, 122880, 1, 122880, 4 },
	{ 1, 320000, ACPU_PLL_0, 4, 2, 160000, 1, 122880, 5 },
	{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 122880, 5 },
	{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 122880, 6 },
	{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 122880, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};

/* 7x27 normal with CDMA-only modem - PLL0 and PLL1 swapped */
static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200[] = {
	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 24576, 0 },
	{ 1, 98304, ACPU_PLL_1, 1, 1, 98304, 0, 49152, 3 },
	{ 0, 120000, ACPU_PLL_0, 4, 7, 60000, 1, 49152, 3 },
	{ 1, 196608, ACPU_PLL_1, 1, 0, 65536, 2, 98304, 4 },
	{ 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 98304, 4 },
	{ 1, 320000, ACPU_PLL_0, 4, 2, 160000, 1, 120000, 5 },
	{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 120000, 5 },
	{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 120000, 6 },
	{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 120000, 7 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};

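/*
 * Nominal PLL rates expressed as L register values (multiples of the
 * 19.2 MHz TCXO reference). These are matched against the PLLn_L_VAL
 * readings in acpu_freq_tbl_fixup() to pick the right frequency table.
 */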
#define PLL_196_MHZ	10
#define PLL_245_MHZ	12
#define PLL_491_MHZ	25
#define PLL_768_MHZ	40
#define PLL_960_MHZ	50
#define PLL_1056_MHZ	55
#define PLL_1200_MHZ	62

#define PLL_CONFIG(m0, m1, m2) { \
	PLL_##m0##_MHZ, PLL_##m1##_MHZ, PLL_##m2##_MHZ, \
	pll0_##m0##_pll1_##m1##_pll2_##m2 \
}

struct pll_freq_tbl_map {
	unsigned int pll0_l;
	unsigned int pll1_l;
	unsigned int pll2_l;
	struct clkctl_acpu_speed *tbl;
};

static struct pll_freq_tbl_map acpu_freq_tbl_list[] = {
	PLL_CONFIG(196, 768, 1056),
	PLL_CONFIG(245, 768, 1056),
	PLL_CONFIG(196, 960, 1056),
	PLL_CONFIG(245, 960, 1056),
	PLL_CONFIG(196, 960, 1200),
	PLL_CONFIG(245, 960, 1200),
	PLL_CONFIG(960, 196, 1200),
	PLL_CONFIG(960, 245, 1200),
	{ 0, 0, 0, 0 }
};

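/*
 * User-supplied VDD override table, filled in through the debugfs
 * "acpu_vdd" interface below. When user_vdd is set, acpuclk_set_rate()
 * uses vdd_user_data[] instead of the vdd values from acpu_freq_tbl.
 */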
const uint8_t nbr_vdd = 9;
static uint8_t vdd_user_data[9];
static uint8_t user_vdd = 0;
static uint8_t user_vdd_max = 8;

#if defined(CONFIG_MSM_CPU_FREQ_ONDEMAND) || \
	defined(CONFIG_MSM_CPU_FREQ_USERSPACE) || \
	defined(CONFIG_MSM_CPU_FREQ_MSM7K)
#if defined(CONFIG_TURBO_MODE)
static struct cpufreq_frequency_table freq_table[20];

static void __init cpufreq_table_init(void)
{
	unsigned int i;
	unsigned int freq_cnt = 0;

	/* Construct the freq_table table from acpu_freq_tbl since the
	 * freq_table values need to match frequencies specified in
	 * acpu_freq_tbl and acpu_freq_tbl needs to be fixed up during init.
	 */
	for (i = 0; acpu_freq_tbl[i].a11clk_khz != 0
			&& freq_cnt < ARRAY_SIZE(freq_table)-1; i++) {
		if (acpu_freq_tbl[i].use_for_scaling) {
			freq_table[freq_cnt].index = freq_cnt;
			freq_table[freq_cnt].frequency
				= acpu_freq_tbl[i].a11clk_khz;
			freq_cnt++;
		}
	}

	/* freq_table not big enough to store all usable freqs. */
	BUG_ON(acpu_freq_tbl[i].a11clk_khz != 0);

	freq_table[freq_cnt].index = freq_cnt;
	freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;

	pr_info("%d scaling frequencies supported.\n", freq_cnt);
}
#endif
#endif

static int acpu_debug_mask;
module_param_call(debug_mask, param_set_int, param_get_int,
		  &acpu_debug_mask, S_IWUSR | S_IRUGO);

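/*
 * PLL on/off requests to the modem are compiled out (#if 0) below, so this
 * is effectively a no-op stub that always reports success.
 */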
static int pc_pll_request(unsigned id, unsigned on)
{
#if 0
	int res;
	on = !!on;

	if (acpu_debug_mask & PERF_SWITCH_PLL_DEBUG) {
		if (on)
			printk(KERN_DEBUG "Enabling PLL %d\n", id);
		else
			printk(KERN_DEBUG "Disabling PLL %d\n", id);
	}

	res = msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on);
	if (res < 0)
		return res;

	if (acpu_debug_mask & PERF_SWITCH_PLL_DEBUG) {
		if (on)
			printk(KERN_DEBUG "PLL %d enabled\n", id);
		else
			printk(KERN_DEBUG "PLL %d disabled\n", id);
	}
	return res;
#endif
	return 0;
}


/*----------------------------------------------------------------------------
 * ARM11 'owned' clock control
 *---------------------------------------------------------------------------*/
module_param_call(pwrc_khz, param_set_int, param_get_int,
		  &drv_state.power_collapse_khz, S_IWUSR | S_IRUGO);
module_param_call(wfi_khz, param_set_int, param_get_int,
		  &drv_state.wait_for_irq_khz, S_IWUSR | S_IRUGO);

unsigned long acpuclk_power_collapse(void)
{
	int ret = acpuclk_get_rate();
	acpuclk_set_rate(drv_state.power_collapse_khz * 1000, SETRATE_PC);
	return ret * 1000;
}

unsigned long acpuclk_wait_for_irq(void)
{
	int ret = acpuclk_get_rate();
	acpuclk_set_rate(drv_state.wait_for_irq_khz * 1000, SETRATE_PC);
	return ret * 1000;
}

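/*
 * Program the requested VDD level into the SVS PLEVEL register and verify
 * that the write took effect.
 */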
static int acpuclk_set_vdd_level(int vdd)
{
	uint32_t current_vdd;

	current_vdd = readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x07;

#if PERF_SWITCH_DEBUG
	printk(KERN_DEBUG "acpuclock: Switching VDD from %u -> %d\n",
	       current_vdd, vdd);
#endif
	writel((1 << 7) | (vdd << 3), A11S_VDD_SVS_PLEVEL_ADDR);
	udelay(drv_state.vdd_switch_time_us);
	if ((readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x7) != vdd) {
#if PERF_SWITCH_DEBUG
		printk(KERN_ERR "acpuclock: VDD set failed\n");
#endif
		return -EIO;
	}

#if PERF_SWITCH_DEBUG
	printk(KERN_DEBUG "acpuclock: VDD switched\n");
#endif
	return 0;
}

/* Set proper dividers for the given clock speed. */
static void acpuclk_set_div(const struct clkctl_acpu_speed *hunt_s)
{
	uint32_t reg_clkctl, reg_clksel, clk_div, src_sel;

	reg_clksel = readl(A11S_CLK_SEL_ADDR);

	/* AHB_CLK_DIV */
	clk_div = (reg_clksel >> 1) & 0x03;
	/* CLK_SEL_SRC1NO */
	src_sel = reg_clksel & 1;

	/*
	 * If the new clock divider is higher than the previous, then
	 * program the divider before switching the clock
	 */
	if (hunt_s->ahbclk_div > clk_div) {
		reg_clksel &= ~(0x3 << 1);
		reg_clksel |= (hunt_s->ahbclk_div << 1);
		writel(reg_clksel, A11S_CLK_SEL_ADDR);
	}

	/* Program clock source and divider */
	reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
	reg_clkctl &= ~(0xFF << (8 * src_sel));
	reg_clkctl |= hunt_s->a11clk_src_sel << (4 + 8 * src_sel);
	reg_clkctl |= hunt_s->a11clk_src_div << (0 + 8 * src_sel);
	writel(reg_clkctl, A11S_CLK_CNTL_ADDR);

	/* Program clock source selection */
	reg_clksel ^= 1;
	writel(reg_clksel, A11S_CLK_SEL_ADDR);

	/*
	 * If the new clock divider is lower than the previous, then
	 * program the divider after switching the clock
	 */
	if (hunt_s->ahbclk_div < clk_div) {
		reg_clksel &= ~(0x3 << 1);
		reg_clksel |= (hunt_s->ahbclk_div << 1);
		writel(reg_clksel, A11S_CLK_SEL_ADDR);
	}
}

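/*
 * Switch the ACPU to 'rate' (in Hz). Large jumps are broken into the
 * precomputed up/down steps, VDD is raised before speeding up and dropped
 * afterwards when possible, and the AXI rate is adjusted except during
 * power collapse.
 */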
int acpuclk_set_rate(unsigned long rate, enum setrate_reason reason)
{
	uint32_t reg_clkctl;
	struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
	int rc = 0;
	unsigned int plls_enabled = 0, pll;
	unsigned int v_val;
	unsigned int n_val = 0;

	strt_s = cur_s = drv_state.current_speed;

	WARN_ONCE(cur_s == NULL, "acpuclk_set_rate: not initialized\n");
	if (cur_s == NULL)
		return -ENOENT;

	if (rate == (cur_s->a11clk_khz * 1000))
		return 0;

	for (tgt_s = acpu_freq_tbl; tgt_s->a11clk_khz != 0; tgt_s++, n_val++) {
		if (tgt_s->a11clk_khz == (rate / 1000))
			break;
	}

	if (tgt_s->a11clk_khz == 0)
		return -EINVAL;

	if (user_vdd) /* Switch to the user VREG */
		v_val = vdd_user_data[n_val];
	else
		v_val = tgt_s->vdd;

	/* Choose the highest speed at or below 'rate' with the same PLL. */
	if (reason != SETRATE_CPUFREQ
	    && tgt_s->a11clk_khz < cur_s->a11clk_khz) {
		while (tgt_s->pll != ACPU_PLL_TCXO && tgt_s->pll != cur_s->pll)
			tgt_s--;
	}

	if (strt_s->pll != ACPU_PLL_TCXO)
		plls_enabled |= 1 << strt_s->pll;

	if (reason == SETRATE_CPUFREQ) {
		mutex_lock(&drv_state.lock);
		if (strt_s->pll != tgt_s->pll && tgt_s->pll != ACPU_PLL_TCXO) {
			rc = pc_pll_request(tgt_s->pll, 1);
			if (rc < 0) {
				pr_err("PLL%d enable failed (%d)\n", tgt_s->pll, rc);
				goto out;
			}
			plls_enabled |= 1 << tgt_s->pll;
		}
		/* Increase VDD if needed. */
		if (v_val > cur_s->vdd) {
			if ((rc = acpuclk_set_vdd_level(v_val)) < 0) {
				printk(KERN_ERR "Unable to switch ACPU vdd\n");
				goto out;
			}
		}
	} else {
		/* Power collapse should also increase VDD. */
		if (v_val > cur_s->vdd) {
			if ((rc = acpuclk_set_vdd_level(v_val)) < 0) {
				printk(KERN_ERR "Unable to switch ACPU vdd\n");
				goto out;
			}
		}
	}

	/* Set wait states for the CPU between frequency changes */
	reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
	reg_clkctl |= (100 << 16); /* set WT_ST_CNT */
	writel(reg_clkctl, A11S_CLK_CNTL_ADDR);

#if PERF_SWITCH_DEBUG
	printk(KERN_INFO "acpuclock: Switching from ACPU rate %u -> %u\n",
	       strt_s->a11clk_khz * 1000, tgt_s->a11clk_khz * 1000);
#endif

	while (cur_s != tgt_s) {
		/*
		 * Always jump to the target freq if it is within 256 MHz,
		 * regardless of PLL. If the difference is greater, use the
		 * predefined steppings in the table.
		 */
		int d = abs((int)(cur_s->a11clk_khz - tgt_s->a11clk_khz));
		if (d > drv_state.max_speed_delta_khz) {
			/* Step up or down depending on target vs current. */
			int clk_index = tgt_s->a11clk_khz > cur_s->a11clk_khz ?
				cur_s->up : cur_s->down;
			if (clk_index < 0) { /* This should not happen. */
				printk(KERN_ERR "cur:%u target: %u\n",
				       cur_s->a11clk_khz, tgt_s->a11clk_khz);
				rc = -EINVAL;
				goto out;
			}
			cur_s = &acpu_freq_tbl[clk_index];
		} else {
			cur_s = tgt_s;
		}
#if PERF_SWITCH_STEP_DEBUG
		printk(KERN_DEBUG "%s: STEP khz = %u, pll = %d\n",
		       __FUNCTION__, cur_s->a11clk_khz, cur_s->pll);
#endif
		/* Power collapse should also request pll.(19.2->528) */
		if (cur_s->pll != ACPU_PLL_TCXO
		    && !(plls_enabled & (1 << cur_s->pll))) {
			rc = pc_pll_request(cur_s->pll, 1);
			if (rc < 0) {
				pr_err("PLL%d enable failed (%d)\n",
				       cur_s->pll, rc);
				goto out;
			}
			plls_enabled |= 1 << cur_s->pll;
		}

		acpuclk_set_div(cur_s);
		drv_state.current_speed = cur_s;
		/* Re-adjust lpj for the new clock speed. */
		loops_per_jiffy = cur_s->lpj;
		udelay(drv_state.acpu_switch_time_us);
	}

	/* Change the AXI bus frequency if we can. */
	/* Don't change it at power collapse, it will cause stability issues. */
	if (strt_s->axiclk_khz != tgt_s->axiclk_khz && reason != SETRATE_PC) {
		rc = clk_set_rate(ebi1_clk, tgt_s->axiclk_khz * 1000);
		if (rc < 0)
			pr_err("Setting AXI min rate failed!\n");
	}

	/* Disable PLLs we are not using anymore. */
	plls_enabled &= ~(1 << tgt_s->pll);
	for (pll = ACPU_PLL_0; pll <= ACPU_PLL_2; pll++)
		if (plls_enabled & (1 << pll)) {
			rc = pc_pll_request(pll, 0);
			if (rc < 0) {
				pr_err("PLL%d disable failed (%d)\n", pll, rc);
				goto out;
			}
		}

	/* Drop VDD level if we can. */
	if (v_val < strt_s->vdd) {
		if (acpuclk_set_vdd_level(v_val) < 0)
			printk(KERN_ERR "acpuclock: Unable to drop ACPU vdd\n");
	}

#if PERF_SWITCH_DEBUG
	printk(KERN_DEBUG "%s: ACPU speed change complete\n", __FUNCTION__);
#endif

	/* Nothing else to do for power collapse */
	if (reason == SETRATE_PC)
		return 0;
out:
	if (reason == SETRATE_CPUFREQ)
		mutex_unlock(&drv_state.lock);
	return rc;
}

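/*
 * Determine the current ACPU speed from the clock-control registers,
 * apply the PLL2 overclock adjustment when oc_freq_khz is set, and set
 * the matching AXI rate.
 */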
static void __init acpuclk_init(void)
{
	struct clkctl_acpu_speed *speed;
	uint32_t div, sel;
	int rc;

	unsigned int a11clk_khz_new;
	uint32_t reg_clkctl;

	/*
	 * Determine the rate of ACPU clock
	 */

	if (!(readl(A11S_CLK_SEL_ADDR) & 0x01)) { /* CLK_SEL_SRC1N0 */
		/* CLK_SRC0_SEL */
		sel = (readl(A11S_CLK_CNTL_ADDR) >> 12) & 0x7;
		/* CLK_SRC0_DIV */
		div = (readl(A11S_CLK_CNTL_ADDR) >> 8) & 0x0f;
	} else {
		/* CLK_SRC1_SEL */
		sel = (readl(A11S_CLK_CNTL_ADDR) >> 4) & 0x07;
		/* CLK_SRC1_DIV */
		div = readl(A11S_CLK_CNTL_ADDR) & 0x0f;
	}

	if (oc_freq_khz) {
		/* make sure the target freq is a multiple of 19.2 MHz */
		oc_freq_khz = (oc_freq_khz / 19200) * 19200;

		/* set the PLL2 frequency */
		writel(oc_freq_khz / 19200, MSM_CLK_CTL_BASE + 0x33c);
		udelay(50);

		/*
		 * For overclocking we set PLL2 to a divider of 1 to have
		 * headroom over the max default 1.2 GHz / 2 setting.
		 */
		if ((sel == ACPU_PLL_2) && div) {
			reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
			if (!(readl(A11S_CLK_SEL_ADDR) & 0x01)) { /* CLK_SEL_SRC1N0 */
				reg_clkctl &= ~(0xf << 8);
			} else {
				reg_clkctl &= ~0xf;
			}
			writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
			udelay(50);
			div = 0;
		}

		/* adjust PLL2 frequencies */
		for (speed = acpu_freq_tbl; speed->a11clk_khz != 0; speed++) {
			if (speed->pll == ACPU_PLL_2) {
				speed->a11clk_src_div = (speed->a11clk_src_div + 2) / 2 - 1;
				a11clk_khz_new = oc_freq_khz / (speed->a11clk_src_div + 1);

				if ((sel == ACPU_PLL_2) && (div == speed->a11clk_src_div)) {
					/* adjust loops_per_jiffy to the new clock speed */
					loops_per_jiffy = cpufreq_scale(loops_per_jiffy,
									speed->a11clk_khz,
									a11clk_khz_new);
				}
				speed->a11clk_khz = a11clk_khz_new;
				speed->ahbclk_khz = speed->a11clk_khz / (speed->ahbclk_div + 1);
				printk("OC: ADJUSTING FREQ TABLE freq=%d div=%d ahbclk=%d ahbdiv=%d\n",
				       speed->a11clk_khz, speed->a11clk_src_div,
				       speed->ahbclk_khz, speed->ahbclk_div);
			}
			if ((speed->up < 0) && ((speed + 1)->a11clk_khz)) {
				/*
				 * Make sure all up entries are populated, because
				 * set_rate does not know how to jump in greater
				 * than 256 MHz increments.
				 */
				speed->up = speed - acpu_freq_tbl + 1;
			}
		}
	}

	for (speed = acpu_freq_tbl; speed->a11clk_khz != 0; speed++) {
		if (speed->a11clk_src_sel == sel
		    && (speed->a11clk_src_div == div))
			break;
	}
	if (speed->a11clk_khz == 0) {
		printk(KERN_WARNING "Warning - ACPU clock reports invalid speed\n");
		return;
	}

	drv_state.current_speed = speed;

	rc = clk_set_rate(ebi1_clk, speed->axiclk_khz * 1000);
	if (rc < 0)
		pr_err("Setting AXI min rate failed!\n");

	printk(KERN_INFO "ACPU running at %d KHz\n", speed->a11clk_khz);
}

unsigned long acpuclk_get_rate(void)
{
	WARN_ONCE(drv_state.current_speed == NULL,
		  "acpuclk_get_rate: not initialized\n");
	if (drv_state.current_speed)
		return drv_state.current_speed->a11clk_khz;
	else
		return 0;
}

uint32_t acpuclk_get_switch_time(void)
{
	return drv_state.acpu_switch_time_us;
}

/*----------------------------------------------------------------------------
 * Clock driver initialization
 *---------------------------------------------------------------------------*/

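/* The hardware divider fields encode "divide by n" as n - 1. */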
#define DIV2REG(n) ((n)-1)
#define REG2DIV(n) ((n)+1)
#define SLOWER_BY(div, factor) div = DIV2REG(REG2DIV(div) * factor)

static void __init acpu_freq_tbl_fixup(void)
{
	unsigned long pll0_l, pll1_l, pll2_l;
	int axi_160mhz = 0, axi_200mhz = 0;
	struct pll_freq_tbl_map *lst;
	struct clkctl_acpu_speed *t;
	unsigned int pll0_needs_fixup = 0;

	/* Wait for the PLLs to be initialized and then read their frequency.
	 */
	do {
		pll0_l = readl(PLLn_L_VAL(0)) & 0x3f;
		cpu_relax();
		udelay(50);
	} while (pll0_l == 0);
	do {
		pll1_l = readl(PLLn_L_VAL(1)) & 0x3f;
		cpu_relax();
		udelay(50);
	} while (pll1_l == 0);
	do {
		pll2_l = readl(PLLn_L_VAL(2)) & 0x3f;
		cpu_relax();
		udelay(50);
	} while (pll2_l == 0);

	printk(KERN_INFO "L val: PLL0: %d, PLL1: %d, PLL2: %d\n",
	       (int)pll0_l, (int)pll1_l, (int)pll2_l);

	/* Some configurations run PLL0 twice as fast. Instead of having
	 * separate tables for this case, we simply fix up the ACPU clock
	 * source divider since it's a simple fix up.
	 */
	if (pll0_l == PLL_491_MHZ) {
		pll0_l = PLL_245_MHZ;
		pll0_needs_fixup = 1;
	}

	/* Select the right table to use. */
	for (lst = acpu_freq_tbl_list; lst->tbl != 0; lst++) {
		if (lst->pll0_l == pll0_l && lst->pll1_l == pll1_l
		    && lst->pll2_l == pll2_l) {
			acpu_freq_tbl = lst->tbl;
			break;
		}
	}

	if (acpu_freq_tbl == NULL) {
		pr_crit("Unknown PLL configuration!\n");
		BUG();
	}

	/* Fix up PLL0 source divider if necessary. Also, fix up the AXI to
	 * the max that's supported by the board (RAM used in board).
	 */
	axi_160mhz = (pll0_l == PLL_960_MHZ || pll1_l == PLL_960_MHZ);
	axi_200mhz = (pll2_l == PLL_1200_MHZ);
	for (t = &acpu_freq_tbl[0]; t->a11clk_khz != 0; t++) {

		if (pll0_needs_fixup && t->pll == ACPU_PLL_0)
			SLOWER_BY(t->a11clk_src_div, 2);
		if (axi_160mhz && drv_state.max_axi_khz >= 160000
		    && t->ahbclk_khz > 128000)
			t->axiclk_khz = 160000;
		if (axi_200mhz && drv_state.max_axi_khz >= 200000
		    && t->ahbclk_khz > 160000)
			t->axiclk_khz = 200000;
	}

	t--;
	if (!axi_160mhz)
		pr_info("Turbo mode not supported.\n");
	else if (t->axiclk_khz == 160000)
		pr_info("Turbo mode supported and enabled.\n");
	else
		pr_info("Turbo mode supported but not enabled.\n");
}

/* Initialize the lpj field in the acpu_freq_tbl. */
static void __init lpj_init(void)
{
	int i;
	const struct clkctl_acpu_speed *base_clk = drv_state.current_speed;

	for (i = 0; acpu_freq_tbl[i].a11clk_khz; i++) {
		acpu_freq_tbl[i].lpj = cpufreq_scale(loops_per_jiffy,
						     base_clk->a11clk_khz,
						     acpu_freq_tbl[i].a11clk_khz);
	}
}

static void __init precompute_stepping(void)
{
	int i, step_idx, step_same_pll_idx;

#define cur_freq acpu_freq_tbl[i].a11clk_khz
#define step_freq acpu_freq_tbl[step_idx].a11clk_khz
#define cur_pll acpu_freq_tbl[i].pll
#define step_pll acpu_freq_tbl[step_idx].pll

	for (i = 0; acpu_freq_tbl[i].a11clk_khz; i++) {

		/* Calculate "Up" step. */
		step_idx = i + 1;
		step_same_pll_idx = -1;
		while (step_freq && (step_freq - cur_freq)
		       <= drv_state.max_speed_delta_khz) {
			if (step_pll == cur_pll)
				step_same_pll_idx = step_idx;
			step_idx++;
		}

		/* Highest freq within max_speed_delta_khz. No step needed. */
		if (step_freq == 0)
			acpu_freq_tbl[i].up = -1;
		else if (step_idx == (i + 1)) {
			pr_crit("Delta between freqs %u KHz and %u KHz is"
				" too high!\n", cur_freq, step_freq);
			BUG();
		} else {
			/* There is only one TCXO freq. So don't complain. */
			if (cur_pll == ACPU_PLL_TCXO)
				step_same_pll_idx = step_idx - 1;
			if (step_same_pll_idx == -1) {
				pr_warning("Suboptimal up stepping for CPU "
					   "freq %u KHz.\n", cur_freq);
				acpu_freq_tbl[i].up = step_idx - 1;
			} else
				acpu_freq_tbl[i].up = step_same_pll_idx;
		}

		/* Calculate "Down" step. */
		step_idx = i - 1;
		step_same_pll_idx = -1;
		while (step_idx >= 0 && (cur_freq - step_freq)
		       <= drv_state.max_speed_delta_khz) {
			if (step_pll == cur_pll)
				step_same_pll_idx = step_idx;
			step_idx--;
		}

		/* Lowest freq within max_speed_delta_khz. No step needed. */
		if (step_idx == -1)
			acpu_freq_tbl[i].down = -1;
		else if (step_idx == (i - 1)) {
			pr_crit("Delta between freqs %u KHz and %u KHz is"
				" too high!\n", cur_freq, step_freq);
			BUG();
		} else {
			if (step_same_pll_idx == -1) {
				pr_warning("Suboptimal down stepping for CPU "
					   "freq %u KHz.\n", cur_freq);
				acpu_freq_tbl[i].down = step_idx + 1;
			} else
				acpu_freq_tbl[i].down = step_same_pll_idx;
		}
	}
}

static void __init print_acpu_freq_tbl(void)
{
	struct clkctl_acpu_speed *t;

	pr_info("CPU-Freq PLL DIV AHB-Freq ADIV AXI-Freq Dn Up\n");
	for (t = &acpu_freq_tbl[0]; t->a11clk_khz != 0; t++)
		pr_info("%8d %3d %3d %8d %4d %8d %2d %2d\n",
			t->a11clk_khz, t->pll, t->a11clk_src_div + 1,
			t->ahbclk_khz, t->ahbclk_div + 1, t->axiclk_khz,
			t->down, t->up);
}

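/*
 * Platform init entry point: copy the platform data into drv_state, pick
 * and fix up the frequency table, precompute stepping, detect the current
 * speed, scale the lpj values, and register the cpufreq frequency table
 * when cpufreq support is configured.
 */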
void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata)
{
	pr_info("acpu_clock_init()\n");

	ebi1_clk = clk_get(NULL, "ebi1_clk");

	mutex_init(&drv_state.lock);
	drv_state.acpu_switch_time_us = clkdata->acpu_switch_time_us;
	drv_state.max_speed_delta_khz = clkdata->max_speed_delta_khz;
	drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us;
	drv_state.power_collapse_khz = clkdata->power_collapse_khz;
	drv_state.wait_for_irq_khz = clkdata->wait_for_irq_khz;
	drv_state.max_axi_khz = clkdata->max_axi_khz;
	acpu_freq_tbl_fixup();
	precompute_stepping();
	acpuclk_init();
	lpj_init();
	print_acpu_freq_tbl();
#if defined(CONFIG_MSM_CPU_FREQ_ONDEMAND) || \
	defined(CONFIG_MSM_CPU_FREQ_USERSPACE) || \
	defined(CONFIG_MSM_CPU_FREQ_MSM7K)
	cpufreq_table_init();
	cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
#endif
}

unsigned long acpuclk_get_max_rate_override(void)
{
	return oc_freq_khz;
}

#if defined(CONFIG_DEBUG_FS)
/*
 * Parse user-supplied VDD levels. The input must be single-digit values
 * separated by ',' and be exactly nbr_vdd (the number of entries in the
 * frequency table) * 2 bytes long, including the terminating '\0'.
 */
static ssize_t acpu_vdd_fops_write(struct file *filp, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct msm_rpc_endpoint *ept;
	int rc = 0, i;
	uint8_t val;
	void *k_buffer;
	char *data_pnt;
	char *token = NULL;

	ept = (struct msm_rpc_endpoint *) filp->private_data;

	k_buffer = kmalloc(count, GFP_KERNEL);
	if (!k_buffer)
		return -ENOMEM;

	if (copy_from_user(k_buffer, buf, count)) {
		rc = -EFAULT;
		goto write_out_free;
	}

	if (count != nbr_vdd * 2) {
		rc = -EFAULT;
		goto write_out_free;
	}

	data_pnt = k_buffer;
	token = strsep(&data_pnt, ",");
	for (i = 0; token != NULL && i < nbr_vdd; i++) {
		val = simple_strtoul(token, NULL, 10);
		if (val > user_vdd_max || val < 0) {
			rc = -EFAULT;
			goto write_out_free;
		}
		vdd_user_data[i] = val;
		token = strsep(&data_pnt, ",");
	}
	user_vdd = 1;
	rc = count;
write_out_free:
	kfree(k_buffer);
	return rc;
}

/* Report the active VDD levels, separated by ',' */
static ssize_t acpu_vdd_fops_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	char k_buffer[nbr_vdd * 2];
	int i = 0, j = 0;
	struct clkctl_acpu_speed *tgt_s;

	if (user_vdd) {
		for (j = 0; j < nbr_vdd; j++) {
			sprintf(&k_buffer[i], "%d", vdd_user_data[j]);
			k_buffer[i + 1] = ',';
			i += 2;
		}
	} else {
		for (tgt_s = acpu_freq_tbl; tgt_s->a11clk_khz != 0; tgt_s++) {
			sprintf(&k_buffer[i], "%d", tgt_s->vdd);
			k_buffer[i + 1] = ',';
			i += 2;
		}
	}
	k_buffer[nbr_vdd * 2 - 1] = '\0';
	if (len < sizeof(k_buffer))
		return -EINVAL;
	return simple_read_from_buffer(buf, len, ppos, k_buffer,
				       sizeof(k_buffer));
}

static struct file_operations acpu_vdd_fops = {
	.write = acpu_vdd_fops_write,
	.read = acpu_vdd_fops_read,
};

static int acpu_vdd_reset_get(void *dat, u64 *val)
{
	return 0;
}

/* Reset the custom VDDs to the default values */
static int acpu_vdd_reset_set(void *dat, u64 val)
{
	user_vdd = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(acpu_vdd_reset_fops,
			acpu_vdd_reset_get,
			acpu_vdd_reset_set, "%llu\n");

static int __init acpu_dbg_init(void)
{
	struct dentry *dent;

	dent = debugfs_create_dir("acpu_dbg", 0);
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	debugfs_create_file("acpu_vdd", 0644, dent, NULL,
			    &acpu_vdd_fops);

	debugfs_create_file("acpu_vdd_reset", 0644, dent, NULL,
			    &acpu_vdd_reset_fops);

	return 0;
}

device_initcall(acpu_dbg_init);

#endif