/**************************************************************************
 * linux/arch/arm/mach-dm320/cm.c
 *
 * DM320 Clock Management
 *
 * Copyright (c) 2006 Kent Ryhorchuk <kent@zing.net>
 *
 **************************************************************************/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/arch/reg_clkc.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/arch/gio.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/arch/cm.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/device.h>
#include <asm/arch/memconfig.h>

static int idle_service_level[CM_N_CLOCKS] = {0, 0, 0, 0};
static int running_service_level[CM_N_CLOCKS] = {0xf, 0xf, 0, 0};

/* Defaults for the /proc interface. These ensure the board will run. */
static int proc_idle_service_level[CM_N_CLOCKS] = {0, 0, 0, 0};
static int proc_running_service_level[CM_N_CLOCKS] = {0, 0, 0, 0};

/* Dividers initialized in init. */
static uint16_t idle_dividers[CM_N_CLOCKS];
static uint16_t running_dividers[CM_N_CLOCKS];
static uint16_t idle_refctl = 0x140;
static uint16_t running_refctl = 0x140;

static uint16_t min_cpu_divider = 0;

/* This is the base value for the SDRAM refresh cycle. You divide this by
   the SDRAM clock divider and add 1 to get the SDRAM refresh cycle base. */
#define REF_CYC_BASE 190 // 64MB, 8K cycle, 64ms.
#define REF_CYC_FOR_DIV(D) ( REF_CYC_BASE / D ) // 0,1 invalid (illegal freqs)

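/*
 * Worked example (illustrative only): with an SDRAM clock divider of 2,
 * REF_CYC_FOR_DIV(2) = 190 / 2 = 95 = 0x5f, so the REFCTL value computed
 * below becomes 0x100 | 0x5f = 0x15f; a divider of 4 gives 190 / 4 = 47
 * = 0x2f, i.e. 0x12f.  Dividers of 0 and 1 are not handled, as noted
 * above.
 */
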
/* Returns the desired setting of DIV0, including AHB. */
static uint16_t cpu_divider_for_sl(int sl)
{
        uint16_t ret;

        if( sl > 0xf )
                sl = 0xf;

        /* ARM clock divider is 5 bits, service levels are 4. So shift by 1.
           Add 1 so the minimum divider is 2. TODO - determine the minimum
           level some other way. */
        ret = ( ( 0xf - sl ) << 1 ) + 1;

        if( ret < min_cpu_divider )
                ret = min_cpu_divider;

        /* If the ARM divider is 2 or less, set the AHB divider to 2. */
        if( ret <= 1 )
                ret |= 0x0100;

        return ret;
}
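
/*
 * Illustrative mapping, derived from the formula above: service level 0xf
 * gives ret = ((0xf - 0xf) << 1) + 1 = 1, which (assuming min_cpu_divider
 * permits it) also sets the AHB bit, so DIV0 = 0x0101; level 0x7 gives
 * 0x11; level 0 gives 0x1f, the slowest setting.  Anything below
 * min_cpu_divider, taken from the DIV0 field read in cm_init(), is
 * clamped up to it.
 */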

/* Determined in init. */
static uint16_t min_ram_divider = 0;

static uint16_t ram_divider_for_sl(int sl)
{
        uint16_t ret;

        if( sl > 0xf )
                sl = 0xf;

        ret = ( 0xf - sl ) << 1; /* Allow for 0. */
        /* On some boards the minimum divider is 1, on others it is 3. We use
           what was found at startup as the minimum. */
        if( ret < min_ram_divider )
                ret = min_ram_divider;

        return ret;
}

/* Determined in init. */
static uint16_t min_axl_divider = 0;

static uint16_t axl_divider_for_sl(int sl, uint16_t ram_div)
{
        uint16_t ret;

        /* DM320 Errata #10 - the AXL clock has to be some amount faster than
           SDRAM.  The inequality is 1/AXL + 0.0028 < 1/SDR.
           If you do the math for the PLLA-only case, this means that the AXL
           divider must be at least one less than the SDRAM divider.  The
           inequality also holds for the case where SDRAM is clocked from PLLB
           and PLLB is running slower than PLLA. */

        if( sl > 0xf )
                sl = 0xf;

        ret = ( ( 0xf - sl ) << 1 ) + 1;

        /* Complicated by the fact that on some boards the RAM divider is 1. */
        if( ram_div == 0 ){
                ret = min_axl_divider;
        }else if( ret >= ram_div ){
                ret = ram_div - 1;
        }

        if( ret < min_axl_divider )
                ret = min_axl_divider;

        return ret;
}
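
/*
 * Illustrative only: with ram_div = 4 and a low service level giving
 * ret = 0x1f, the "ret >= ram_div" branch clamps the AXL divider to
 * ram_div - 1 = 3, keeping the AXL clock faster than the SDRAM clock as
 * the errata requires.  When ram_div is 0, ram_div - 1 would wrap on a
 * uint16_t, so the minimum found at init is used instead.
 */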

static uint16_t min_dsp_divider = 0;

static uint16_t dsp_divider_for_sl(int sl, uint16_t cpu_div)
{
        uint16_t ret;

        if( sl > 0xf )
                sl = 0xf;

        ret = ( ( 0xf - sl ) << 1 ) + 1;
        if( ret < min_dsp_divider )
                ret = min_dsp_divider;

        if( cpu_div & 0x0100 ){
                cpu_div &= 0xff;
                cpu_div += 1;
        }

        if( cpu_div < ret )
                ret = cpu_div;

        return ret;
}
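
/*
 * Note (a reading of the code above, not of the datasheet): the DSP
 * divider is never allowed to exceed the effective ARM divider.  When
 * DIV0 has the AHB bit (0x0100) set, the ARM field is treated as one
 * larger before the comparison, e.g. cpu_div = 0x0101 is compared as 2,
 * so a requested ret of 0x1f would be clamped to 2.
 */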

static void set_idle_divider(cm_clock_t clock)
{
        switch( clock ){
        case CM_CPU :
                idle_dividers[CM_CPU] =
                        cpu_divider_for_sl(idle_service_level[CM_CPU]);
                break;
        case CM_RAM :
        case CM_AXL :
                idle_dividers[CM_RAM] =
                        ram_divider_for_sl(idle_service_level[CM_RAM]);
                idle_dividers[CM_AXL] =
                        axl_divider_for_sl(idle_service_level[CM_CPU],
                                           idle_dividers[CM_RAM]);
                idle_refctl = 0x100 | REF_CYC_FOR_DIV(idle_dividers[CM_RAM]);
                break;
        case CM_DSP :
                idle_dividers[CM_DSP] =
                        dsp_divider_for_sl(idle_service_level[CM_DSP],
                                           idle_dividers[CM_CPU]);
                break;
        default:
                break;
        }
}

static void set_running_divider(cm_clock_t clock)
{
        switch( clock ){
        case CM_CPU :
                running_dividers[CM_CPU] =
                        cpu_divider_for_sl(running_service_level[CM_CPU]);
                break;
        case CM_RAM :
        case CM_AXL :
                running_dividers[CM_RAM] =
                        ram_divider_for_sl(running_service_level[CM_RAM]);
                running_dividers[CM_AXL] =
                        axl_divider_for_sl(running_service_level[CM_CPU],
                                           running_dividers[CM_RAM]);
                running_refctl =
                        0x100 | REF_CYC_FOR_DIV(running_dividers[CM_RAM]);
                break;
        case CM_DSP :
                running_dividers[CM_DSP] =
                        dsp_divider_for_sl(running_service_level[CM_DSP],
                                           running_dividers[CM_CPU]);
                break;
        default:
                break;
        }
}

static void set_dividers(void)
{
        idle_dividers[CM_CPU] = cpu_divider_for_sl(idle_service_level[CM_CPU]);
        idle_dividers[CM_RAM] = ram_divider_for_sl(idle_service_level[CM_RAM]);
        idle_dividers[CM_AXL] = axl_divider_for_sl(idle_service_level[CM_CPU],
                                                   idle_dividers[CM_RAM]);
        idle_dividers[CM_DSP] = dsp_divider_for_sl(idle_service_level[CM_DSP],
                                                   idle_dividers[CM_CPU]);

        running_dividers[CM_CPU] =
                cpu_divider_for_sl(running_service_level[CM_CPU]);
        running_dividers[CM_RAM] =
                ram_divider_for_sl(running_service_level[CM_RAM]);
        running_dividers[CM_AXL] =
                axl_divider_for_sl(running_service_level[CM_CPU],
                                   running_dividers[CM_RAM]);
        running_dividers[CM_DSP] =
                dsp_divider_for_sl(running_service_level[CM_DSP],
                                   running_dividers[CM_CPU]);

        running_refctl = 0x100 | REF_CYC_FOR_DIV(running_dividers[CM_RAM]);
        idle_refctl = 0x100 | REF_CYC_FOR_DIV(idle_dividers[CM_RAM]);
}

int disable_throttle(cm_clock_t clock, int level)
{
        unsigned long flags;

        local_irq_save(flags);
        idle_service_level[clock] += level;
        set_idle_divider(clock);
        local_irq_restore(flags);
        return level;
}

EXPORT_SYMBOL(disable_throttle);

void enable_throttle(cm_clock_t clock, int token)
{
        unsigned long flags;

        local_irq_save(flags);
        idle_service_level[clock] -= token;
        if( idle_service_level[clock] < 0 )
                idle_service_level[clock] = 0;

        set_idle_divider(clock);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(enable_throttle);

int enable_boost(cm_clock_t clock, int level)
{
        unsigned long flags;

        local_irq_save(flags);
        running_service_level[clock] += level;
        set_running_divider(clock);
        dm320_exit_idle();
        local_irq_restore(flags);
        return level;
}

EXPORT_SYMBOL(enable_boost);

void disable_boost(cm_clock_t clock, int token)
{
        unsigned long flags;

        local_irq_save(flags);
        running_service_level[clock] -= token;
        /* Don't let the running level go negative. */
        if( running_service_level[clock] < 0 )
                running_service_level[clock] = 0;

        set_running_divider(clock);
        dm320_exit_idle();
        local_irq_restore(flags);
}

EXPORT_SYMBOL(disable_boost);
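
/*
 * Minimal usage sketch (not taken from this file; the function names are
 * hypothetical): a driver that needs the ARM clock at full speed while it
 * works would bracket the work with the boost calls, keeping the returned
 * token so that exactly the same amount is released afterwards:
 *
 *        static int cpu_boost_token;
 *
 *        static void foo_start(void)
 *        {
 *                cpu_boost_token = enable_boost(CM_CPU, 0xf);
 *        }
 *
 *        static void foo_done(void)
 *        {
 *                disable_boost(CM_CPU, cpu_boost_token);
 *        }
 *
 * disable_throttle()/enable_throttle() work the same way for the idle
 * dividers.
 */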

void throttle_all(cm_setsl_in_t *a, cm_user_t *cmu)
{
        int i;
        unsigned long flags;

        local_irq_save(flags);

        for( i = 0 ; i < CM_N_CLOCKS ; i++ ){
                idle_service_level[i] -= cmu->idle_tokens[i];
                idle_service_level[i] += cmu->idle_tokens[i] = a[0].sl[i];
                running_service_level[i] -= cmu->running_tokens[i];
                running_service_level[i] += cmu->running_tokens[i] = a[1].sl[i];
        }

        set_dividers();
        dm320_exit_idle();

        local_irq_restore(flags);
}

void dm320_enter_idle(void)
{
        uint16_t div0_new, div1_new, div2_new;

        div0_new = idle_dividers[CM_CPU]; /* Includes AHB. */
        div1_new = ( idle_dividers[CM_AXL] << 8 ) | idle_dividers[CM_RAM];
        div2_new = idle_dividers[CM_DSP] << 8;

        outw(div0_new, DM320_CLKC_DIV0);
        outw(div1_new, DM320_CLKC_DIV1);
        outw(div2_new, DM320_CLKC_DIV2);
        outw(idle_refctl, DM320_SDRAMC_REFCTL);
}

void dm320_exit_idle(void)
{
        uint16_t div0_new, div1_new, div2_new;

        div0_new = running_dividers[CM_CPU];
        div1_new = ( running_dividers[CM_AXL] << 8 ) | running_dividers[CM_RAM];
        div2_new = running_dividers[CM_DSP] << 8;

        outw(div2_new, DM320_CLKC_DIV2);
        outw(div1_new, DM320_CLKC_DIV1);
        outw(div0_new, DM320_CLKC_DIV0);
        outw(running_refctl, DM320_SDRAMC_REFCTL);
}

/* This is the number of bits of precision for the loops_per_jiffy. Each
   bit takes on average 1.5/HZ seconds. This (like the original) is a little
   better than 1%. */
#define LPS_PREC 8

void recalibrate_delay(void)
{
#if !defined(CONFIG_BOGOMIPS) || !CONFIG_BOGOMIPS
        unsigned long ticks, loopbit;
        int lps_precision = LPS_PREC;

        loops_per_jiffy = (1<<12);

        printk("Recalibrating delay loop... ");
        while (loops_per_jiffy <<= 1) {
                /* wait for "start of" clock tick */
                ticks = jiffies;
                while (ticks == jiffies)
                        /* nothing */;
                /* Go .. */
                ticks = jiffies;
                __delay(loops_per_jiffy);
                ticks = jiffies - ticks;
                if (ticks)
                        break;
        }

        /* Do a binary approximation to get loops_per_jiffy set to equal one
           clock tick (up to lps_precision bits) */
        loops_per_jiffy >>= 1;
        loopbit = loops_per_jiffy;
        while ( lps_precision-- && (loopbit >>= 1) ) {
                loops_per_jiffy |= loopbit;
                ticks = jiffies;
                while (ticks == jiffies);
                ticks = jiffies;
                __delay(loops_per_jiffy);
                if (jiffies != ticks) /* longer than 1 tick */
                        loops_per_jiffy &= ~loopbit;
        }
#else
        printk("Using pre-calculated value: ");
#endif

        /* Round the value and print it */
        printk("%lu.%02lu BogoMIPS\n",
               loops_per_jiffy/(500000/HZ),
               (loops_per_jiffy/(5000/HZ)) % 100);
}
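
/*
 * Worked example of the printout (illustrative numbers): with HZ = 100 and
 * a calibrated loops_per_jiffy of 499712, the integer part is
 * 499712 / (500000/100) = 99 and the fractional part is
 * (499712 / (5000/100)) % 100 = 94, so the line reads "99.94 BogoMIPS".
 */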

static int throttle_read_proc(char *page, char **start, off_t off,
                              int count, int *eof, void *data)
{
        int len;

        len = sprintf(page,
                      "idle: cpu %02d, ram %02d, axl %02d, dsp %02d\n"
                      "idle_div: cpu %03x, ram %02x, axl %02x, dsp %02x\n"
                      "running: cpu %02d, ram %02d, axl %02d, dsp %02d\n"
                      "running_div: cpu %03x, ram %02x, axl %02x, dsp %02x\n",
                      idle_service_level[CM_CPU], idle_service_level[CM_RAM],
                      idle_service_level[CM_AXL], idle_service_level[CM_DSP],
                      idle_dividers[CM_CPU], idle_dividers[CM_RAM],
                      idle_dividers[CM_AXL], idle_dividers[CM_DSP],
                      running_service_level[CM_CPU], running_service_level[CM_RAM],
                      running_service_level[CM_AXL], running_service_level[CM_DSP],
                      running_dividers[CM_CPU], running_dividers[CM_RAM],
                      running_dividers[CM_AXL], running_dividers[CM_DSP]);

        len -= off;
        *start = page + off;

        if (len > count)
                len = count;
        else
                *eof = 1;

        if (len < 0)
                len = 0;

        return len;
}

static cm_user_t proc_cmu;

static int throttle_write_proc(struct file *file, const char __user *buffer,
                               unsigned long count, void *data)
{
        int len, i;
        int num;
        int sl_in[CM_N_CLOCKS];
        char str[20];

        /* Leave room for a terminating NUL so sscanf() sees a proper string. */
        len = sizeof(str) - 1;
        if (count < len)
                len = count;

        if (copy_from_user(str, buffer, len) > 0)
                return -EFAULT;

        str[len] = '\0';

        num = sscanf(str, " %d %d %d %d ", &sl_in[0], &sl_in[1], &sl_in[2],
                     &sl_in[3]);
        if( num != 4 ){
                printk("Invalid string written to throttle.\n");
                return -EINVAL;
        }

        for( i = 0 ; i < CM_N_CLOCKS ; i++ ){
                enable_throttle(i, proc_cmu.idle_tokens[i]);
                proc_cmu.idle_tokens[i] = disable_throttle(i, sl_in[i]);
        }

        return count;
}
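
/*
 * Example of driving the /proc interface from user space (the numbers are
 * illustrative service levels, one per clock in cm_clock_t order, assumed
 * here to be CPU, RAM, AXL, DSP):
 *
 *        cat /proc/throttle
 *        echo "15 15 0 0" > /proc/throttle
 *
 * Writes adjust only the idle service levels held by proc_cmu; the running
 * levels keep the /proc defaults applied in cm_init().
 */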

#define THROTTLE_CHAR_MAJOR 154
#define THROTTLE_CPU_MINOR 0
#define THROTTLE_RAM_MINOR 1

static cm_user_t *users[CM_MAX_USERS];

void throttle_add(int index)
{
        devfs_mk_cdev(MKDEV(THROTTLE_CHAR_MAJOR, index),
                      S_IFCHR | S_IRUGO | S_IWUGO, "throttle/%d", index);
}

void throttle_remove(int index)
{
        devfs_remove("throttle/%d", index);
}

#define TO_CM_USER(file) ((cm_user_t *)((file)->private_data))

static int throttle_open(struct inode *inode, struct file *file)
{
        int i;
        cm_user_t *cmu;

        /* Check for space in the list. */
        for( i = 0 ; i < CM_MAX_USERS ; i++ )
                if( users[i] == NULL )
                        break;

        if( i == CM_MAX_USERS )
                return -EMFILE;

        cmu = users[i] = kmalloc(sizeof(cm_user_t), GFP_KERNEL);
        if( users[i] == NULL )
                return -ENOMEM;

        cmu->index = i;
        for( i = 0 ; i < CM_N_CLOCKS ; i++ ){
                cmu->idle_tokens[i] = 0;
                cmu->running_tokens[i] = 0;
        }

        file->private_data = cmu;
        return 0;
}

static int throttle_close(struct inode *inode, struct file *file)
{
        cm_user_t *cmu = TO_CM_USER(file);
        int i;
        unsigned long flags;

        users[cmu->index] = NULL;

        /* Remove the user's influence on the dividers and reset them. */
        local_irq_save(flags);
        for( i = 0 ; i < CM_N_CLOCKS ; i++ ){
                enable_throttle(i, cmu->idle_tokens[i]);
                disable_boost(i, cmu->running_tokens[i]);
        }

        set_dividers();
        dm320_exit_idle(); /* Apply running clocks now. */
        local_irq_restore(flags);

        kfree(cmu);
        //printk("Close clock %d idle service level is now %d\n", clock,
        //       idle_service_level[clock]);
        return 0;
}

static int throttle_ioctl(struct inode *inode, struct file *file,
                          u_int cmd, u_long arg)
{
        cm_user_t *cmu = TO_CM_USER(file);
        void __user *argp = (void __user *)arg;
        cm_setsl_in_t setsl_arg;
        cm_setsl_in_t setallsl_arg[2];
        u_long size;
        int i;

        size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
        if (cmd & IOC_IN) {
                if (!access_ok(VERIFY_READ, argp, size))
                        return -EFAULT;
        }

        if (cmd & IOC_OUT) {
                if (!access_ok(VERIFY_WRITE, argp, size))
                        return -EFAULT;
        }

        if( cmd == CM_SET_ALL_SL ){
                if( copy_from_user(&setallsl_arg, argp, sizeof(setallsl_arg)) )
                        return -EFAULT;
        }else{
                if( copy_from_user(&setsl_arg, argp, sizeof(setsl_arg)) )
                        return -EFAULT;
        }

        switch( cmd ){
        case CM_SET_IDLE_SL :
                for( i = 0 ; i < CM_N_CLOCKS ; i++ ){
                        enable_throttle(i, cmu->idle_tokens[i]);
                        cmu->idle_tokens[i] =
                                disable_throttle(i, setsl_arg.sl[i]);
                }
                break;
        case CM_SET_RUNNING_SL :
                for( i = 0 ; i < CM_N_CLOCKS ; i++ ){
                        disable_boost(i, cmu->running_tokens[i]);
                        cmu->running_tokens[i] =
                                enable_boost(i, setsl_arg.sl[i]);
                }
                break;
        case CM_SET_ALL_SL :
                throttle_all(setallsl_arg, cmu);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
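
/*
 * User-space sketch (illustrative; the exact cm_setsl_in_t layout and the
 * CM_SET_* ioctl numbers come from <asm/arch/cm.h>): a process opens the
 * devfs node created by throttle_add() and requests per-clock running
 * service levels, which stay in effect until the descriptor is closed:
 *
 *        int fd = open("/dev/throttle/0", O_RDWR);
 *        cm_setsl_in_t req = { .sl = { 0xf, 0xf, 0, 0 } };
 *
 *        ioctl(fd, CM_SET_RUNNING_SL, &req);
 *        ...
 *        close(fd);   // throttle_close() releases the tokens
 *
 * CM_SET_ALL_SL takes an array of two cm_setsl_in_t (idle levels first,
 * then running), as handled by throttle_all().
 */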

static ssize_t throttle_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
{
        return -EINVAL;
}

static ssize_t throttle_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
{
        return -EINVAL;
}

static struct file_operations cm_fops = {
        .owner   = THIS_MODULE,
        .llseek  = no_llseek,
        .read    = throttle_read,
        .write   = throttle_write,
        .open    = throttle_open,
        .release = throttle_close,
        .ioctl   = throttle_ioctl,
};

static int __init cm_init(void)
{
        struct proc_dir_entry *de;
        int i;

        printk("DM320 clock management\n");

        if( register_chrdev(THROTTLE_CHAR_MAJOR, "throttle", &cm_fops) ){
                printk("Can't allocate major number %d for throttle.\n",
                       THROTTLE_CHAR_MAJOR);
                return -EAGAIN;
        }

        devfs_mk_dir("throttle");
        throttle_add(0);

        de = create_proc_entry("throttle", 0644, NULL);
        if (!de){
                /* Undo the character device registration on failure. */
                throttle_remove(0);
                unregister_chrdev(THROTTLE_CHAR_MAJOR, "throttle");
                return -ENOMEM;
        }

        de->read_proc = (read_proc_t *) throttle_read_proc;
        de->write_proc = (write_proc_t *) throttle_write_proc;
        de->owner = THIS_MODULE;

        /* Minimum dividers. */
        min_cpu_divider = inw(DM320_CLKC_DIV0) & 0x001f;
        min_ram_divider = inw(DM320_CLKC_DIV1) & 0x001f;
        min_axl_divider = ( inw(DM320_CLKC_DIV1) & 0x1f00 ) >> 8;
        min_dsp_divider = ( inw(DM320_CLKC_DIV2) & 0x1f00 ) >> 8;
        set_dividers();

        for( i = 0 ; i < CM_N_CLOCKS ; i++ ){
                proc_cmu.idle_tokens[i] =
                        disable_throttle(i, proc_idle_service_level[i]);
                proc_cmu.running_tokens[i] =
                        enable_boost(i, proc_running_service_level[i]);
        }

        return 0;
}

static void __exit cm_exit(void)
{
        throttle_remove(0);
        unregister_chrdev(THROTTLE_CHAR_MAJOR, "throttle");
        remove_proc_entry("throttle", NULL);
}

module_init(cm_init);
module_exit(cm_exit);