Advertisement
Guest User

Untitled

a guest
Jul 7th, 2015
298
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 7.86 KB | None | 0 0
  1. /*
  2. * Copyright 2006-2012 Freescale Semiconductor, Inc. All rights reserved.
  3. */
  4.  
  5. /*
  6. * The code contained herein is licensed under the GNU General Public
  7. * License. You may obtain a copy of the GNU General Public License
  8. * Version 2 or later at the following locations:
  9. *
  10. * http://www.opensource.org/licenses/gpl-license.html
  11. * http://www.gnu.org/copyleft/gpl.html
  12. */
  13.  
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/sched.h>
  17. #include <linux/mman.h>
  18. #include <linux/init.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/fs.h>
  21. #include <linux/version.h>
  22. #include <linux/delay.h>
  23. #include <mach/dma.h>
  24.  
  25. #include <linux/dmaengine.h>
  26. #include <linux/device.h>
  27.  
  28. #include <linux/io.h>
  29. #include <linux/delay.h>
  30.  
static int gMajor; /* major number of device */
static struct class *dma_tm_class;

/*
 * Source ("write") test buffers, filled with fixed patterns by sdma_write().
 * Allocated from GFP_DMA in sdma_open(), freed in sdma_release().
 */
u32 *wbuf, *wbuf2, *wbuf3;

/*
 * Destination ("read") buffers. rbuf is NOT kmalloc'd memory: it is an
 * ioremap() of an FPGA window (see sdma_open()) and must be released with
 * iounmap(). NOTE(review): it should arguably carry __iomem and be accessed
 * via readl()/writel() — confirm against the FPGA bus requirements.
 */
u32 *rbuf, *rbuf2, *rbuf3;

/* SDMA channel obtained in sdma_open(), released in sdma_release(). */
struct dma_chan *dma_m2m_chan;

/* Signalled by dma_m2m_callback() when the DMA transfer completes. */
struct completion dma_m2m_ok;

/* Scatterlists for the source (sg) and destination (sg2) sides. */
struct scatterlist sg[3], sg2[3];

#define SDMA_BUF_SIZE 1024
  43.  
  44.  
  45.  
  46. static bool dma_m2m_filter(struct dma_chan *chan, void *param)
  47. {
  48. if (!imx_dma_is_general_purpose(chan))
  49. return false;
  50. chan->private = param;
  51. return true;
  52. }
  53.  
  54. int sdma_open(struct inode * inode, struct file * filp)
  55. {
  56. dma_cap_mask_t dma_m2m_mask;
  57. struct imx_dma_data m2m_dma_data = {0};
  58.  
  59. init_completion(&dma_m2m_ok);
  60.  
  61. dma_cap_zero(dma_m2m_mask);
  62. dma_cap_set(DMA_SLAVE, dma_m2m_mask);
  63. m2m_dma_data.peripheral_type = IMX_DMATYPE_MEMORY;
  64. m2m_dma_data.priority = DMA_PRIO_HIGH;
  65.  
  66. dma_m2m_chan = dma_request_channel(dma_m2m_mask, dma_m2m_filter, &m2m_dma_data);
  67. if (!dma_m2m_chan) {
  68. printk("Error opening the SDMA memory to memory channel\n");
  69. return -EINVAL;
  70. }
  71.  
  72. wbuf = kzalloc(SDMA_BUF_SIZE, GFP_DMA);
  73. if(!wbuf) {
  74. printk("error wbuf !!!!!!!!!!!\n");
  75. return -1;
  76. }
  77.  
  78. wbuf2 = kzalloc(SDMA_BUF_SIZE/2, GFP_DMA);
  79. if(!wbuf2) {
  80. printk("error wbuf2 !!!!!!!!!!!\n");
  81. return -1;
  82. }
  83.  
  84. wbuf3 = kzalloc(SDMA_BUF_SIZE, GFP_DMA);
  85. if(!wbuf3) {
  86. printk("error wbuf3 !!!!!!!!!!!\n");
  87. return -1;
  88. }
  89.  
  90. #if 0
  91. rbuf = kzalloc(SDMA_BUF_SIZE, GFP_DMA);
  92. if(!rbuf) {
  93. printk("error rbuf !!!!!!!!!!!\n");
  94. return -1;
  95. }
  96. #else
  97. {
  98. int FPGA_ADDR_BASE = 0x0C000000;
  99.  
  100. rbuf = ioremap(FPGA_ADDR_BASE, 1024*1024);
  101. if(!rbuf) {
  102. printk("error rbuf !!!!!!!!!!!\n");
  103. return -1;
  104. }
  105. printk(KERN_ERR "got rbuf=0x%x\n", rbuf);
  106. }
  107. #endif
  108.  
  109. rbuf2 = kzalloc(SDMA_BUF_SIZE/2, GFP_DMA);
  110. if(!rbuf2) {
  111. printk("error rbuf2 !!!!!!!!!!!\n");
  112. return -1;
  113. }
  114.  
  115. rbuf3 = kzalloc(SDMA_BUF_SIZE, GFP_DMA);
  116. if(!rbuf3) {
  117. printk("error rbuf3 !!!!!!!!!!!\n");
  118. return -1;
  119. }
  120. return 0;
  121. }
  122.  
  123. int sdma_release(struct inode * inode, struct file * filp)
  124. {
  125. dma_release_channel(dma_m2m_chan);
  126. dma_m2m_chan = NULL;
  127. kfree(wbuf);
  128. kfree(wbuf2);
  129. kfree(wbuf3);
  130. iounmap(rbuf);
  131. kfree(rbuf2);
  132. kfree(rbuf3);
  133. return 0;
  134. }
  135.  
  136. ssize_t sdma_read (struct file *filp, char __user * buf, size_t count,
  137. loff_t * offset)
  138. {
  139. int i;
  140.  
  141. #if 0
  142. for (i=0; i<SDMA_BUF_SIZE/4; i++) {
  143. if (*(rbuf+i) != *(wbuf+i)) {
  144. printk("buffer 1 copy falled!\n");
  145. return 0;
  146. }
  147. }
  148. printk("buffer 1 copy passed!\n");
  149.  
  150. for (i=0; i<SDMA_BUF_SIZE/2/4; i++) {
  151. if (*(rbuf2+i) != *(wbuf2+i)) {
  152. printk("buffer 2 copy falled!\n");
  153. return 0;
  154. }
  155. }
  156. printk("buffer 2 copy passed!\n");
  157.  
  158. for (i=0; i<SDMA_BUF_SIZE/4; i++) {
  159. if (*(rbuf3+i) != *(wbuf3+i)) {
  160. printk("buffer 3 copy falled!\n");
  161. return 0;
  162. }
  163. }
  164. printk("buffer 3 copy passed!\n");
  165. #endif
  166.  
  167. return 0;
  168. }
  169.  
  170. static void dma_m2m_callback(void *data)
  171. {
  172. printk("in %s\n",__func__);
  173. complete(&dma_m2m_ok);
  174. return ;
  175. }
  176.  
  177. ssize_t sdma_write(struct file * filp, const char __user * buf, size_t count,
  178. loff_t * offset)
  179. {
  180. u32 *index1, *index2, *index3, i, ret;
  181. struct dma_slave_config dma_m2m_config;
  182. struct dma_async_tx_descriptor *dma_m2m_desc;
  183. index1 = wbuf;
  184. index2 = wbuf2;
  185. index3 = wbuf3;
  186.  
  187. for (i=0; i<SDMA_BUF_SIZE/4; i++) {
  188. *(index1 + i) = 0x12121212;
  189. }
  190.  
  191. for (i=0; i<SDMA_BUF_SIZE/2/4; i++) {
  192. *(index2 + i) = 0x34343434;
  193. }
  194.  
  195. for (i=0; i<SDMA_BUF_SIZE/4; i++) {
  196. *(index3 + i) = 0x56565656;
  197. }
  198. #if 0
  199. for (i=0; i<SDMA_BUF_SIZE/4; i++) {
  200. printk("input data_%d : %x\n", i, *(wbuf+i));
  201. }
  202.  
  203. for (i=0; i<SDMA_BUF_SIZE/2/4; i++) {
  204. printk("input data2_%d : %x\n", i, *(wbuf2+i));
  205. }
  206.  
  207. for (i=0; i<SDMA_BUF_SIZE/4; i++) {
  208. printk("input data3_%d : %x\n", i, *(wbuf3+i));
  209. }
  210. #endif
  211. dma_m2m_config.direction = DMA_MEM_TO_DEV;
  212. dma_m2m_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
  213. dmaengine_slave_config(dma_m2m_chan, &dma_m2m_config);
  214.  
  215. printk(KERN_ERR "%s:%d\n", __func__, __LINE__);
  216. sg_init_table(sg, 1);
  217. sg_set_buf(&sg[0], wbuf, SDMA_BUF_SIZE);
  218. #if 0
  219. sg_set_buf(&sg[1], wbuf2, SDMA_BUF_SIZE/2);
  220. sg_set_buf(&sg[2], wbuf3, SDMA_BUF_SIZE);
  221. #endif
  222. ret = dma_map_sg(NULL, sg, 1, dma_m2m_config.direction);
  223.  
  224. printk(KERN_ERR "%s:%d\n", __func__, __LINE__);
  225. dma_m2m_desc = dma_m2m_chan->device->device_prep_slave_sg(dma_m2m_chan,sg, 1, dma_m2m_config.direction, 1);
  226.  
  227. printk(KERN_ERR "%s:%d\n", __func__, __LINE__);
  228. sg_init_table(sg2, 1);
  229. sg_set_buf(&sg2[0], rbuf, SDMA_BUF_SIZE);
  230. #if 0
  231. sg_set_buf(&sg2[1], rbuf2, SDMA_BUF_SIZE/2);
  232. sg_set_buf(&sg2[2], rbuf3, SDMA_BUF_SIZE);
  233. #endif
  234. ret = dma_map_sg(NULL, sg2, 1, dma_m2m_config.direction);
  235.  
  236. printk(KERN_ERR "%s:%d\n", __func__, __LINE__);
  237. dma_m2m_desc = dma_m2m_chan->device->device_prep_slave_sg(dma_m2m_chan,sg2, 1, dma_m2m_config.direction, 0);
  238.  
  239. printk(KERN_ERR "%s:%d m2m_desc=%p\n", __func__, __LINE__, dma_m2m_desc);
  240. dma_m2m_desc->callback = dma_m2m_callback;
  241. printk(KERN_ERR "%s:%d\n", __func__, __LINE__);
  242. dmaengine_submit(dma_m2m_desc);
  243.  
  244. printk(KERN_ERR "%s:%d\n", __func__, __LINE__);
  245. wait_for_completion(&dma_m2m_ok);
  246. dma_unmap_sg(NULL, sg, 1, dma_m2m_config.direction);
  247. dma_unmap_sg(NULL, sg2, 1, dma_m2m_config.direction);
  248. printk(KERN_ERR "%s:%d\n", __func__, __LINE__);
  249. return 0;
  250. }
  251.  
  252. struct file_operations dma_fops = {
  253. open: sdma_open,
  254. release: sdma_release,
  255. read: sdma_read,
  256. write: sdma_write,
  257. };
  258.  
  259. int __init sdma_init_module(void)
  260. {
  261. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
  262. struct device *temp_class;
  263. #else
  264. struct class_device *temp_class;
  265. #endif
  266. int error;
  267.  
  268. /* register a character device */
  269. error = register_chrdev(0, "sdma_test", &dma_fops);
  270. if (error < 0) {
  271. printk("SDMA test driver can't get major number\n");
  272. return error;
  273. }
  274. gMajor = error;
  275. printk("SDMA test major number = %d\n",gMajor);
  276.  
  277. dma_tm_class = class_create(THIS_MODULE, "sdma_test");
  278. if (IS_ERR(dma_tm_class)) {
  279. printk(KERN_ERR "Error creating sdma test module class.\n");
  280. unregister_chrdev(gMajor, "sdma_test");
  281. return PTR_ERR(dma_tm_class);
  282. }
  283.  
  284. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
  285. temp_class = device_create(dma_tm_class, NULL,
  286. MKDEV(gMajor, 0), NULL, "sdma_test");
  287. #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
  288. temp_class = device_create(dma_tm_class, NULL,
  289. MKDEV(gMajor, 0), "sdma_test");
  290. #else
  291. temp_class = class_device_create(dma_tm_class, NULL,
  292. MKDEV(gMajor, 0), NULL,
  293. "sdma_test");
  294. #endif
  295. if (IS_ERR(temp_class)) {
  296. printk(KERN_ERR "Error creating sdma test class device.\n");
  297. class_destroy(dma_tm_class);
  298. unregister_chrdev(gMajor, "sdma_test");
  299. return -1;
  300. }
  301.  
  302. printk("SDMA test Driver Module loaded\n");
  303. return 0;
  304. }
  305.  
  306. static void sdma_cleanup_module(void)
  307. {
  308. unregister_chrdev(gMajor, "sdma_test");
  309. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
  310. device_destroy(dma_tm_class, MKDEV(gMajor, 0));
  311. #else
  312. class_device_destroy(dma_tm_class, MKDEV(gMajor, 0));
  313. #endif
  314. class_destroy(dma_tm_class);
  315.  
  316. printk("SDMA test Driver Module Unloaded\n");
  317. }
  318.  
  319.  
/* Entry/exit hooks and module metadata. */
module_init(sdma_init_module);
module_exit(sdma_cleanup_module);

MODULE_AUTHOR("Freescale Semiconductor");
MODULE_DESCRIPTION("SDMA test driver");
MODULE_LICENSE("GPL");
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement