  1. /*
  2. * drivers/dma/sunxi-dma.c
  3. *
  4. * Copyright (C) 2013-2015 Allwinnertech Co., Ltd
  5. *
  6. * Author: Sugar <shuge@allwinnertech.com>
  7. *
  8. * Sunxi DMA controller driver
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. */
  15. #include <linux/bitops.h>
  16. #include <linux/clk.h>
  17. #include <linux/delay.h>
  18. #include <linux/dmapool.h>
  19. #include <linux/dmaengine.h>
  20. #include <linux/dma-mapping.h>
  21. #include <linux/init.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/io.h>
  24. #include <linux/irq.h>
  25. #include <linux/mm.h>
  26. #include <linux/module.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/slab.h>
  29. #include <linux/debugfs.h>
  30. #include <linux/dma/sunxi-dma.h>
  31.  
  32. #include <linux/of_address.h>
  33. #include <linux/of_irq.h>
  34. #include <linux/of_platform.h>
  35. #include <linux/of_device.h>
  36.  
  37.  
  38. #include "dmaengine.h"
  39. #include "virt-dma.h"
  40.  
  41. //#define SUNXI_GIC_START 32
  42. #define SUNXI_DMA_PBASE 0x01c02000
  43. #define SUNXI_DMA_VBASE 0xf1c02000
  44. //#define SUNXI_IRQ_DMA (SUNXI_GIC_START + 50) /* DMA */
  45. #define SUNXI_IRQ_DMA 27
  46.  
  47.  
  48. #if defined(CONFIG_ARCH_SUN8IW3)
  49. #define NR_MAX_CHAN 8 /* total number of channels */
  50. #else
  51. #define NR_MAX_CHAN 16 /* total number of channels */
  52. #endif
  53.  
  54. #define HIGH_CHAN 8
  55. #define DMA_IRQ_ID SUNXI_IRQ_DMA /* SUN8I: 82 SUN9I: 82 */
  56.  
  57. #define DMA_PHYS_BASE SUNXI_DMA_PBASE /* SUN8I: 0x01c02000 SUN9I: 0x00802000 */
  58.  
  59. #define DMA_IRQ_EN(x) (0x000 + ((x) << 2)) /* Interrupt enable register */
  60. #define DMA_IRQ_STAT(x) (0x010 + ((x) << 2)) /* Interrupt status register */
  61.  
  62. #ifdef CONFIG_ARCH_SUN9I
  63. #define DMA_SECU 0x20 /* DMA security register */
  64. #define DMA_GATE 0x28 /* DMA gating register */
  65. #else
  66. #define DMA_GATE 0x20 /* DMA gating register */
  67. #endif
  68.  
  69. #define DMA_STAT 0x30 /* DMA Status Register RO */
  70. #define DMA_ENABLE(x) (0x100 + ((x) << 6)) /* Channels enable register */
  71. #define DMA_PAUSE(x) (0x104 + ((x) << 6)) /* DMA Channels pause register */
  72. #define DMA_LLI_ADDR(x) (0x108 + ((x) << 6)) /* Descriptor address register */
  73. #define DMA_CFG(x) (0x10C + ((x) << 6)) /* Configuration register RO */
  74. #define DMA_CUR_SRC(x) (0x110 + ((x) << 6)) /* Current source address RO */
  75. #define DMA_CUR_DST(x) (0x114 + ((x) << 6)) /* Current destination address RO */
  76. #define DMA_CNT(x) (0x118 + ((x) << 6)) /* Byte counter left register RO */
  77. #define DMA_PARA(x) (0x11C + ((x) << 6)) /* Parameter register RO */
  78.  
  79. #ifdef CONFIG_ARCH_SUN9I
  80.  
  81. #define DMA_OP_MODE(x) (0x128 + ((x) << 6)) /* DMA mode options register */
  82. #define SRC_HS_MASK (0x1 << 2) /* bit 2: Source handshake mode */
  83. #define DST_HS_MASK (0x2 << 3) /* bit 3: Destination handshake mode */
  84.  
  85. #define SET_OP_MODE(d, x, val) ({ \
  86. writel(val, d->base + DMA_OP_MODE(x)); \
  87. })
  88. #define LINK_END 0x1FFFF800 /* the last link must be 0x1ffff800 */
  89.  
  90. #else
  91.  
  92. #define DMA_OP_MODE(x)
  93. #define SRC_HS_MASK
  94. #define DST_HS_MASK
  95. #define SET_OP_MODE(d, x, val) do{}while(0)
  96. #define LINK_END 0xFFFFF800 /* the last link must be 0xfffff800 */
  97.  
  98. #endif
  99.  
  100. #define SHIFT_IRQ_MASK(val, ch) ({ \
  101. (ch) >= HIGH_CHAN \
  102. ? (val) << ((ch - HIGH_CHAN) << 2) \
  103. : (val) << ((ch) << 2 ); \
  104. })
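/*
 * Each channel owns a 4-bit field in the interrupt enable/pending registers:
 * channels 0-7 live in DMA_IRQ_EN(0)/DMA_IRQ_STAT(0), channels 8-15 in
 * DMA_IRQ_EN(1)/DMA_IRQ_STAT(1). For example, SHIFT_IRQ_MASK(IRQ_PKG, 3)
 * gives 0x02 << 12 in register 0, while SHIFT_IRQ_MASK(IRQ_PKG, 10) gives
 * 0x02 << 8 within register 1.
 */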
  105.  
  106. #define IRQ_HALF 0x01 /* Half package transfer interrupt pending */
  107. #define IRQ_PKG 0x02 /* One package complete interrupt pending */
  108. #define IRQ_QUEUE 0x04 /* All list complete transfer interrupt pending */
  109.  
  110. /* Bit fields of the channel configuration register */
  111. #define SRC_WIDTH(x) ((x) << 9)
  112. #define SRC_BURST(x) ((x) << 7)
  113. #define SRC_IO_MODE (0x01 << 5)
  114. #define SRC_LINEAR_MODE (0x00 << 5)
  115. #define SRC_DRQ(x) ((x) << 0)
  116.  
  117. #define DST_WIDTH(x) ((x) << 25)
  118. #define DST_BURST(x) ((x) << 23)
  119. #define DST_IO_MODE (0x01 << 21)
  120. #define DST_LINEAR_MODE (0x00 << 21)
  121. #define DST_DRQ(x) ((x) << 16)
  122.  
  123. #define CHAN_START 1
  124. #define CHAN_STOP 0
  125. #define CHAN_PAUSE 1
  126. #define CHAN_RESUME 0
  127.  
  128. #define NORMAL_WAIT (8 << 0)
  129.  
  130. /* DT matches */
  131. static const struct of_device_id sunxi_dma_of_dev_id[] = {
  132. { .compatible = "allwinner,sun4i-dma", },
  133. {},
  134. };
  135. MODULE_DEVICE_TABLE(of, sunxi_dma_of_dev_id);
  136.  
  137. /* lli: linked list item, the DMA block descriptor */
  138. struct sunxi_dma_lli {
  139. u32 cfg; /* DMA configuration */
  140. dma_addr_t src; /* Source address */
  141. dma_addr_t dst; /* Destination address */
  142. u32 len; /* Length of buffers */
  143. u32 para; /* Parameter register */
  144. dma_addr_t p_lln; /* Next lli physical address */
  145. struct sunxi_dma_lli *v_lln; /* Next lli virtual address (only for cpu) */
  146. #ifdef DEBUG
  147. dma_addr_t this_phy; /* Physical address of this lli */
  148. #define set_this_phy(li, addr) \
  149. ((li)->this_phy = (addr))
  150. #else
  151. #define set_this_phy(li, addr)
  152. #endif
  153. }__attribute__((packed));
  154.  
  155. struct sunxi_dmadev {
  156. struct dma_device dma_dev;
  157. void __iomem *base;
  158. struct clk *ahb_clk; /* AHB clock gate for DMA */
  159.  
  160. spinlock_t lock;
  161. struct tasklet_struct task;
  162. struct list_head pending; /* the pending channels list */
  163. struct dma_pool *lli_pool; /* Pool of lli */
  164. };
  165.  
  166. struct sunxi_desc {
  167. struct virt_dma_desc vd;
  168. dma_addr_t lli_phys; /* physical start for llis */
  169. struct sunxi_dma_lli *lli_virt; /* virtual start for lli */
  170. };
  171.  
  172. struct sunxi_chan {
  173. struct virt_dma_chan vc;
  174.  
  175. struct list_head node; /* queue it to pending list */
  176. struct dma_slave_config cfg;
  177. bool cyclic;
  178.  
  179. struct sunxi_desc *desc;
  180. u32 irq_type;
  181. };
  182.  
  183. static inline struct sunxi_dmadev *to_sunxi_dmadev(struct dma_device *d)
  184. {
  185. return container_of(d, struct sunxi_dmadev, dma_dev);
  186. }
  187.  
  188. static inline struct sunxi_chan *to_sunxi_chan(struct dma_chan *chan)
  189. {
  190. return container_of(chan, struct sunxi_chan, vc.chan);
  191. }
  192.  
  193. static inline struct sunxi_desc *to_sunxi_desc(struct dma_async_tx_descriptor *tx)
  194. {
  195. return container_of(tx, struct sunxi_desc, vd.tx);
  196. }
  197.  
  198. static struct device *chan2dev(struct dma_chan *chan)
  199. {
  200. return &chan->dev->device;
  201. }
  202. static struct device *chan2parent(struct dma_chan *chan)
  203. {
  204. return chan->dev->device.parent;
  205. }
  206.  
  207. /*
  208. * Fix sconfig's burst size according to sunxi_dmac. We need to convert it as:
  209. * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3
  210. *
  211. * NOTE: burst size 2 is not supported by the controller.
  212. *
  213. * This is done with fls(): for maxburst > 1, burst = fls(maxburst) - 2.
  214. */
  215. static inline void convert_burst(u32 *maxburst)
  216. {
  217. if (*maxburst > 1)
  218. *maxburst = fls(*maxburst) - 2;
  219. else
  220. *maxburst = 0;
  221. }
  222.  
  223. /*
  224. * Fix sconfig's bus width according to sunxi_dmac.
  225. * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
  226. */
  227. static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
  228. {
  229. switch (addr_width) {
  230. case DMA_SLAVE_BUSWIDTH_2_BYTES:
  231. return 1;
  232. case DMA_SLAVE_BUSWIDTH_4_BYTES:
  233. return 2;
  234. default:
  235. /* For 1 byte width or fallback */
  236. return 0;
  237. }
  238. }
  239.  
  240. static size_t sunxi_get_desc_size(struct sunxi_desc *txd)
  241. {
  242. struct sunxi_dma_lli *lli;
  243. size_t size = 0;
  244.  
  245. for (lli = txd->lli_virt; lli != NULL; lli = lli->v_lln)
  246. size += lli->len;
  247.  
  248. return size;
  249. }
  250.  
  251. /*
  252. * sunxi_get_chan_size - get the number of bytes left on one channel.
  253. * @ch: the channel
  254. */
  255. static size_t sunxi_get_chan_size(struct sunxi_chan *ch)
  256. {
  257. struct sunxi_dma_lli *lli;
  258. struct sunxi_desc *txd;
  259. struct sunxi_dmadev *sdev;
  260. size_t size = 0;
  261. dma_addr_t pos;
  262. bool count = false;
  263.  
  264. txd = ch->desc;
  265.  
  266. if (!txd)
  267. return 0;
  268.  
  269. sdev = to_sunxi_dmadev(ch->vc.chan.device);
  270. pos = readl(sdev->base + DMA_LLI_ADDR(ch->vc.chan.chan_id));
  271. size = readl(sdev->base + DMA_CNT(ch->vc.chan.chan_id));
  272.  
  273. /* This is the last package; just read the count register */
  274. if (pos == LINK_END)
  275. return size;
  276.  
  277. for (lli = txd->lli_virt; lli != NULL; lli = lli->v_lln) {
  278. /* Found the next lli that is ready to be transferred */
  279. if (lli->p_lln == pos) {
  280. count = true;
  281. continue;
  282. }
  283.  
  284. if (count)
  285. size += lli->len;
  286. }
  287.  
  288. return size;
  289. }
  290.  
  291. /*
  292. * sunxi_free_desc - free the struct sunxi_desc.
  293. * @vd: the virt-desc for this chan
  294. */
  295. static void sunxi_free_desc(struct virt_dma_desc *vd)
  296. {
  297. struct sunxi_desc *txd = to_sunxi_desc(&vd->tx);
  298. struct sunxi_dmadev *sdev = to_sunxi_dmadev(vd->tx.chan->device);
  299. struct sunxi_dma_lli *li_adr, *next_virt;
  300. dma_addr_t phy, next_phy;
  301.  
  302. if (unlikely(!txd))
  303. return;
  304.  
  305. phy = txd->lli_phys;
  306. li_adr = txd->lli_virt;
  307.  
  308. while(li_adr) {
  309. next_virt = li_adr->v_lln;
  310. next_phy = li_adr->p_lln;
  311. dma_pool_free(sdev->lli_pool, li_adr, phy);
  312. li_adr = next_virt;
  313. phy = next_phy;
  314. }
  315.  
  316. kfree(txd);
  317. }
  318.  
  319. static inline void sunxi_dump_com_regs(struct sunxi_chan *ch)
  320. {
  321. struct sunxi_dmadev *sdev;
  322.  
  323. sdev = to_sunxi_dmadev(ch->vc.chan.device);
  324.  
  325. pr_debug("Common register:\n"
  326. "\tmask0(%04x): 0x%08x\n"
  327. "\tmask1(%04x): 0x%08x\n"
  328. "\tpend0(%04x): 0x%08x\n"
  329. "\tpend1(%04x): 0x%08x\n"
  330. #ifdef CONFIG_ARCH_SUN9I
  331. "\tsecur(%04x): 0x%08x\n"
  332. "\t_gate(%04x): 0x%08x\n"
  333. #endif
  334. "\tstats(%04x): 0x%08x\n",
  335. DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
  336. DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
  337. DMA_IRQ_STAT(0),readl(sdev->base + DMA_IRQ_STAT(0)),
  338. DMA_IRQ_STAT(1),readl(sdev->base + DMA_IRQ_STAT(1)),
  339. #ifdef CONFIG_ARCH_SUN9I
  340. DMA_SECU, readl(sdev->base + DMA_SECU),
  341. DMA_GATE, readl(sdev->base + DMA_GATE),
  342. #endif
  343. DMA_STAT, readl(sdev->base + DMA_STAT));
  344. }
  345.  
  346. static inline void sunxi_dump_chan_regs(struct sunxi_chan *ch)
  347. {
  348. struct sunxi_dmadev *sdev = to_sunxi_dmadev(ch->vc.chan.device);
  349. u32 chan_num = ch->vc.chan.chan_id;
  350.  
  351. pr_debug("Chan %d reg:\n"
  352. "\t___en(%04x): \t0x%08x\n"
  353. "\tpause(%04x): \t0x%08x\n"
  354. "\tstart(%04x): \t0x%08x\n"
  355. "\t__cfg(%04x): \t0x%08x\n"
  356. "\t__src(%04x): \t0x%08x\n"
  357. "\t__dst(%04x): \t0x%08x\n"
  358. "\tcount(%04x): \t0x%08x\n"
  359. "\t_para(%04x): \t0x%08x\n\n",
  360. chan_num,
  361. DMA_ENABLE(chan_num),
  362. readl(sdev->base + DMA_ENABLE(chan_num)),
  363. DMA_PAUSE(chan_num),
  364. readl(sdev->base + DMA_PAUSE(chan_num)),
  365. DMA_LLI_ADDR(chan_num),
  366. readl(sdev->base + DMA_LLI_ADDR(chan_num)),
  367. DMA_CFG(chan_num),
  368. readl(sdev->base + DMA_CFG(chan_num)),
  369. DMA_CUR_SRC(chan_num),
  370. readl(sdev->base + DMA_CUR_SRC(chan_num)),
  371. DMA_CUR_DST(chan_num),
  372. readl(sdev->base + DMA_CUR_DST(chan_num)),
  373. DMA_CNT(chan_num),
  374. readl(sdev->base + DMA_CNT(chan_num)),
  375. DMA_PARA(chan_num),
  376. readl(sdev->base + DMA_PARA(chan_num)));
  377. }
  378.  
  379.  
  380. /*
  381. * sunxi_dma_resume - resume a channel that is in the paused state.
  382. * @ch: the channel to resume
  383. */
  384. static void sunxi_dma_resume(struct sunxi_chan *ch)
  385. {
  386. struct sunxi_dmadev *sdev = to_sunxi_dmadev(ch->vc.chan.device);
  387. u32 chan_num = ch->vc.chan.chan_id;
  388.  
  389. writel(CHAN_RESUME, sdev->base + DMA_PAUSE(chan_num));
  390. }
  391.  
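/*
 * sunxi_dma_pause - pause the transfer on a channel.
 * @ch: the channel to pause
 */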
  392. static void sunxi_dma_pause(struct sunxi_chan *ch)
  393. {
  394. struct sunxi_dmadev *sdev = to_sunxi_dmadev(ch->vc.chan.device);
  395. u32 chan_num = ch->vc.chan.chan_id;
  396.  
  397. writel(CHAN_PAUSE, sdev->base + DMA_PAUSE(chan_num));
  398. }
  399.  
  400. /*
  401. * sunxi_terminate_all - stop all descriptors waiting to transfer on the channel.
  402. * @ch: the channel to stop
  403. */
  404. static int sunxi_terminate_all(struct sunxi_chan *ch)
  405. {
  406. struct sunxi_dmadev *sdev = to_sunxi_dmadev(ch->vc.chan.device);
  407. u32 chan_num = ch->vc.chan.chan_id;
  408. unsigned long flags;
  409. LIST_HEAD(head);
  410.  
  411. spin_lock_irqsave(&ch->vc.lock, flags);
  412.  
  413. spin_lock(&sdev->lock);
  414. list_del_init(&ch->node);
  415. spin_unlock(&sdev->lock);
  416.  
  417. if (ch->desc)
  418. ch->desc = NULL;
  419.  
  420. ch->cyclic = false;
  421.  
  422. writel(CHAN_STOP, sdev->base + DMA_ENABLE(chan_num));
  423.  
  424. vchan_get_all_descriptors(&ch->vc, &head);
  425. spin_unlock_irqrestore(&ch->vc.lock, flags);
  426. vchan_dma_desc_free_list(&ch->vc, &head);
  427.  
  428. return 0;
  429. }
  430.  
  431. /*
  432. * sunxi_start_desc - begin to transport the descriptor
  433. * @ch: the channel of descriptor
  434. */
  435. static void sunxi_start_desc(struct sunxi_chan *ch)
  436. {
  437. struct virt_dma_desc *vd = vchan_next_desc(&ch->vc);
  438. struct sunxi_desc *txd;
  439. struct sunxi_dmadev *sdev = to_sunxi_dmadev(ch->vc.chan.device);
  440. u32 chan_num = ch->vc.chan.chan_id;
  441. u32 irq_val;
  442. u32 high;
  443.  
  444. if (!vd){
  445. while(readl(sdev->base + DMA_STAT) & (1 << chan_num))
  446. cpu_relax();
  447. writel(CHAN_STOP, sdev->base + DMA_ENABLE(chan_num));
  448. return;
  449. }
  450.  
  451. list_del(&vd->node);
  452. txd = to_sunxi_desc(&vd->tx);
  453. ch->desc = txd;
  454.  
  455. while(readl(sdev->base + DMA_STAT) & (1 << chan_num))
  456. cpu_relax();
  457.  
  458. if (ch->cyclic)
  459. ch->irq_type = IRQ_PKG;
  460. else
  461. ch->irq_type = IRQ_QUEUE;
  462.  
  463. high = (chan_num >= HIGH_CHAN) ? 1 : 0;
  464.  
  465. irq_val = readl(sdev->base + DMA_IRQ_EN(high));
  466. irq_val |= SHIFT_IRQ_MASK(ch->irq_type, chan_num);
  467. writel(irq_val, sdev->base + DMA_IRQ_EN(high));
  468.  
  469. /* Set the DMA operation mode */
  470. SET_OP_MODE(sdev, chan_num, SRC_HS_MASK | DST_HS_MASK);
  471.  
  472. /* write the first lli address to register, and start to transfer */
  473. writel(txd->lli_phys, sdev->base + DMA_LLI_ADDR(chan_num));
  474. writel(CHAN_START, sdev->base + DMA_ENABLE(chan_num));
  475.  
  476. sunxi_dump_com_regs(ch);
  477. sunxi_dump_chan_regs(ch);
  478. }
  479.  
  480. /*
  481. * sunxi_alloc_lli - Allocate a sunxi_lli
  482. * @sdev: the sunxi_dmadev
  483. * @phy_addr: return the physical address
  484. */
  485. void *sunxi_alloc_lli(struct sunxi_dmadev *sdev, dma_addr_t *phy_addr)
  486. {
  487. struct sunxi_dma_lli *l_item;
  488.  
  489. WARN_TAINT(!sdev->lli_pool, TAINT_WARN, "The dma pool is empty!!\n");
  490. if (unlikely(!sdev->lli_pool))
  491. return NULL;
  492.  
  493. l_item = dma_pool_alloc(sdev->lli_pool, GFP_NOWAIT, phy_addr);
  494. set_this_phy(l_item, *phy_addr);
  495.  
  496. return l_item;
  497. }
  498.  
  499. /*
  500. * sunxi_dump_lli - dump the information for one lli
  501. * @schan: the channel
  502. * @lli: a lli to dump
  503. */
  504. static inline void sunxi_dump_lli(struct sunxi_chan *schan, struct sunxi_dma_lli *lli)
  505. {
  506. #ifdef DEBUG
  507. dev_dbg(chan2dev(&schan->vc.chan),
  508. "\n\tdesc: p - 0x%08x v - 0x%08x \n"
  509. "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
  510. "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
  511. lli->this_phy, (u32)lli,
  512. lli->cfg, lli->src, lli->dst,
  513. lli->len, lli->para, lli->p_lln);
  514. #endif
  515. }
  516.  
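/*
 * sunxi_lli_list - append one lli to a descriptor's chain.
 * @prev: the current tail, or NULL when @next becomes the head of @txd
 * @next: the lli to append
 * @next_phy: physical address of @next
 * @txd: the descriptor owning the chain
 *
 * The new tail is terminated with LINK_END, so the controller stops there
 * unless a later call (or the cyclic prepare path) relinks it.
 */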
  517. static void *sunxi_lli_list(struct sunxi_dma_lli *prev, struct sunxi_dma_lli *next,
  518. dma_addr_t next_phy, struct sunxi_desc *txd)
  519. {
  520. if ((!prev && !txd) || !next)
  521. return NULL;
  522.  
  523. if (!prev){
  524. txd->lli_phys = next_phy;
  525. txd->lli_virt = next;
  526. } else {
  527. prev->p_lln = next_phy;
  528. prev->v_lln = next;
  529. }
  530.  
  531. next->p_lln = LINK_END;
  532. next->v_lln = NULL;
  533.  
  534. return next;
  535. }
  536.  
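/*
 * sunxi_cfg_lli - fill in one lli from a slave configuration.
 * Sets the source/destination addresses, the length, the wait cycles
 * (NORMAL_WAIT) and the burst/width bits of the cfg word. Callers then OR
 * in the DRQ numbers and address modes, e.g. a mem-to-device transfer adds
 * DST_IO_MODE | SRC_LINEAR_MODE | SRC_DRQ(DRQSRC_SDRAM) |
 * GET_DST_DRQ(slave_id), as sunxi_prep_slave_sg() does below.
 */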
  537. static inline void sunxi_cfg_lli(struct sunxi_dma_lli *lli, dma_addr_t src,
  538. dma_addr_t dst, u32 len, struct dma_slave_config *config)
  539. {
  540. u32 src_width, dst_width;
  541.  
  542. if (!config)
  543. return;
  544.  
  545. /* Get the data width */
  546. src_width = convert_buswidth(config->src_addr_width);
  547. dst_width = convert_buswidth(config->dst_addr_width);
  548.  
  549. lli->cfg = SRC_BURST(config->src_maxburst)
  550. | SRC_WIDTH(src_width)
  551. | DST_BURST(config->dst_maxburst)
  552. | DST_WIDTH(dst_width);
  553.  
  554. lli->src = src;
  555. lli->dst = dst;
  556. lli->len = len;
  557. lli->para = NORMAL_WAIT;
  558.  
  559. }
  560.  
  561.  
  562. /*
  563. * sunxi_dma_tasklet - start the next descriptor on each pending channel.
  564. * @data: sunxi_dmadev
  565. */
  566. static void sunxi_dma_tasklet(unsigned long data)
  567. {
  568. struct sunxi_dmadev *sdev = (struct sunxi_dmadev *)data;
  569. LIST_HEAD(head);
  570.  
  571. spin_lock_irq(&sdev->lock);
  572. list_splice_tail_init(&sdev->pending, &head);
  573. spin_unlock_irq(&sdev->lock);
  574.  
  575. while (!list_empty(&head)) {
  576. struct sunxi_chan *c = list_first_entry(&head,
  577. struct sunxi_chan, node);
  578.  
  579. spin_lock_irq(&c->vc.lock);
  580. list_del_init(&c->node);
  581. sunxi_start_desc(c);
  582. spin_unlock_irq(&c->vc.lock);
  583. }
  584. }
  585.  
  586. /*
  587. * sunxi_dma_interrupt - interrupt handler.
  588. * @irq: irq number
  589. * @dev_id: sunxi_dmadev
  590. */
  591. static irqreturn_t sunxi_dma_interrupt(int irq, void *dev_id)
  592. {
  593. struct sunxi_dmadev *sdev = (struct sunxi_dmadev *)dev_id;
  594. struct sunxi_chan *ch;
  595. struct sunxi_desc *desc;
  596. unsigned long flags;
  597. u32 status_lo = 0, status_hi = 0;
  598.  
  599. /* Get the status of irq */
  600. status_lo = readl(sdev->base + DMA_IRQ_STAT(0));
  601. #ifndef CONFIG_ARCH_SUN8IW3
  602. status_hi = readl(sdev->base + DMA_IRQ_STAT(1));
  603. #endif
  604.  
  605. dev_dbg(sdev->dma_dev.dev, "[sunxi_dma]: DMA irq status_lo: 0x%08x, "
  606. "status_hi: 0x%08x\n", status_lo, status_hi);
  607.  
  608. /* Clear the bit of irq status */
  609. writel(status_lo, sdev->base + DMA_IRQ_STAT(0));
  610. #ifndef CONFIG_ARCH_SUN8IW3
  611. writel(status_hi, sdev->base + DMA_IRQ_STAT(1));
  612. #endif
  613.  
  614. list_for_each_entry(ch, &sdev->dma_dev.channels, vc.chan.device_node) {
  615. u32 chan_num = ch->vc.chan.chan_id;
  616. u32 status;
  617.  
  618. status = (chan_num >= HIGH_CHAN)
  619. ? (status_hi >> ((chan_num - HIGH_CHAN) <<2))
  620. : (status_lo >> (chan_num << 2));
  621.  
  622. if (!(ch->irq_type & status))
  623. continue;
  624.  
  625. if (!ch->desc)
  626. continue;
  627.  
  628. spin_lock_irqsave(&ch->vc.lock, flags);
  629. desc = ch->desc;
  630. if (ch->cyclic) {
  631. vchan_cyclic_callback(&desc->vd);
  632. } else {
  633. ch->desc = NULL;
  634. vchan_cookie_complete(&desc->vd);
  635. sunxi_start_desc(ch);
  636. }
  637. spin_unlock_irqrestore(&ch->vc.lock, flags);
  638. }
  639.  
  640. return IRQ_HANDLED;
  641. }
  642.  
  643. static struct dma_async_tx_descriptor *sunxi_prep_dma_memcpy(
  644. struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  645. size_t len, unsigned long flags)
  646. {
  647. struct sunxi_chan *schan = to_sunxi_chan(chan);
  648. struct sunxi_desc *txd;
  649. struct sunxi_dma_lli *l_item;
  650. struct sunxi_dmadev *sdev = to_sunxi_dmadev(chan->device);
  651. struct dma_slave_config *sconfig = &schan->cfg;
  652. dma_addr_t phy;
  653.  
  654. dev_dbg(chan2dev(chan), "%s; chan: %d, dest: 0x%08x, "
  655. "src: 0x%08x, len: 0x%08x. flags: 0x%08lx\n",
  656. __func__, schan->vc.chan.chan_id, dest, src, len, flags);
  657.  
  658. if (unlikely(!len)) {
  659. dev_dbg(chan2dev(chan), "%s: memcpy length is zero!!\n", __func__);
  660. return NULL;
  661. }
  662.  
  663. txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
  664. if (!txd) {
  665. dev_err(chan2dev(chan), "%s: Failed to alloc sunxi_desc!!\n", __func__);
  666. return NULL;
  667. }
  668. vchan_tx_prep(&schan->vc, &txd->vd, flags);
  669.  
  670. l_item = sunxi_alloc_lli(sdev, &phy);
  671. if (!l_item) {
  672. sunxi_free_desc(&txd->vd);
  673. dev_err(sdev->dma_dev.dev, "Failed to alloc lli memory!!!\n");
  674. return NULL;
  675. }
  676.  
  677. sunxi_cfg_lli(l_item, src, dest, len, sconfig);
  678. l_item->cfg |= SRC_DRQ(DRQSRC_SDRAM)
  679. | DST_DRQ(DRQDST_SDRAM)
  680. | DST_LINEAR_MODE
  681. | SRC_LINEAR_MODE;
  682.  
  683. sunxi_lli_list(NULL, l_item, phy, txd);
  684.  
  685. sunxi_dump_lli(schan, l_item);
  686.  
  687. return &txd->vd.tx;
  688. }
  689.  
  690. static struct dma_async_tx_descriptor *sunxi_prep_dma_sg(
  691. struct dma_chan *chan,
  692. struct scatterlist *dst_sg, unsigned int dst_nents,
  693. struct scatterlist *src_sg, unsigned int src_nents,
  694. unsigned long flags)
  695. {
  696. struct sunxi_chan *schan = to_sunxi_chan(chan);
  697. struct sunxi_dmadev *sdev = to_sunxi_dmadev(chan->device);
  698. struct dma_slave_config *sconfig = &schan->cfg;
  699. struct sunxi_desc *txd;
  700. struct sunxi_dma_lli *l_item, *prev = NULL;
  701. dma_addr_t phy;
  702.  
  703. if (dst_nents != src_nents)
  704. return NULL;
  705.  
  706. if (!dst_nents || !src_nents)
  707. return NULL;
  708.  
  709. if (dst_sg == NULL || src_sg == NULL)
  710. return NULL;
  711.  
  712. txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
  713. if (!txd) {
  714. dev_err(chan2dev(chan), "%s: Failed to alloc sunxi_desc!!\n", __func__);
  715. return NULL;
  716. }
  717. vchan_tx_prep(&schan->vc, &txd->vd, flags);
  718.  
  719. while ((src_sg != NULL) && (dst_sg != NULL)) {
  720. l_item = sunxi_alloc_lli(sdev, &phy);
  721. if (!l_item) {
  722. sunxi_free_desc(&txd->vd);
  723. return NULL;
  724. }
  725.  
  726. sunxi_cfg_lli(l_item, sg_dma_address(src_sg),
  727. sg_dma_address(dst_sg), sg_dma_len(dst_sg), sconfig);
  728. l_item->cfg |= SRC_LINEAR_MODE
  729. | DST_LINEAR_MODE
  730. | GET_DST_DRQ(sconfig->slave_id)
  731. | GET_SRC_DRQ(sconfig->slave_id);
  732.  
  733. prev = sunxi_lli_list(prev, l_item, phy, txd);
  734. src_sg = sg_next(src_sg);
  735. dst_sg = sg_next(dst_sg);
  736. }
  737.  
  738. #ifdef DEBUG
  739. pr_debug("[sunxi_dma]: First: 0x%08x\n", txd->lli_phys);
  740. for(prev = txd->lli_virt; prev != NULL; prev = prev->v_lln)
  741. sunxi_dump_lli(schan, prev);
  742. #endif
  743.  
  744. return &txd->vd.tx;
  745. }
  746.  
  747. static struct dma_async_tx_descriptor *sunxi_prep_slave_sg(
  748. struct dma_chan *chan, struct scatterlist *sgl,
  749. unsigned int sg_len, enum dma_transfer_direction dir,
  750. unsigned long flags, void *context)
  751. {
  752. struct sunxi_chan *schan = to_sunxi_chan(chan);
  753. struct sunxi_desc *txd;
  754. struct sunxi_dma_lli *l_item, *prev = NULL;
  755. struct sunxi_dmadev *sdev = to_sunxi_dmadev(chan->device);
  756. struct dma_slave_config *sconfig = &schan->cfg;
  757.  
  758. struct scatterlist *sg;
  759. dma_addr_t phy;
  760. unsigned int i;
  761.  
  762. if (unlikely(!sg_len)) {
  763. dev_dbg(chan2dev(chan), "%s: sg length is zero!!\n", __func__);
  764. return NULL;
  765. }
  766.  
  767. txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
  768. if (!txd) {
  769. dev_err(chan2dev(chan), "%s: Failed to alloc sunxi_desc!!\n", __func__);
  770. return NULL;
  771. }
  772. vchan_tx_prep(&schan->vc, &txd->vd, flags);
  773.  
  774. for_each_sg(sgl, sg, sg_len, i) {
  775. l_item = sunxi_alloc_lli(sdev, &phy);
  776. if (!l_item) {
  777. sunxi_free_desc(&txd->vd);
  778. return NULL;
  779. }
  780.  
  781. if (dir == DMA_MEM_TO_DEV) {
  782. sunxi_cfg_lli(l_item, sg_dma_address(sg),
  783. sconfig->dst_addr, sg_dma_len(sg), sconfig);
  784. l_item->cfg |= DST_IO_MODE
  785. | SRC_LINEAR_MODE
  786. | SRC_DRQ(DRQSRC_SDRAM)
  787. | GET_DST_DRQ(sconfig->slave_id);
  788.  
  789. } else if (dir == DMA_DEV_TO_MEM) {
  790. sunxi_cfg_lli(l_item, sconfig->src_addr,
  791. sg_dma_address(sg), sg_dma_len(sg), sconfig);
  792. l_item->cfg |= DST_LINEAR_MODE
  793. | SRC_IO_MODE
  794. | DST_DRQ(DRQDST_SDRAM)
  795. | GET_SRC_DRQ(sconfig->slave_id);
  796. }
  797.  
  798. prev = sunxi_lli_list(prev, l_item, phy, txd);
  799. }
  800.  
  801. #ifdef DEBUG
  802. pr_debug("[sunxi_dma]: First: 0x%08x\n", txd->lli_phys);
  803. for(prev = txd->lli_virt; prev != NULL; prev = prev->v_lln)
  804. sunxi_dump_lli(schan, prev);
  805. #endif
  806.  
  807. return &txd->vd.tx;
  808. }
  809.  
  810. /**
  811. * sunxi_prep_dma_cyclic - prepare the cyclic DMA transfer
  812. * @chan: the DMA channel to prepare
  813. * @buf_addr: physical DMA address where the buffer starts
  814. * @buf_len: total number of bytes for the entire buffer
  815. * @period_len: number of bytes for each period
  816. * @dir: transfer direction, to or from device
  817. *
  818. * Must be called before trying to start the transfer. Returns a valid
  819. * dma_async_tx_descriptor on success, or NULL on failure.
  820. */
  821. struct dma_async_tx_descriptor *sunxi_prep_dma_cyclic( struct dma_chan *chan,
  822. dma_addr_t buf_addr, size_t buf_len, size_t period_len,
  823. enum dma_transfer_direction dir, unsigned long flags, void *context)
  824. {
  825. struct sunxi_desc *txd;
  826. struct sunxi_chan *schan = to_sunxi_chan(chan);
  827. struct sunxi_dmadev *sdev = to_sunxi_dmadev(chan->device);
  828. struct sunxi_dma_lli *l_item, *prev = NULL;
  829. struct dma_slave_config *sconfig = &schan->cfg;
  830.  
  831. dma_addr_t phy;
  832. unsigned int periods = buf_len / period_len;
  833. unsigned int i;
  834.  
  835. txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
  836. if (!txd) {
  837. dev_err(chan2dev(chan), "%s: Failed to alloc sunxi_desc!!\n", __func__);
  838. return NULL;
  839. }
  840. vchan_tx_prep(&schan->vc, &txd->vd, flags);
  841.  
  842. for (i = 0; i < periods; i++){
  843. l_item = sunxi_alloc_lli(sdev, &phy);
  844. if (!l_item) {
  845. sunxi_free_desc(&txd->vd);
  846. return NULL;
  847. }
  848.  
  849.  
  850. if (dir == DMA_MEM_TO_DEV) {
  851. sunxi_cfg_lli(l_item, (buf_addr + period_len * i),
  852. sconfig->dst_addr, period_len, sconfig);
  853. l_item->cfg |= GET_DST_DRQ(sconfig->slave_id)
  854. | SRC_LINEAR_MODE
  855. | DST_IO_MODE
  856. | SRC_DRQ(DRQSRC_SDRAM);
  857. } else if (dir == DMA_DEV_TO_MEM) {
  858. sunxi_cfg_lli(l_item, sconfig->src_addr,
  859. (buf_addr + period_len * i), period_len, sconfig);
  860. l_item->cfg |= GET_SRC_DRQ(sconfig->slave_id)
  861. | DST_LINEAR_MODE
  862. | SRC_IO_MODE
  863. | DST_DRQ(DRQDST_SDRAM);
  864. }
  865.  
  866. prev = sunxi_lli_list(prev, l_item, phy, txd);
  867.  
  868. }
  869.  
  870. /* Make a cyclic list */
  871. prev->p_lln = txd->lli_phys;
  872. schan->cyclic = true;
  873.  
  874. #ifdef DEBUG
  875. pr_debug("[sunxi_dma]: First: 0x%08x\n", txd->lli_phys);
  876. for(prev = txd->lli_virt; prev != NULL; prev = prev->v_lln)
  877. sunxi_dump_lli(schan, prev);
  878. #endif
  879.  
  880. return &txd->vd.tx;
  881. }
  882.  
  883. static int sunxi_set_runtime_config(struct dma_chan *chan,
  884. struct dma_slave_config *sconfig)
  885. {
  886. struct sunxi_chan *schan = to_sunxi_chan(chan);
  887.  
  888. memcpy(&schan->cfg, sconfig, sizeof(struct dma_slave_config));
  889.  
  890. convert_burst(&schan->cfg.src_maxburst);
  891. convert_burst(&schan->cfg.dst_maxburst);
  892.  
  893. return 0;
  894. }
  895.  
  896. static int sunxi_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
  897. unsigned long arg)
  898. {
  899. struct sunxi_chan *schan = to_sunxi_chan(chan);
  900. int ret = 0;
  901.  
  902. switch(cmd) {
  903. case DMA_RESUME:
  904. sunxi_dma_resume(schan);
  905. break;
  906. case DMA_PAUSE:
  907. sunxi_dma_pause(schan);
  908. break;
  909. case DMA_TERMINATE_ALL:
  910. ret = sunxi_terminate_all(schan);
  911. break;
  912. case DMA_SLAVE_CONFIG:
  913. ret = sunxi_set_runtime_config(chan, (struct dma_slave_config *)arg);
  914. break;
  915. default:
  916. ret = -ENXIO;
  917. break;
  918. }
  919. return ret;
  920. }
  921.  
  922. static enum dma_status sunxi_tx_status(struct dma_chan *chan,
  923. dma_cookie_t cookie, struct dma_tx_state *txstate)
  924. {
  925. struct sunxi_chan *schan = to_sunxi_chan(chan);
  926. struct virt_dma_desc *vd;
  927. enum dma_status ret;
  928. unsigned long flags;
  929. size_t bytes = 0;
  930.  
  931. ret = dma_cookie_status(chan, cookie, txstate);
  932. if (ret == DMA_COMPLETE || !txstate) {
  933. return ret;
  934. }
  935.  
  936. spin_lock_irqsave(&schan->vc.lock, flags);
  937. vd = vchan_find_desc(&schan->vc, cookie);
  938. if (vd) {
  939. bytes = sunxi_get_desc_size(to_sunxi_desc(&vd->tx));
  940. } else if (schan->desc && schan->desc->vd.tx.cookie == cookie) {
  941. bytes = sunxi_get_chan_size(to_sunxi_chan(chan));
  942. }
  943.  
  944. /*
  945. * This cookie is not complete yet: report the number of bytes
  946. * left in the active transaction and the queue.
  947. */
  948. dma_set_residue(txstate, bytes);
  949. spin_unlock_irqrestore(&schan->vc.lock, flags);
  950.  
  951. return ret;
  952. }
  953.  
  954. /*
  955. * sunxi_issue_pending - issue submitted descriptors to the hardware
  956. * @chan: target DMA channel
  957. *
  958. * Calls vchan_issue_pending(), which moves the desc_submitted list to the
  959. * desc_issued list, then queues the channel on the sunxi_dmadev pending
  960. * list so the tasklet can start it.
  961. */
  962. static void sunxi_issue_pending(struct dma_chan *chan)
  963. {
  964. struct sunxi_chan *schan = to_sunxi_chan(chan);
  965. struct sunxi_dmadev *sdev = to_sunxi_dmadev(chan->device);
  966. unsigned long flags;
  967.  
  968. spin_lock_irqsave(&schan->vc.lock, flags);
  969. if (vchan_issue_pending(&schan->vc) && !schan->desc) {
  970. if (schan->cyclic){
  971. sunxi_start_desc(schan);
  972. goto out;
  973. }
  974.  
  975. spin_lock(&sdev->lock);
  976. if (list_empty(&schan->node))
  977. list_add_tail(&schan->node, &sdev->pending);
  978. spin_unlock(&sdev->lock);
  979. tasklet_schedule(&sdev->task);
  980. }
  981. out:
  982. spin_unlock_irqrestore(&schan->vc.lock, flags);
  983. }
  984.  
  985. static int sunxi_alloc_chan_resources(struct dma_chan *chan)
  986. {
  987. struct sunxi_chan *schan = to_sunxi_chan(chan);
  988. dev_dbg(chan2parent(chan), "%s: Now alloc chan resources!\n", __func__);
  989.  
  990. schan->cyclic = false;
  991.  
  992. return 0;
  993. }
  994.  
  995. /*
  996. * sunxi_free_chan_resources - free the resources of channel
  997. * @chan: the channel to free
  998. */
  999. static void sunxi_free_chan_resources(struct dma_chan *chan)
  1000. {
  1001. struct sunxi_chan *schan = to_sunxi_chan(chan);
  1002.  
  1003. vchan_free_chan_resources(&schan->vc);
  1004.  
  1005. dev_dbg(chan2parent(chan), "%s: Now free chan resources!!\n", __func__);
  1006. }
  1007.  
  1008. /*
  1009. * sunxi_chan_free - free the channels of the dma device
  1010. * @sdev: the dmadevice of sunxi
  1011. */
  1012. static inline void sunxi_chan_free(struct sunxi_dmadev *sdev)
  1013. {
  1014. struct sunxi_chan *ch;
  1015.  
  1016. tasklet_kill(&sdev->task);
  1017. while(!list_empty(&sdev->dma_dev.channels)) {
  1018. ch = list_first_entry(&sdev->dma_dev.channels,
  1019. struct sunxi_chan, vc.chan.device_node);
  1020. list_del(&ch->vc.chan.device_node);
  1021. tasklet_kill(&ch->vc.task);
  1022. kfree(ch);
  1023. }
  1024.  
  1025. }
  1026.  
  1027. static void sunxi_dma_hw_init(struct sunxi_dmadev *dev)
  1028. {
  1029. struct sunxi_dmadev *sunxi_dev = dev;
  1030.  
  1031. clk_prepare_enable(sunxi_dev->ahb_clk);
  1032. #ifdef CONFIG_ARCH_SUN8IW3
  1033. writel(0x04, sunxi_dev->base + DMA_GATE);
  1034. #endif
  1035. }
  1036.  
  1037. static int sunxi_probe(struct platform_device *pdev)
  1038. {
  1039. struct sunxi_dmadev *sunxi_dev;
  1040. struct sunxi_chan *schan;
  1041. struct resource res;
  1042. int irq;
  1043. int ret, i;
  1044. int err = 0;
  1045. const struct of_device_id *of_id;
  1046.  
  1047. sunxi_dev = kzalloc(sizeof(struct sunxi_dmadev), GFP_KERNEL);
  1048. if (!sunxi_dev) {
  1049. printk("dma: kzalloc error \n");
  1050. return -ENOMEM;
  1051. }
  1052.  
  1053. /* DEVICE MATCH */
  1054. of_id = of_match_device(sunxi_dma_of_dev_id, &pdev->dev);
  1055. if (of_id) {
  1056. printk("dma: of_match_device ok\n");
  1057. pdev->id_entry = of_id->data;
  1058. }
  1059. else {
  1060. printk("dma: of_match_device fail\n");
  1061. }
  1062. /* if (!of_id) {
  1063. printk("dma: of_match_device error\n");
  1064. return -EINVAL;
  1065. }
  1066. if (of_id)
  1067. pdev->id_entry = of_id->data;*/
  1068.  
  1069. /* GET REGISTER BASE ADDRESS + LENGTH */
  1070. err = 0;
  1071. err = of_address_to_resource(pdev->dev.of_node, 0, &res);
  1072. if (err) {
  1073. dev_err(&pdev->dev, "unable to find 'reg' property\n");
  1074. printk("dma: unable to find 'reg' property %d\n",-EINVAL);
  1075. ret = -EINVAL;
  1076. goto io_err;
  1077. }
  1078. else {
  1079. dev_err(&pdev->dev, "dma: reg ok now\n");
  1080. printk("dma: reg ok now\n");
  1081. }
  1082.  
  1083. printk("dma: res_start %x\n", (unsigned int)res.start);
  1084. printk("dma: res_end %x\n", (unsigned int)res.end);
  1085.  
  1086. /*
  1087. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1088. if (!res) {
  1089. ret = -EINVAL;
  1090. goto io_err;
  1091. }
  1092. */
  1093.  
  1094. /* IOMAP REGISTERS */
  1095. sunxi_dev->base = of_iomap(pdev->dev.of_node, 0);
  1096.  
  1097. if (!sunxi_dev->base) {
  1098. dev_err(&pdev->dev, "unable to iomap the registers\n");
  1099. printk("dma: unable to iomap the registers\n");
  1100. ret = -ENOMEM;
  1101. goto io_err;
  1102. }
  1103.  
  1104. /*
  1105. sunxi_dev->base = ioremap(res->start, resource_size(res));
  1106. if (!sunxi_dev->base) {
  1107. dev_err(&pdev->dev, "Remap I/O memory failed!\n");
  1108. ret = -ENOMEM;
  1109. goto io_err;
  1110. }
  1111. */
  1112.  
  1113. /* GETTING MAPED IRQ # GOT FROM DT */
  1114. irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  1115. if (irq <= 0) { /* irq_of_parse_and_map() returns 0 on failure */
  1116. dev_err(&pdev->dev, "Parse & map IRQ from dt failed!\n");
  1117. printk("dma: Parse & map IRQ from dt failed!\n");
  1118. ret = -EINVAL;
  1119. goto irq_err;
  1120. }
  1121. printk("dma: irq %d\n",irq);
  1122.  
  1123. /*
  1124. irq = platform_get_irq(pdev, 0);
  1125. if (irq < 0) {
  1126. ret = irq;
  1127. goto irq_err;
  1128. }
  1129. */
  1130.  
  1131. ret = request_irq(irq, sunxi_dma_interrupt, IRQF_SHARED,
  1132. dev_name(&pdev->dev), sunxi_dev);
  1133. if (ret) {
  1134. dev_err(&pdev->dev, "Failed to request IRQ!\n");
  1135. printk("dma: failed to request IRQ!\n");
  1136. goto irq_err;
  1137. }
  1138.  
  1139.  
  1140. /* GETTING CLK INFO FROM DT */
  1141. /* //ahb_clk = clk_get(&pdev->dev, "dma");
  1142. sunxi_dev->ahb_clk = devm_clk_get(&pdev->dev, NULL);
  1143. if (!sunxi_dev->ahb_clk) {
  1144. dev_err(&pdev->dev, "NO clock to dma!!!\n");
  1145. ret = -EINVAL;
  1146. goto clk_err;
  1147. }
  1148. //printk("clk before\n",sunxi_dev->ahb_clk);
  1149. */
  1150.  
  1151. sunxi_dev->ahb_clk = clk_get(&pdev->dev, NULL);
  1152. if (IS_ERR(sunxi_dev->ahb_clk)) { /* clk_get() returns an ERR_PTR on failure */
  1153. dev_err(&pdev->dev, "No clock for DMA!\n");
  1154. printk("dma: no clock to dma!\n");
  1155. ret = PTR_ERR(sunxi_dev->ahb_clk);
  1156. goto clk_err;
  1157. }
  1158.  
  1159. sunxi_dev->lli_pool = dma_pool_create(dev_name(&pdev->dev), &pdev->dev,
  1160. sizeof(struct sunxi_dma_lli), 4/* word alignment */, 0);
  1161. if (!sunxi_dev->lli_pool) {
  1162. printk("dma: lli_pool error\n");
  1163. ret = -ENOMEM;
  1164. goto pool_err;
  1165. }
  1166.  
  1167. platform_set_drvdata(pdev, sunxi_dev);
  1168. INIT_LIST_HEAD(&sunxi_dev->pending);
  1169. spin_lock_init(&sunxi_dev->lock);
  1170.  
  1171. /* Initialize dmaengine */
  1172. dma_cap_set(DMA_MEMCPY, sunxi_dev->dma_dev.cap_mask);
  1173. dma_cap_set(DMA_SLAVE, sunxi_dev->dma_dev.cap_mask);
  1174. dma_cap_set(DMA_CYCLIC, sunxi_dev->dma_dev.cap_mask);
  1175. dma_cap_set(DMA_SG, sunxi_dev->dma_dev.cap_mask);
  1176.  
  1177. INIT_LIST_HEAD(&sunxi_dev->dma_dev.channels);
  1178. sunxi_dev->dma_dev.device_alloc_chan_resources = sunxi_alloc_chan_resources;
  1179. sunxi_dev->dma_dev.device_free_chan_resources = sunxi_free_chan_resources;
  1180. sunxi_dev->dma_dev.device_tx_status = sunxi_tx_status;
  1181. sunxi_dev->dma_dev.device_issue_pending = sunxi_issue_pending;
  1182. sunxi_dev->dma_dev.device_prep_dma_sg = sunxi_prep_dma_sg;
  1183. sunxi_dev->dma_dev.device_prep_slave_sg = sunxi_prep_slave_sg;
  1184. sunxi_dev->dma_dev.device_prep_dma_cyclic = sunxi_prep_dma_cyclic;
  1185. sunxi_dev->dma_dev.device_prep_dma_memcpy = sunxi_prep_dma_memcpy;
  1186. sunxi_dev->dma_dev.device_control = sunxi_control;
  1187.  
  1188. sunxi_dev->dma_dev.dev = &pdev->dev;
  1189.  
  1190. tasklet_init(&sunxi_dev->task, sunxi_dma_tasklet, (unsigned long)sunxi_dev);
  1191.  
  1192. for (i = 0; i < NR_MAX_CHAN; i++){
  1193. schan = kzalloc(sizeof(*schan), GFP_KERNEL);
  1194. if (!schan){
  1195. dev_err(&pdev->dev, "%s: no memory for channel\n", __func__);
  1196. ret = -ENOMEM;
  1197. printk("dma: no memory!\n");
  1198. goto chan_err;
  1199. }
  1200. INIT_LIST_HEAD(&schan->node);
  1201. sunxi_dev->dma_dev.chancnt++;
  1202. schan->vc.desc_free = sunxi_free_desc;
  1203. vchan_init(&schan->vc, &sunxi_dev->dma_dev);
  1204. }
  1205.  
  1206. /* Register the sunxi-dma to dmaengine */
  1207. ret = dma_async_device_register(&sunxi_dev->dma_dev);
  1208. if (ret) {
  1209. dev_warn(&pdev->dev, "Failed to register DMA engine device: %d\n", ret);
  1210. printk("dma: failed to register dma engine device: %d\n", ret);
  1211. goto chan_err;
  1212. }
  1213.  
  1214. /* All is ok, and open the clock */
  1215. sunxi_dma_hw_init(sunxi_dev);
  1216.  
  1217. return 0;
  1218.  
  1219. chan_err:
  1220. sunxi_chan_free(sunxi_dev);
  1221. platform_set_drvdata(pdev, NULL);
  1222. dma_pool_destroy(sunxi_dev->lli_pool);
  1223. pool_err:
  1224. clk_put(sunxi_dev->ahb_clk);
  1225. clk_err:
  1226. free_irq(irq, sunxi_dev);
  1227. irq_err:
  1228. iounmap(sunxi_dev->base);
  1229. io_err:
  1230. kfree(sunxi_dev);
  1231. return ret;
  1232. }
  1233.  
  1234. bool sunxi_dma_filter_fn(struct dma_chan *chan, void *param)
  1235. {
  1236. return true;
  1237. }
  1238. EXPORT_SYMBOL_GPL(sunxi_dma_filter_fn);
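/*
 * A minimal sketch (compiled out) of how a peripheral driver might use the
 * filter above to grab a channel and run a mem-to-device slave transfer.
 * The FIFO address, slave_id/DRQ value and burst/width settings are
 * hypothetical placeholders; a real client takes them from its hardware
 * documentation and from <linux/dma/sunxi-dma.h>.
 */
#if 0
static int example_client_xfer(dma_addr_t dev_fifo, struct scatterlist *sgl,
				unsigned int sg_len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = dev_fifo,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 8,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 8,
		.slave_id = 0, /* hypothetical DRQ encoding */
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* sunxi_dma_filter_fn() accepts any free channel of this controller */
	chan = dma_request_channel(mask, sunxi_dma_filter_fn, NULL);
	if (!chan)
		return -EBUSY;

	dmaengine_slave_config(chan, &cfg);

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* a real client sets tx->callback before submitting and waits for it */
	dma_release_channel(chan);
	return 0;
}
#endif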
  1239.  
  1240. static int sunxi_remove(struct platform_device *pdev)
  1241. {
  1242. struct sunxi_dmadev *sunxi_dev = platform_get_drvdata(pdev);
  1243.  
  1244. dma_async_device_unregister(&sunxi_dev->dma_dev);
  1245.  
  1246. sunxi_chan_free(sunxi_dev);
  1247.  
  1248. free_irq(platform_get_irq(pdev, 0), sunxi_dev);
  1249. dma_pool_destroy(sunxi_dev->lli_pool);
  1250. clk_disable_unprepare(sunxi_dev->ahb_clk);
  1251. clk_put(sunxi_dev->ahb_clk);
  1252. iounmap(sunxi_dev->base);
  1253. kfree(sunxi_dev);
  1254.  
  1255. return 0;
  1256. }
  1257.  
  1258. static void sunxi_shutdown(struct platform_device *pdev)
  1259. {
  1260. struct sunxi_dmadev *sdev = platform_get_drvdata(pdev);
  1261.  
  1262. clk_disable_unprepare(sdev->ahb_clk);
  1263. }
  1264.  
  1265. static int sunxi_suspend_noirq(struct device *dev)
  1266. {
  1267. struct platform_device *pdev = to_platform_device(dev);
  1268. struct sunxi_dmadev *sunxi_dev = platform_get_drvdata(pdev);
  1269.  
  1270. clk_disable_unprepare(sunxi_dev->ahb_clk);
  1271. return 0;
  1272. }
  1273.  
  1274. static int sunxi_resume_noirq(struct device *dev)
  1275. {
  1276. struct platform_device *pdev = to_platform_device(dev);
  1277. struct sunxi_dmadev *sunxi_dev = platform_get_drvdata(pdev);
  1278.  
  1279. sunxi_dma_hw_init(sunxi_dev);
  1280. return 0;
  1281. }
  1282.  
  1283. static const struct dev_pm_ops sunxi_dev_pm_ops = {
  1284. .suspend_noirq = sunxi_suspend_noirq,
  1285. .resume_noirq = sunxi_resume_noirq,
  1286. .freeze_noirq = sunxi_suspend_noirq,
  1287. .thaw_noirq = sunxi_resume_noirq,
  1288. .restore_noirq = sunxi_resume_noirq,
  1289. .poweroff_noirq = sunxi_suspend_noirq,
  1290. };
  1291.  
  1292. static struct resource sunxi_dma_resource[] = {
  1293. [0] = {
  1294. .start = DMA_PHYS_BASE,
  1295. .end = DMA_PHYS_BASE + DMA_PARA(15),
  1296. .flags = IORESOURCE_MEM,
  1297. },
  1298. [1] = {
  1299. .start = DMA_IRQ_ID,
  1300. .end = DMA_IRQ_ID,
  1301. .flags = IORESOURCE_IRQ,
  1302. },
  1303. };
  1304.  
  1305. u64 sunxi_dma_mask = DMA_BIT_MASK(32);
  1306. static struct platform_device sunxi_dma_device = {
  1307. .name = "sunxi_dmac",
  1308. .id = -1,
  1309. .resource = sunxi_dma_resource,
  1310. .num_resources = ARRAY_SIZE(sunxi_dma_resource),
  1311. .dev = {
  1312. .dma_mask = &sunxi_dma_mask,
  1313. .coherent_dma_mask = DMA_BIT_MASK(32),
  1314. },
  1315. };
  1316.  
  1317. static struct platform_driver sunxi_dma_driver = {
  1318. .probe = sunxi_probe,
  1319. .remove = sunxi_remove,
  1320. .shutdown = sunxi_shutdown,
  1321. .driver = {
  1322. .name = "sunxi_dmac",
  1323. .pm = &sunxi_dev_pm_ops,
  1324. .of_match_table = sunxi_dma_of_dev_id,
  1325. },
  1326. };
  1327.  
  1328. static int __init sunxi_dma_init(void)
  1329. {
  1330. int ret;
  1331.  
  1332. platform_device_register(&sunxi_dma_device);
  1333. ret = platform_driver_register(&sunxi_dma_driver);
  1334.  
  1335. return ret;
  1336. }
  1337. subsys_initcall(sunxi_dma_init);
  1338.  
  1339. static void __exit sunxi_dma_exit(void)
  1340. {
  1341. platform_driver_unregister(&sunxi_dma_driver);
  1342. platform_device_unregister(&sunxi_dma_device);
  1343. }
  1344. module_exit(sunxi_dma_exit);
  1345.  
  1346. MODULE_LICENSE("GPL");
  1347. MODULE_DESCRIPTION("Sunxi DMA Controller driver");
  1348. MODULE_AUTHOR("Shuge");
  1349. MODULE_ALIAS("platform:sunxi_dmac");