Xilinx DMA (pasted by nicolaerosia, Jun 16th, 2015)

  1. /*
  2.  * Xilinx AXI DMA Engine support
  3.  *
  4.  * Copyright (C) 2012 - 2013 Xilinx, Inc. All rights reserved.
  5.  *
  6.  * Based on the Freescale DMA driver.
  7.  *
  8.  * Description:
  9.  *  . AXI DMA engine; it performs transfers between memory and a device. It
  10.  *    can be configured with one or two channels. When configured with two
  11.  *    channels, one channel transmits to the device and the other receives
  12.  *    from it.
  13.  *
  14.  * This is free software; you can redistribute it and/or modify
  15.  * it under the terms of the GNU General Public License as published by
  16.  * the Free Software Foundation; either version 2 of the License, or
  17.  * (at your option) any later version.
  18.  */
  19. //#define DEBUG
  20.  
  21. #include <linux/amba/xilinx_dma.h>
  22. #include <linux/dmapool.h>
  23. #include <linux/init.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/io.h>
  26. #include <linux/irqdomain.h>
  27. #include <linux/module.h>
  28. #include <linux/of.h>
  29. #include <linux/of_address.h>
  30. #include <linux/of_irq.h>
  31. #include <linux/of_platform.h>
  32. #include <linux/platform_device.h>
  33. #include <linux/slab.h>
  34.  
  35. #include "../dmaengine.h"
  36.  
  37. /* Hw specific definitions */
  38. #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 /* Max no of channels */
  39. #define XILINX_DMA_MAX_TRANS_LEN    0x7FFFFF /* Max transfer length */
  40.  
  41. /* Register Offsets */
  42. #define XILINX_DMA_CONTROL_OFFSET   0x00 /* Control Reg */
  43. #define XILINX_DMA_STATUS_OFFSET    0x04 /* Status Reg */
  44. #define XILINX_DMA_CDESC_OFFSET     0x08 /* Current descriptor Reg */
  45. #define XILINX_DMA_TDESC_OFFSET     0x10 /* Tail descriptor Reg */
  46. #define XILINX_DMA_SRCADDR_OFFSET   0x18 /* Source Address Reg */
  47. #define XILINX_DMA_DSTADDR_OFFSET   0x20 /* Dest Address Reg */
  48. #define XILINX_DMA_BTT_OFFSET       0x28 /* Bytes to transfer Reg */
  49.  
  50. /* General register bits definitions */
  51. #define XILINX_DMA_CR_RESET_MASK    0x00000004 /* Reset DMA engine */
  52. #define XILINX_DMA_CR_RUNSTOP_MASK  0x00000001 /* Start/stop DMA engine */
  53.  
  54. #define XILINX_DMA_SR_HALTED_MASK   0x00000001 /* DMA channel halted */
  55. #define XILINX_DMA_SR_IDLE_MASK     0x00000002 /* DMA channel idle */
  56.  
  57. #define XILINX_DMA_XR_IRQ_IOC_MASK  0x00001000 /* Completion interrupt */
  58. #define XILINX_DMA_XR_IRQ_DELAY_MASK    0x00002000 /* Delay interrupt */
  59. #define XILINX_DMA_XR_IRQ_ERROR_MASK    0x00004000 /* Error interrupt */
  60. #define XILINX_DMA_XR_IRQ_ALL_MASK  0x00007000 /* All interrupts */
  61.  
  62. #define XILINX_DMA_XR_DELAY_MASK    0xFF000000 /* Delay timeout counter */
  63. #define XILINX_DMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
  64.  
  65. #define XILINX_DMA_DELAY_SHIFT      24 /* Delay timeout counter shift */
  66. #define XILINX_DMA_COALESCE_SHIFT   16 /* Coalesce counter shift */
  67.  
  68. #define XILINX_DMA_DELAY_MAX        0xFF /* Maximum delay counter value */
  69. #define XILINX_DMA_COALESCE_MAX     0xFF /* Max coalescing counter value */
  70.  
  71. #define XILINX_DMA_RX_CHANNEL_OFFSET    0x30 /* S2MM Channel Offset */
  72.  
  73. /* BD definitions for AXI Dma */
  74. #define XILINX_DMA_HW_DESC_STATUS_CMPLIT BIT(31)
  75. #define XILINX_DMA_HW_DESC_STATUS_DMA_DEC_ERR BIT(30)
  76. #define XILINX_DMA_HW_DESC_STATUS_DMA_SLV_ERR BIT(29)
  77. #define XILINX_DMA_HW_DESC_STATUS_DMA_INT_ERR BIT(28)
  78. #define XILINX_DMA_HW_DESC_STATUS_ALL_MASK  GENMASK(31, 28)
  79. #define XILINX_DMA_HW_DESC_STATUS_ERROR_MASK    GENMASK(30, 28)
  80. #define XILINX_DMA_HW_DESC_SOF  BIT(27) /* Start of packet bit */
  81. #define XILINX_DMA_HW_DESC_EOF  BIT(26) /* End of packet bit */
  82. #define XILINX_DMA_HW_DESC_LENGTH GENMASK(22, 0)
  83.  
  84. /* Delay loop counter to prevent hardware failure */
  85. #define XILINX_DMA_RESET_LOOP       1000000
  86. #define XILINX_DMA_HALT_LOOP        1000000
  87.  
  88. #if defined(CONFIG_XILINX_DMATEST) || defined(CONFIG_XILINX_DMATEST_MODULE)
  89. # define TEST_DMA_WITH_LOOPBACK
  90. #endif
  91.  
  92. /* Hardware descriptor */
  93. struct xilinx_dma_desc_hw {
  94.     u32 next_desc;  /* 0x00 */
  95.     u32 pad1;   /* 0x04 */
  96.     u32 buf_addr;   /* 0x08 */
  97.     u32 pad2;   /* 0x0C */
  98.     u32 pad3;   /* 0x10 */
  99.     u32 pad4;   /* 0x14 */
  100.     u32 control;    /* 0x18 */
  101.     u32 status; /* 0x1C */
  102.     u32 app_0;  /* 0x20 */
  103.     u32 app_1;  /* 0x24 */
  104.     u32 app_2;  /* 0x28 */
  105.     u32 app_3;  /* 0x2C */
  106.     u32 app_4;  /* 0x30 */
  107. } __aligned(64);
  108.  
  109. struct xilinx_dma_segment {
  110.     struct xilinx_dma_desc_hw hw;
  111.     struct list_head node;
  112.     dma_addr_t phys;
  113. } __aligned(64);
  114.  
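/*
 * Layout sanity check (illustrative addition, not called by the driver): a
 * minimal sketch assuming the offsets noted in the field comments above are
 * the ones the hardware's scatter-gather descriptors require. Calling it
 * once (e.g. from probe) would make the checks fire at compile time.
 */
static inline void xilinx_dma_check_hw_desc_layout(void)
{
    BUILD_BUG_ON(offsetof(struct xilinx_dma_desc_hw, next_desc) != 0x00);
    BUILD_BUG_ON(offsetof(struct xilinx_dma_desc_hw, buf_addr) != 0x08);
    BUILD_BUG_ON(offsetof(struct xilinx_dma_desc_hw, control) != 0x18);
    BUILD_BUG_ON(offsetof(struct xilinx_dma_desc_hw, status) != 0x1C);
    BUILD_BUG_ON(offsetof(struct xilinx_dma_desc_hw, app_0) != 0x20);
    BUILD_BUG_ON(sizeof(struct xilinx_dma_desc_hw) % 64);
}
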
  115. /* Software descriptor */
  116. struct xilinx_dma_desc_sw {
  117.     struct dma_async_tx_descriptor async_tx;
  118.     struct list_head segments;
  119.     struct list_head node;
  120. };
  121.  
  122. /* Per-channel driver state; DMA-specific operations are embedded in the channel structure */
  123. struct xilinx_dma_chan {
  124.     void __iomem *regs;     /* Control status registers */
  125.     spinlock_t lock;        /* Descriptor operation lock */
  126.     struct list_head active_list;   /* Active descriptors */
  127.     struct list_head pending_list;  /* Descriptors waiting */
  128.     unsigned int pending_list_size;
  129.     unsigned int last_pending_list_size;
  130.     struct list_head done_list;     /* Done descriptors */
  131.     struct dma_chan common;     /* DMA common channel */
  132.     struct dma_pool *desc_pool; /* Descriptors pool */
  133.     struct device *dev;     /* The dma device */
  134.     u32 cntrl;
  135.     int irq;            /* Channel IRQ */
  136.     int id;             /* Channel ID */
  137.     enum dma_transfer_direction direction;
  138.                     /* Transfer direction */
  139.     int max_len;    /* Maximum data len per transfer */
  140.     bool err;   /* Channel has errors */
  141.     struct tasklet_struct tasklet;  /* Cleanup work after irq */
  142.     u32 private;            /* Match info for channel request */
  143.     struct xilinx_dma_config config;
  144.                     /* Device configuration info */
  145. };
  146.  
  147. /* DMA Device Structure */
  148. struct xilinx_dma_device {
  149.     void __iomem *regs;
  150.     struct device *dev;
  151.     struct dma_device common;
  152.     struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
  153. };
  154.  
  155. #define to_xilinx_chan(chan) \
  156.     container_of(chan, struct xilinx_dma_chan, common)
  157.  
  158. /* IO accessors */
  159. static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 val)
  160. {
  161.     writel(val, chan->regs + reg);
  162. }
  163.  
  164. static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
  165. {
  166.     return readl(chan->regs + reg);
  167. }
  168.  
  169. /* Alternative DMACR/DMASR register and bit definitions, used by the dump helpers below */
  170. #define XILINX_DMA_REG_DMACR        0x00
  171. #define XILINX_DMA_DMACR_RUNSTOP    BIT(0)
  172. #define XILINX_DMA_DMACR_RESET      BIT(2)
  173. #define XILINX_DMA_DMACR_KEYHOLE    BIT(3)
  174. #define XILINX_DMA_DMACR_CYCLIC_EN  BIT(4)
  175. #define XILINX_DMA_DMACR_IOC_IRQ    BIT(12)
  176. #define XILINX_DMA_DMACR_DLY_IRQ    BIT(13)
  177. #define XILINX_DMA_DMACR_ERR_IRQ    BIT(14)
  178. #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
  179. #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
  180. #define XILINX_DMA_DMACR_FRAME_COUNT GENMASK(23, 16)
  181. #define XILINX_DMA_DMACR_DELAY_COUNT_SHIFT 24
  182. #define XILINX_DMA_DMACR_DELAY_COUNT_MAX 0xff
  183. #define XILINX_DMA_DMACR_DELAY GENMASK(31, 24)
  184.  
  185. #define XILINX_DMA_REG_DMASR            0x04
  186. #define XILINX_DMA_DMASR_HALTED         BIT(0)
  187. #define XILINX_DMA_DMASR_IDLE           BIT(1)
  188. #define XILINX_DMA_DMASR_SG_INCLD       BIT(3)
  189. #define XILINX_DMA_DMASR_DMA_INT_ERR    BIT(4)
  190. #define XILINX_DMA_DMASR_DMA_SLV_ERR    BIT(5)
  191. #define XILINX_DMA_DMASR_DMA_DEC_ERR    BIT(6)
  192. #define XILINX_DMA_DMASR_SG_INT_ERR     BIT(8)
  193. #define XILINX_DMA_DMASR_SG_SLV_ERR     BIT(9)
  194. #define XILINX_DMA_DMASR_SG_DEC_ERR     BIT(10)
  195. #define XILINX_DMA_DMASR_IOC_IRQ        BIT(12)
  196. #define XILINX_DMA_DMASR_DLY_IRQ        BIT(13)
  197. #define XILINX_DMA_DMASR_ERR_IRQ        BIT(14)
  198. #define XILINX_DMA_DMASR_FRAME_COUNT_SHIFT 16
  199. #define XILINX_DMA_DMASR_FRAME_COUNT GENMASK(23, 16)
  200. #define XILINX_DMA_DMASR_DELAY_COUNT_SHIFT 24
  201. #define XILINX_DMA_DMASR_DELAY GENMASK(31, 24)
  202.  
  203. void dump_desc_hw(struct xilinx_dma_chan *chan, struct xilinx_dma_desc_hw *desc)
  204. {
  205.     dev_dbg(chan->dev,
  206.             "next_desc: %x\n"
  207.             "buf_addr: %x\n"
  208.             "control: %x\n"
  209.                 " SOF: %d"
  210.                 " EOF: %d"
  211.                 " len: %lx\n"
  212.             "status: %x\n"
  213.                 " CMPLIT: %d"
  214.                 " DMA_DEC_ERR: %d"
  215.                 " DMA_SLV_ERR: %d"
  216.                 " DMA_INT_ERR: %d"
  217.                 " tlen: %lx\n"
  218.             , desc->next_desc
  219.             , desc->buf_addr
  220.             , desc->control
  221.             , !!(desc->control & XILINX_DMA_HW_DESC_SOF)
  222.             , !!(desc->control & XILINX_DMA_HW_DESC_EOF)
  223.             , desc->control & XILINX_DMA_HW_DESC_LENGTH
  224.             , desc->status
  225.             , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_CMPLIT)
  226.             , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_DMA_DEC_ERR)
  227.             , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_DMA_SLV_ERR)
  228.             , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_DMA_INT_ERR)
  229.             , desc->status & XILINX_DMA_HW_DESC_LENGTH
  230.     );
  231. }
  232.  
  233. void xilinx_dma_dump_ctrl(struct dma_chan *dchan)
  234. {
  235.     struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
  236.     u32 reg = dma_read(chan, XILINX_DMA_CONTROL_OFFSET);
  237.  
  238.     dev_dbg(chan->dev,
  239.             "id: %d ctrl: %x\n"
  240.             " RUNSTOP: %d"
  241.             " RESET: %d"
  242.             " KEYHOLE: %d"
  243.             " CYCLIC_EN: %d"
  244.             " IOC_IRQ: %d"
  245.             " DLY_IRQ: %d"
  246.             " ERR_IRQ: %d"
  247.             " FRAME_COUNT: %lu"
  248.             " DELAY: %lu\n",
  249.             chan->id,
  250.             reg,
  251.             !!(reg & XILINX_DMA_DMACR_RUNSTOP),
  252.             !!(reg & XILINX_DMA_DMACR_RESET),
  253.             !!(reg & XILINX_DMA_DMACR_KEYHOLE),
  254.             !!(reg & XILINX_DMA_DMACR_CYCLIC_EN),
  255.             !!(reg & XILINX_DMA_DMACR_IOC_IRQ),
  256.             !!(reg & XILINX_DMA_DMACR_DLY_IRQ),
  257.             !!(reg & XILINX_DMA_DMACR_ERR_IRQ),
  258.             (reg & XILINX_DMA_DMACR_FRAME_COUNT) >> XILINX_DMA_DMACR_FRAME_COUNT_SHIFT,
  259.             (reg & XILINX_DMA_DMACR_DELAY) >> XILINX_DMA_DMACR_DELAY_COUNT_SHIFT
  260.     );
  261. }
  262. EXPORT_SYMBOL(xilinx_dma_dump_ctrl);
  263.  
  264. void xilinx_dma_dump_status(struct dma_chan *dchan)
  265. {
  266.     struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
  267.     u32 reg = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
  268.  
  269.     dev_dbg(chan->dev,
  270.             "id: %d status: %x\n"
  271.             " HALTED: %d "
  272.             " IDLE: %d "
  273.             " INT_ERR: %d SLV_ERR: %d DEC_ERR: %d "
  274.             " SG_INT_ERR: %d SG_SLV_ERR: %d SG_DEC_ERR: %d "
  275.             " IOC_IRQ: %d DLY_IRQ: %d ERR_IRQ: %d "
  276.             " FC: %lu DLY: %lu\n",
  277.             chan->id, reg,
  278.             !!(reg & XILINX_DMA_DMASR_HALTED),
  279.             !!(reg & XILINX_DMA_DMASR_IDLE),
  280.             !!(reg & XILINX_DMA_DMASR_DMA_INT_ERR),
  281.             !!(reg & XILINX_DMA_DMASR_DMA_SLV_ERR),
  282.             !!(reg & XILINX_DMA_DMASR_DMA_DEC_ERR),
  283.             !!(reg & XILINX_DMA_DMASR_SG_INT_ERR),
  284.             !!(reg & XILINX_DMA_DMASR_SG_SLV_ERR),
  285.             !!(reg & XILINX_DMA_DMASR_SG_DEC_ERR),
  286.             !!(reg & XILINX_DMA_DMASR_IOC_IRQ),
  287.             !!(reg & XILINX_DMA_DMASR_DLY_IRQ),
  288.             !!(reg & XILINX_DMA_DMASR_ERR_IRQ),
  289.             (reg & XILINX_DMA_DMASR_FRAME_COUNT) >> XILINX_DMA_DMASR_FRAME_COUNT_SHIFT,
  290.             (reg & XILINX_DMA_DMASR_DELAY) >> XILINX_DMA_DMASR_DELAY_COUNT_SHIFT
  291.     );
  292. }
  293. EXPORT_SYMBOL(xilinx_dma_dump_status);
  294.  
  295. void xilinx_dma_dump_info(struct dma_chan *dchan)
  296. {
  297.     struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
  298. #if 0
  299.     struct xilinx_dma_desc_sw *desc, *next;
  300. #endif
  301.     dev_err(chan->dev,
  302.             "active_list_empty: %d "
  303.             "pending_list_size: %d "
  304.             "done_list_empty: %d\n",
  305.             list_empty(&chan->active_list),
  306.             chan->pending_list_size,
  307.             list_empty(&chan->done_list)
  308.     );
  309. #if 0
  310.     /* Iterate over descriptors */
  311.     if (list_empty(&chan->active_list))
  312.         return;
  313.  
  314.     list_for_each_entry_safe(desc, next, &chan->active_list, node) {
  315.         struct xilinx_dma_segment *segment;
  316.         dev_err(chan->dev, "desc: %p\n", desc);
  317.         /* Iterate over segments */
  318.         list_for_each_entry(segment, &desc->segments, node) {
  319.             struct xilinx_dma_desc_hw *hw = &segment->hw;
  320.             dev_err(chan->dev, "\tsegment: %p\n", segment);
  321.             dump_desc_hw(chan, hw);
  322.         }
  323.     }
  324. #endif
  325. }
  326. EXPORT_SYMBOL(xilinx_dma_dump_info);
  327.  
  328. static struct
  329. xilinx_dma_segment *xilinx_dma_alloc_segment(struct xilinx_dma_chan *chan)
  330. {
  331.     struct xilinx_dma_segment *segment;
  332.     dma_addr_t phys;
  333.  
  334.     segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
  335.     if (!segment)
  336.         return NULL;
  337.  
  338.     memset(segment, 0, sizeof(*segment));
  339.     segment->phys = phys;
  340.  
  341.     return segment;
  342. }
  343.  
  344. static void xilinx_dma_free_segment(struct xilinx_dma_chan *chan,
  345.                                     struct xilinx_dma_segment *segment)
  346. {
  347.     dma_pool_free(chan->desc_pool, segment, segment->phys);
  348. }
  349.  
  350. static struct
  351. xilinx_dma_desc_sw *xilinx_dma_alloc_descriptor(struct xilinx_dma_chan *chan)
  352. {
  353.     struct xilinx_dma_desc_sw *desc;
  354.  
  355.     desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
  356.     if (!desc)
  357.         return NULL;
  358.  
  359.     INIT_LIST_HEAD(&desc->segments);
  360.  
  361.     return desc;
  362. }
  363.  
  364. static void
  365. xilinx_dma_free_descriptor(struct xilinx_dma_chan *chan,
  366.                             struct xilinx_dma_desc_sw *desc)
  367. {
  368.     struct xilinx_dma_segment *segment, *next;
  369.  
  370.     BUG_ON(!desc);
  371.  
  372.     list_for_each_entry_safe(segment, next, &desc->segments, node) {
  373.         list_del(&segment->node);
  374.         xilinx_dma_free_segment(chan, segment);
  375.     }
  376.  
  377.     kfree(desc);
  378. }
  379.  
  380. static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
  381.                                         struct list_head *list)
  382. {
  383.     struct xilinx_dma_desc_sw *desc, *next;
  384.  
  385.     list_for_each_entry_safe(desc, next, list, node) {
  386.         list_del(&desc->node);
  387.         xilinx_dma_free_descriptor(chan, desc);
  388.     }
  389. }
  390.  
  391. static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
  392. {
  393.     struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
  394.  
  395.     /* Has this channel already been allocated? */
  396.     if (chan->desc_pool)
  397.         return 1;
  398.  
  399.     /*
  400.      * We need the descriptor to be aligned to 64bytes
  401.      * for meeting Xilinx DMA specification requirement.
  402.      */
  403.     chan->desc_pool =
  404.         dma_pool_create("xilinx_dma_desc_pool", chan->dev,
  405.                 sizeof(struct xilinx_dma_segment),
  406.                 __alignof__(struct xilinx_dma_segment), 0);
  407.     if (!chan->desc_pool) {
  408.         dev_err(chan->dev,
  409.             "unable to allocate channel %d descriptor pool\n",
  410.             chan->id);
  411.         return -ENOMEM;
  412.     }
  413.  
  414.     /* There is at least one descriptor free to be allocated */
  415.     return 1;
  416. }
  417.  
  418. static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
  419. {
  420.     struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
  421.  
  422.     dev_dbg(chan->dev, "Free all channel resources.\n");
  423.  
  424.     spin_lock_bh(&chan->lock); /* the cleanup tasklet also takes this lock */
  425.     xilinx_dma_free_desc_list(chan, &chan->active_list);
  426.     xilinx_dma_free_desc_list(chan, &chan->pending_list);
  427.     xilinx_dma_free_desc_list(chan, &chan->done_list);
  428.     dma_pool_destroy(chan->desc_pool);
  429.  
  430.     chan->desc_pool = NULL;
  431.     spin_unlock_bh(&chan->lock);
  432. }
  433.  
  434. static void xilinx_dma_complete_descriptors(struct xilinx_dma_chan *chan)
  435. {
  436.     struct xilinx_dma_desc_sw *desc;
  437.  
  438.     while (!list_empty(&chan->done_list)) {
  439.         dma_async_tx_callback callback;
  440.         void *callback_param;
  441.  
  442.         desc = list_first_entry(&chan->done_list,
  443.             struct xilinx_dma_desc_sw, node);
  444.  
  445.         /* Remove from the list of done transactions */
  446.         list_del(&desc->node);
  447.  
  448.         /* Run the link descriptor callback function */
  449.         callback = desc->async_tx.callback;
  450.         callback_param = desc->async_tx.callback_param;
  451.         if (callback) {
  452.             callback(callback_param);
  453.         }
  454.  
  455.         /* Run any dependencies, then free the descriptor */
  456.         xilinx_dma_free_descriptor(chan, desc);
  457.     }
  458. }
  459.  
  460. static enum dma_status xilinx_tx_status(struct dma_chan *dchan,
  461.                     dma_cookie_t cookie,
  462.                     struct dma_tx_state *txstate)
  463. {
  464.     return dma_cookie_status(dchan, cookie, txstate);
  465. }
  466.  
  467. /* Stop the hardware; any ongoing transfer is allowed to finish */
  468. static void dma_halt(struct xilinx_dma_chan *chan)
  469. {
  470.     int loop = XILINX_DMA_HALT_LOOP;
  471.     u32 status;
  472.     chan->cntrl &= ~XILINX_DMA_CR_RUNSTOP_MASK;
  473.     dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
  474.  
  475.     /* Wait for the hardware to halt */
  476.     do {
  477.         status = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
  478.         if (status & XILINX_DMA_SR_HALTED_MASK)
  479.             break;
  480.     } while (--loop);   /* pre-decrement so a timeout leaves loop == 0 */
  481.  
  482.     if (!loop) {
  483.         dev_err(chan->dev, "Cannot stop channel %p: %x\n",
  484.             chan, chan->cntrl);
  485.         chan->err = true;
  486.     }
  487. }
  488.  
  489. /* Start the hardware. Transfers are not started yet */
  490. static void dma_start(struct xilinx_dma_chan *chan)
  491. {
  492.     int loop = XILINX_DMA_HALT_LOOP;
  493.     chan->cntrl |= XILINX_DMA_CR_RUNSTOP_MASK;
  494.     dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
  495.  
  496.     /* Wait for the hardware to start */
  497.     do {
  498.         if (!(dma_read(chan, XILINX_DMA_STATUS_OFFSET) &
  499.             XILINX_DMA_SR_HALTED_MASK))
  500.             break;
  501.     } while (--loop);   /* pre-decrement so a timeout leaves loop == 0 */
  502.  
  503.     if (!loop) {
  504.         dev_err(chan->dev, "Cannot start channel %p: %x\n",
  505.              chan, chan->cntrl);
  506.  
  507.         chan->err = true;
  508.     }
  509. }
  510.  
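/*
 * Status helpers (illustrative addition): a hedged sketch of the
 * dma_is_running()/dma_is_idle() checks referred to by the commented-out
 * busy test in xilinx_dma_start_transfer() below. They only combine the
 * RS/HALTED/IDLE bits already defined above; see the decision table in that
 * function for how they are meant to be used.
 */
static inline bool dma_is_idle(struct xilinx_dma_chan *chan)
{
    return dma_read(chan, XILINX_DMA_STATUS_OFFSET) &
           XILINX_DMA_SR_IDLE_MASK;
}

static inline bool dma_is_running(struct xilinx_dma_chan *chan)
{
    return !(dma_read(chan, XILINX_DMA_STATUS_OFFSET) &
             XILINX_DMA_SR_HALTED_MASK) &&
           (chan->cntrl & XILINX_DMA_CR_RUNSTOP_MASK);
}
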
  511. static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
  512. {
  513.     struct xilinx_dma_desc_sw *head_desc, *tail_desc;
  514.     struct xilinx_dma_segment *tail_segment;
  515.     u32 status;
  516.     if (chan->err)
  517.         return;
  518.  
  519.     if (!list_empty(&chan->active_list))
  520.         return;
  521.  
  522.     if (list_empty(&chan->pending_list))
  523.         return;
  524.  
  525.     /* If hardware is busy, cannot submit */
  526.     /*
  527.     * RS        &&  !HALTED     &&  !IDLE -> BUSY
  528.     */
  529. //  if (dma_is_running(chan) && !dma_is_idle(chan)) {
  530. //      dev_dbg(chan->dev, "DMA controller still busy\n");
  531. //      goto out_unlock;
  532. //  }
  533.     // If not halted, check if idle. If idle, halt the DMA.
  534.     // If not idle, quit.
  535.     // If halted, go ahead and queue.
  536.     /*
  537.     * RS        &&  IDLE    -> HALT -> PROCEED
  538.     * RS        &&  !IDLE   -> BUSY
  539.     * !HALTED   &&  IDLE    -> HALT -> PROCEED
  540.     * !HALTED   &&  !IDLE   -> BUSY
  541.     * !RS       &&  HALTED  -> PROCEED
  542.     */
  543.     status = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
  544.     if (chan->cntrl & XILINX_DMA_CR_RUNSTOP_MASK) {
  545.         if (status & XILINX_DMA_SR_IDLE_MASK) {
  546.             dma_halt(chan);
  547.             if (chan->err)
  548.                 return;
  549.         } else {
  550. //          dev_err(chan->dev, "1 - DMA controller still busy\n");
  551. //          dump_status(chan);
  552.             return;
  553.         }
  554.     } else if (!(status & XILINX_DMA_SR_HALTED_MASK)) {
  555.         if (status & XILINX_DMA_SR_IDLE_MASK) {
  556.             dma_halt(chan);
  557.             if (chan->err)
  558.                 return;
  559.         } else {
  560.             dev_err(chan->dev, "2 - DMA controller still busy\n");
  561. //          dump_status(chan);
  562.             return;
  563.         }
  564.     }
  565.  
  566.     /*
  567.      * If hardware is idle, then all descriptors on active list are
  568.      * done, start new transfers
  569.      */
  570.     head_desc = list_first_entry(&chan->pending_list,
  571.             struct xilinx_dma_desc_sw, node);
  572.     tail_desc = list_last_entry(&chan->pending_list,
  573.             struct xilinx_dma_desc_sw, node);
  574.     tail_segment = list_last_entry(&tail_desc->segments,
  575.             struct xilinx_dma_segment, node);
  576.  
  577.     if (chan->last_pending_list_size != chan->pending_list_size) {
  578.         // Setup right delay
  579. //      pr_warn("id: %d pending_list_size: %d\n", chan->id, chan->pending_list_size);
  580.         chan->cntrl &= ~XILINX_DMA_XR_COALESCE_MASK;
  581.         chan->cntrl |= chan->pending_list_size << XILINX_DMA_COALESCE_SHIFT;
  582.         dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
  583.         chan->last_pending_list_size = chan->pending_list_size;
  584.     }
  585.  
  586.     dma_write(chan, XILINX_DMA_CDESC_OFFSET, head_desc->async_tx.phys);
  587.     dma_start(chan);
  588.  
  589.     if (unlikely(chan->err))
  590.         return;
  591.  
  592.     list_splice_tail_init(&chan->pending_list, &chan->active_list);
  593.  
  594.     /* Update tail ptr register and start the transfer */
  595.     dma_write(chan, XILINX_DMA_TDESC_OFFSET, tail_segment->phys);
  596.     chan->pending_list_size = 0;
  597. }
  598. static void xilinx_dma_check_descriptors(struct xilinx_dma_chan *chan);
  599.  
  600. static void xilinx_dma_issue_pending(struct dma_chan *dchan)
  601. {
  602.     struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
  603.     unsigned long flags;
  604.     if (!list_empty(&chan->active_list)) {
  605.         return;
  606.     }
  607.  
  608.     spin_lock_irqsave(&chan->lock, flags);
  609.     xilinx_dma_check_descriptors(chan);
  610.     xilinx_dma_start_transfer(chan);
  611.     spin_unlock_irqrestore(&chan->lock, flags);
  612. }
  613.  
  614. /**
  615.  * xilinx_dma_check_descriptors - move completed descriptors to the done list
  616.  * @chan: xilinx DMA channel
  617.  *
  618.  * CONTEXT: called with chan->lock held, from the tasklet or issue_pending
  619.  */
  620. static void xilinx_dma_check_descriptors(struct xilinx_dma_chan *chan)
  621. {
  622.     struct xilinx_dma_desc_sw *desc, *next;
  623.  
  624.     if (list_empty(&chan->active_list)) {
  625. //      dev_dbg(chan->dev, "no running descriptors\n");
  626.         return;
  627.     }
  628.  
  629.     /* Iterate over descriptors */
  630.     list_for_each_entry_safe(desc, next, &chan->active_list, node) {
  631.         int control_len __maybe_unused; /* only used when DEBUG is set */
  632.         int status_len __maybe_unused;
  633.         struct xilinx_dma_segment *segment;
  634. //      dev_dbg(chan->dev, "desc: %p\n", desc);
  635.         /* Iterate over segments */
  636.         list_for_each_entry(segment, &desc->segments, node) {
  637.             struct xilinx_dma_desc_hw *hw = &segment->hw;
  638. //          dev_dbg(chan->dev, "segment: %p\n", segment);
  639. //          dump_desc_hw(chan, hw);
  640. #ifdef DEBUG
  641.             if (hw->status & XILINX_DMA_HW_DESC_STATUS_ERROR_MASK)
  642.                 BUG();
  643. #endif
  644.             if (!(hw->status & XILINX_DMA_HW_DESC_STATUS_CMPLIT))
  645.                 return;
  646.  
  647. #ifdef DEBUG
  648.             control_len = hw->control & XILINX_DMA_HW_DESC_LENGTH;
  649.             status_len = hw->status & XILINX_DMA_HW_DESC_LENGTH;
  650.  
  651.             if (control_len != status_len) {
  652.                 dev_err(chan->dev, "clen: %d slen: %d\n", control_len, status_len);
  653.                 WARN_ON(control_len != status_len);
  654.             }
  655. #endif
  656.         }
  657.  
  658.         list_del(&desc->node);
  659.         dma_cookie_complete(&desc->async_tx);
  660.         list_add_tail(&desc->node, &chan->done_list);
  661.     }
  662. }
  663. /**
  664.  * xilinx_dma_chan_config - Configure DMA Channel IRQThreshold, IRQDelay
  665.  * and enable interrupts
  666.  * @chan: DMA channel
  667.  */
  668. static void xilinx_dma_chan_config(struct xilinx_dma_chan *chan)
  669. {
  670.     chan->cntrl = dma_read(chan, XILINX_DMA_CONTROL_OFFSET);
  671.  
  672.     chan->cntrl &= ~XILINX_DMA_XR_COALESCE_MASK;
  673.     chan->cntrl |= chan->config.coalesc << XILINX_DMA_COALESCE_SHIFT;
  674.  
  675.     chan->cntrl &= ~XILINX_DMA_XR_DELAY_MASK;
  676.     chan->cntrl |= chan->config.delay << XILINX_DMA_DELAY_SHIFT;
  677.  
  678.     chan->cntrl |= XILINX_DMA_XR_IRQ_ALL_MASK;
  679.  
  680.     dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
  681. }
  682.  
  683. /* Reset hardware */
  684. static int dma_reset(struct xilinx_dma_chan *chan)
  685. {
  686.     int loop = XILINX_DMA_RESET_LOOP;
  687.     u32 tmp;
  688.  
  689.     dma_write(chan, XILINX_DMA_CONTROL_OFFSET,
  690.           dma_read(chan, XILINX_DMA_CONTROL_OFFSET) |
  691.           XILINX_DMA_CR_RESET_MASK);
  692.  
  693.     tmp = dma_read(chan, XILINX_DMA_CONTROL_OFFSET) &
  694.             XILINX_DMA_CR_RESET_MASK;
  695.  
  696.     /* Wait for the hardware to finish reset */
  697.     while (loop && tmp) {
  698.         tmp = dma_read(chan, XILINX_DMA_CONTROL_OFFSET) &
  699.                         XILINX_DMA_CR_RESET_MASK;
  700.         loop -= 1;
  701.     }
  702.  
  703.     chan->cntrl = dma_read(chan, XILINX_DMA_CONTROL_OFFSET); /* cache the full CR, not just the reset bit */
  704.  
  705.     if (!loop) {
  706.         dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
  707.             dma_read(chan, XILINX_DMA_CONTROL_OFFSET),
  708.             dma_read(chan, XILINX_DMA_STATUS_OFFSET));
  709.         return -EBUSY;
  710.     }
  711.  
  712.     return 0;
  713. }
  714.  
  715. static irqreturn_t dma_intr_handler(int irq, void *data)
  716. {
  717.     struct xilinx_dma_chan *chan = data;
  718.     u32 stat;
  719.     irqreturn_t ret = IRQ_HANDLED;
  720.  
  721.     stat = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
  722.     if (!(stat & XILINX_DMA_XR_IRQ_ALL_MASK)) {
  723.         dev_warn(chan->dev,
  724.                 "channel %p: spurious irq\n", chan);
  725.         /* Not our interrupt: do not ack, do not schedule the tasklet */
  726.         return IRQ_NONE;
  727.     }
  728.  
  729.     /* Ack the interrupts */
  730.     dma_write(chan, XILINX_DMA_STATUS_OFFSET,
  731.                 XILINX_DMA_XR_IRQ_ALL_MASK);
  732.  
  733.     if (stat & XILINX_DMA_XR_IRQ_ERROR_MASK) {
  734.         dev_err(chan->dev,
  735.             "Channel %p has errors %x, cdr %x tdr %x\n",
  736.             chan,
  737.             dma_read(chan, XILINX_DMA_STATUS_OFFSET),
  738.             dma_read(chan, XILINX_DMA_CDESC_OFFSET),
  739.             dma_read(chan, XILINX_DMA_TDESC_OFFSET));
  740. //      dump_ctrl(chan);
  741. //      dump_status(chan);
  742.         chan->err = true;
  743.     }
  744.  
  745.     /*
  746.      * Device takes too long to do the transfer when user requires
  747.      * responsiveness
  748.      */
  749. //  if (stat & XILINX_DMA_XR_IRQ_DELAY_MASK) {
  750. //      dev_warn(chan->dev, "id: %d: Inter-packet latency too long\n", chan->id);
  751. //  }
  752.  
  753. //  if (stat & (XILINX_DMA_XR_IRQ_IOC_MASK | XILINX_DMA_XR_IRQ_DELAY_MASK)) {
  754. //      xilinx_dma_check_descriptors(chan);
  755. //      xilinx_dma_start_transfer(chan);
  756. //  }
  757.  
  758. out:
  759.     tasklet_schedule(&chan->tasklet);
  760.  
  761.     return ret;
  762. }
  763.  
  764. static void xilinx_dma_do_tasklet(unsigned long data)
  765. {
  766.     struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
  767.  
  768.     spin_lock(&chan->lock);
  769.     // We should drain the whole active list and append it to the done list
  770.     xilinx_dma_check_descriptors(chan);
  771.     xilinx_dma_start_transfer(chan);
  772.     xilinx_dma_complete_descriptors(chan);
  773.     spin_unlock(&chan->lock);
  774. }
  775.  
  776. /* Append the descriptor list to the pending list */
  777. static void append_desc_queue(struct xilinx_dma_chan *chan,
  778.                   struct xilinx_dma_desc_sw *desc)
  779. {
  780.     struct xilinx_dma_desc_sw *tail_desc;
  781.     struct xilinx_dma_segment *segment, *tail_segment;
  782.  
  783.     if (list_empty(&chan->pending_list))
  784.         goto append;
  785.  
  786.     tail_desc = list_last_entry(&chan->pending_list,
  787.                     struct xilinx_dma_desc_sw, node);
  788.     /*
  789.      * Add the hardware descriptor to the chain of hardware descriptors
  790.      * that already exists in memory.
  791.      */
  792.     segment = list_first_entry(&desc->segments,
  793.                                 struct xilinx_dma_segment, node);
  794.     tail_segment = list_last_entry(&tail_desc->segments,
  795.                                     struct xilinx_dma_segment, node);
  796.  
  797.     tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
  798.  
  799.     /*
  800.      * Add the software descriptor and all children to the list
  801.      * of pending transactions
  802.      */
  803. append:
  804.     list_add_tail(&desc->node, &chan->pending_list);
  805.     chan->pending_list_size++;
  806.  
  807.     if (unlikely(chan->pending_list_size > 0xFF)) {
  808.         pr_warn("pending_list_size: %d\n", chan->pending_list_size);
  809.         chan->pending_list_size = 0xFF;
  810.         BUG();
  811.     }
  812. }
  813.  
  814. /*
  815.  * Assign cookie to each descriptor, and append the descriptors to the pending
  816.  * list
  817.  */
  818. static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
  819. {
  820.     struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
  821.     struct xilinx_dma_desc_sw *desc = container_of(tx, struct xilinx_dma_desc_sw, async_tx);
  822.     dma_cookie_t cookie = -EBUSY;
  823.     unsigned long flags;
  824.  
  825.     spin_lock_irqsave(&chan->lock, flags);
  826.  
  827.     if (chan->err) {
  828.         /*
  829.          * If reset fails, need to hard reset the system.
  830.          * Channel is no longer functional
  831.          */
  832.         if (!dma_reset(chan))
  833.             chan->err = false;
  834.         else
  835.             goto out_unlock;
  836.     }
  837.  
  838.     cookie = dma_cookie_assign(tx);
  839.  
  840.     /* Put this transaction onto the tail of the pending queue */
  841.     append_desc_queue(chan, desc);
  842.  
  843. out_unlock:
  844.     spin_unlock_irqrestore(&chan->lock, flags);
  845.  
  846.     return cookie;
  847. }
  848.  
  849. /**
  850.  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
  851.  * @chan: DMA channel
  852.  * @sgl: scatterlist to transfer to/from
  853.  * @sg_len: number of entries in @scatterlist
  854.  * @direction: DMA direction
  855.  * @flags: transfer ack flags
  856.  */
  857. static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
  858.     struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
  859.     enum dma_transfer_direction direction, unsigned long flags,
  860.     void *context)
  861. {
  862.     struct xilinx_dma_chan *chan;
  863.     struct xilinx_dma_desc_sw *desc;
  864.     struct xilinx_dma_segment *segment, *prev = NULL;
  865.     struct xilinx_dma_desc_hw *hw;
  866.  
  867.     size_t copy;
  868.  
  869.     int i;
  870.     struct scatterlist *sg;
  871.     size_t sg_used = 0;
  872.     dma_addr_t dma_src;
  873.  
  874.     if (!dchan)
  875.         return NULL;
  876.  
  877.     chan = to_xilinx_chan(dchan);
  878.  
  879.     if (chan->direction != direction)
  880.         return NULL;
  881.  
  882.     /* Allocate a transaction descriptor */
  883.     desc = xilinx_dma_alloc_descriptor(chan);
  884.     if (!desc)
  885.         return NULL;
  886.  
  887.     /* Build transactions using information in the scatter gather list */
  888.     for_each_sg(sgl, sg, sg_len, i) {
  889.         sg_used = 0;
  890.  
  891.         /* Loop until the entire scatterlist entry is used */
  892.         while (sg_used < sg_dma_len(sg)) {
  893.             /* Allocate the link descriptor from DMA pool */
  894.             segment = xilinx_dma_alloc_segment(chan);
  895.             if (!segment) {
  896.                 dev_err(chan->dev,
  897.                     "No free memory for segment\n");
  898.                 goto fail;
  899.             }
  900.             hw = &segment->hw;
  901.  
  902.             /*
  903.              * Calculate the maximum number of bytes to transfer,
  904.              * making sure it is less than the hw limit
  905.              */
  906.             copy = min((size_t)(sg_dma_len(sg) - sg_used),
  907.                    (size_t)chan->max_len);
  908.  
  909.             dma_src = sg_dma_address(sg) + sg_used;
  910.  
  911.             hw->buf_addr = dma_src;
  912.  
  913.             /* Fill in the descriptor */
  914.             hw->control = copy;
  915.  
  916.             /*
  917.              * If this is not the first descriptor, chain the
  918.              * current descriptor after the previous descriptor
  919.              *
  920.              * For the first DMA_MEM_TO_DEV transfer, set SOP
  921.              */
  922.             if (prev) {
  923.                 prev->hw.next_desc = segment->phys;
  924.             } else if (direction == DMA_MEM_TO_DEV) {
  925.                 hw->control |= XILINX_DMA_HW_DESC_SOF;
  926.             }
  931.  
  932.             prev = segment;
  933.             sg_used += copy;
  934.  
  935.             /* Insert the segment into the descriptor segments list. */
  936.             list_add_tail(&segment->node, &desc->segments);
  937.         }
  938.     }
  939.  
  940.     /* All scatter-gather list entries have length == 0 */
  941.     if (!sg_used) {
  942.         BUG();
  943.     }
  944.  
  945.     segment = list_first_entry(&desc->segments,
  946.             struct xilinx_dma_segment, node);
  947.  
  948.     dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
  949.     desc->async_tx.tx_submit = xilinx_dma_tx_submit;
  950.     desc->async_tx.phys = segment->phys;
  951.     desc->async_tx.flags = flags;
  952.     desc->async_tx.cookie = -EBUSY;
  953.     async_tx_ack(&desc->async_tx);
  954.  
  955.     /* Link the last hardware descriptor with the first. */
  956.     hw->next_desc = segment->phys;
  957.  
  958.     /* Set EOP to the last link descriptor of new list */
  959.     hw->control |= XILINX_DMA_HW_DESC_EOF;
  960.  
  961.     return &desc->async_tx;
  962.  
  963. fail:
  964.     xilinx_dma_free_descriptor(chan, desc);
  965.     return NULL;
  966. }
  967.  
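/*
 * Client-side usage sketch (illustrative addition, not part of the driver):
 * shows how a consumer of this engine might configure and queue an MM2S
 * transfer through the dmaengine API of this era (it assumes the pre-4.0
 * dmaengine_device_control() helper). The xilinx_dma_config fields and the
 * DMA_SLAVE_CONFIG path mirror xilinx_dma_device_control() below; the
 * function name and buffer handling are hypothetical.
 */
static int __maybe_unused xilinx_dma_client_example(struct dma_chan *chan,
                                                    struct scatterlist *sgl,
                                                    unsigned int sg_len)
{
    struct xilinx_dma_config cfg = {
        .coalesc = 1,   /* interrupt after every completed packet */
        .delay = 0,     /* no inter-packet delay interrupt */
    };
    struct dma_async_tx_descriptor *txd;
    dma_cookie_t cookie;

    /* Program IRQ coalescing/delay through the DMA_SLAVE_CONFIG hook */
    dmaengine_device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg);

    /* Build one MM2S transaction from the caller's mapped scatterlist */
    txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!txd)
        return -ENOMEM;

    cookie = dmaengine_submit(txd);
    if (dma_submit_error(cookie))
        return -EINVAL;

    /* Kick the engine; completion is signalled via txd->callback */
    dma_async_issue_pending(chan);

    return 0;
}
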
  968. /* Run-time device configuration for Axi DMA */
  969. static int xilinx_dma_device_control(struct dma_chan *dchan,
  970.                      enum dma_ctrl_cmd cmd, unsigned long arg)
  971. {
  972.     struct xilinx_dma_chan *chan;
  973.  
  974.     if (!dchan)
  975.         return -EINVAL;
  976.  
  977.     chan = to_xilinx_chan(dchan);
  978.  
  979.     if (cmd == DMA_TERMINATE_ALL) {
  980.         /* Halt the DMA engine */
  981.         dma_halt(chan);
  982.  
  983.         spin_lock_bh(&chan->lock);
  984.         /* Remove and free all of the descriptors in the lists */
  985.         xilinx_dma_free_desc_list(chan, &chan->pending_list);
  986.         xilinx_dma_free_desc_list(chan, &chan->active_list);
  987.         xilinx_dma_free_desc_list(chan, &chan->done_list);
  988.         spin_unlock_bh(&chan->lock);
  989.         return 0;
  990.     } else if (cmd == DMA_SLAVE_CONFIG) {
  991.         /*
  992.          * Configure interrupt coalescing and delay counter
  993.          * Use value XILINX_DMA_NO_CHANGE to signal no change
  994.          */
  995.         struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
  996.  
  997.         if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX)
  998.             chan->config.coalesc = cfg->coalesc;
  999.  
  1000.         if (cfg->delay <= XILINX_DMA_DELAY_MAX)
  1001.             chan->config.delay = cfg->delay;
  1002.  
  1003.         xilinx_dma_chan_config(chan);
  1004.  
  1005.         return 0;
  1006.     } else
  1007.         return -ENXIO;
  1008. }
  1009.  
  1010. static void xilinx_dma_free_channels(struct xilinx_dma_device *xdev)
  1011. {
  1012.     int i;
  1013.  
  1014.     for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
  1015.         if (!xdev->chan[i])
  1016.             continue;
  1017.  
  1018.         list_del(&xdev->chan[i]->common.device_node);
  1019.         tasklet_kill(&xdev->chan[i]->tasklet);
  1020.         irq_dispose_mapping(xdev->chan[i]->irq);
  1021.     }
  1022. }
  1023.  
  1024. /*
  1025.  * Probing channels
  1026.  *
  1027.  * . Get channel features from the device tree entry
  1028.  * . Initialize special channel handling routines
  1029.  */
  1030. static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
  1031.                  struct device_node *node)
  1032. {
  1033.     struct xilinx_dma_chan *chan;
  1034.     int err;
  1035.     u32 device_id;
  1036.  
  1037.     /* alloc channel */
  1038.     chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
  1039.     if (!chan)
  1040.         return -ENOMEM;
  1041.  
  1042.     chan->max_len = XILINX_DMA_MAX_TRANS_LEN;
  1043.     chan->config.coalesc = 0x01;
  1044.  
  1045.     err = of_property_read_u32(node, "xlnx,device-id", &device_id);
  1046.     if (err) {
  1047.         dev_err(xdev->dev, "unable to read device id property\n");
  1048.         return err;
  1049.     }
  1050.  
  1051.     if (of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel")) {
  1052.         chan->regs = xdev->regs;
  1053.         chan->id = 0;
  1054.         chan->direction = DMA_MEM_TO_DEV;
  1055.     } else if (of_device_is_compatible(node, "xlnx,axi-dma-s2mm-channel")) {
  1056.         chan->regs = (xdev->regs + XILINX_DMA_RX_CHANNEL_OFFSET);
  1057.         chan->id = 1;
  1058.         chan->direction = DMA_DEV_TO_MEM;
  1059.     } else {
  1060.         dev_err(xdev->dev, "Invalid channel compatible node\n");
  1061.         return -EINVAL;
  1062.     }
  1063.  
  1064.     /*
  1065.      * Used by dmatest channel matching in slave transfers
  1066.      * Can change it to be a structure to have more matching information
  1067.      */
  1068.     chan->private = (chan->direction & 0xFF) | XILINX_DMA_IP_DMA |
  1069.             (device_id << XILINX_DMA_DEVICE_ID_SHIFT);
  1070.     chan->common.private = (void *)&(chan->private);
  1071.  
  1072.     dma_cookie_init(&chan->common);
  1073.  
  1074.     chan->dev = xdev->dev;
  1075.     xdev->chan[chan->id] = chan;
  1076.  
  1077.     /* Initialize the channel */
  1078.     err = dma_reset(chan);
  1079.     if (err) {
  1080.         dev_err(xdev->dev, "Reset channel failed\n");
  1081.         return err;
  1082.     }
  1083.  
  1084.     spin_lock_init(&chan->lock);
  1085.     INIT_LIST_HEAD(&chan->pending_list);
  1086.     chan->pending_list_size = 0;
  1087.     chan->last_pending_list_size = 1;
  1088.     INIT_LIST_HEAD(&chan->active_list);
  1089.     INIT_LIST_HEAD(&chan->done_list);
  1090.  
  1091.     chan->common.device = &xdev->common;
  1092.  
  1093.     /* find the IRQ line, if it exists in the device tree */
  1094.     chan->irq = irq_of_parse_and_map(node, 0);
  1095.     err = devm_request_irq(xdev->dev, chan->irq, dma_intr_handler,
  1096.                 IRQF_NO_THREAD,
  1097.                 "xilinx-dma-controller", chan);
  1098.     if (err) {
  1099.         dev_err(xdev->dev, "unable to request IRQ\n");
  1100.         return err;
  1101.     }
  1102.  
  1103.     tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, (unsigned long)chan);
  1104.  
  1105.     /* Add the channel to DMA device channel list */
  1106.     list_add_tail(&chan->common.device_node, &xdev->common.channels);
  1107.  
  1108.     return 0;
  1109. }
  1110.  
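/*
 * Device-tree sketch (illustrative addition): a hedged example of the node
 * layout this probe path expects: an "xlnx,axi-dma" parent with one child
 * node per channel, matched on the mm2s/s2mm compatibles and carrying the
 * "xlnx,device-id" property read above. Addresses and interrupt specifiers
 * are placeholders.
 *
 *  axi_dma_0: dma@40400000 {
 *      compatible = "xlnx,axi-dma";
 *      reg = <0x40400000 0x10000>;
 *
 *      dma-channel@40400000 {
 *          compatible = "xlnx,axi-dma-mm2s-channel";
 *          interrupts = <0 59 4>;
 *          xlnx,device-id = <0x0>;
 *      };
 *
 *      dma-channel@40400030 {
 *          compatible = "xlnx,axi-dma-s2mm-channel";
 *          interrupts = <0 60 4>;
 *          xlnx,device-id = <0x0>;
 *      };
 *  };
 */
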
  1111. static int xilinx_dma_probe(struct platform_device *pdev)
  1112. {
  1113.     struct xilinx_dma_device *xdev;
  1114.     struct device_node *child, *node;
  1115.     struct resource *res;
  1116.     int ret;
  1117.     unsigned int i;
  1118.  
  1119.     node = pdev->dev.of_node;
  1120.  
  1121.     if (of_get_child_count(node) == 0) {
  1122.         dev_err(&pdev->dev, "no channels defined\n");
  1123.         return -ENODEV;
  1124.     }
  1125.  
  1126.     xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
  1127.     if (!xdev)
  1128.         return -ENOMEM;
  1129.  
  1130.     xdev->dev = &(pdev->dev);
  1131.     INIT_LIST_HEAD(&xdev->common.channels);
  1132.  
  1133.     /* iomap registers */
  1134.     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1135.     xdev->regs = devm_ioremap_resource(&pdev->dev, res);
  1136.     if (IS_ERR(xdev->regs))
  1137.         return PTR_ERR(xdev->regs);
  1138.  
  1139.     /* The AXI DMA engine only does slave transfers */
  1140.     dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
  1141.     dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
  1142.     xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
  1143.     xdev->common.device_control = xilinx_dma_device_control;
  1144.     xdev->common.device_issue_pending = xilinx_dma_issue_pending;
  1145.     xdev->common.device_alloc_chan_resources =
  1146.         xilinx_dma_alloc_chan_resources;
  1147.     xdev->common.device_free_chan_resources =
  1148.         xilinx_dma_free_chan_resources;
  1149.     xdev->common.device_tx_status = xilinx_tx_status;
  1150.     xdev->common.dev = &pdev->dev;
  1151.  
  1152.     platform_set_drvdata(pdev, xdev);
  1153.  
  1154.     for_each_child_of_node(node, child) {
  1155.         ret = xilinx_dma_chan_probe(xdev, child);
  1156.         if (ret) {
  1157.             dev_err(&pdev->dev, "Probing channels failed\n");
  1158.             goto free_chan_resources;
  1159.         }
  1160.     }
  1161.  
  1162.     for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; ++i) {
  1163.         if (xdev->chan[i]) {
  1164.             xilinx_dma_chan_config(xdev->chan[i]);
  1165.         }
  1166.     }
  1167.  
  1168.     ret = dma_async_device_register(&xdev->common);
  1169.     if (ret) {
  1170.         dev_err(&pdev->dev, "DMA device registration failed\n");
  1171.         goto free_chan_resources;
  1172.     }
  1173.  
  1174.     dev_info(&pdev->dev, "probed\n");
  1175.  
  1176.     return 0;
  1177.  
  1178. free_chan_resources:
  1179.     xilinx_dma_free_channels(xdev);
  1180.  
  1181.     return ret;
  1182. }
  1183.  
  1184. static int xilinx_dma_remove(struct platform_device *pdev)
  1185. {
  1186.     struct xilinx_dma_device *xdev;
  1187.  
  1188.     xdev = platform_get_drvdata(pdev);
  1189.     dma_async_device_unregister(&xdev->common);
  1190.  
  1191.     xilinx_dma_free_channels(xdev);
  1192.  
  1193.     return 0;
  1194. }
  1195.  
  1196. static const struct of_device_id xilinx_dma_of_match[] = {
  1197.     { .compatible = "xlnx,axi-dma", },
  1198.     {}
  1199. };
  1200. MODULE_DEVICE_TABLE(of, xilinx_dma_of_match);
  1201.  
  1202. static struct platform_driver xilinx_dma_driver = {
  1203.     .driver = {
  1204.         .name = "xilinx-dma",
  1205.         .of_match_table = xilinx_dma_of_match,
  1206.     },
  1207.     .probe = xilinx_dma_probe,
  1208.     .remove = xilinx_dma_remove,
  1209. };
  1210.  
  1211. module_platform_driver(xilinx_dma_driver);
  1212.  
  1213. MODULE_AUTHOR("Xilinx, Inc.");
  1214. MODULE_DESCRIPTION("Xilinx DMA driver");
  1215. MODULE_LICENSE("GPL v2");