- /*
- * Xilinx AXI DMA Engine support
- *
- * Copyright (C) 2012 - 2013 Xilinx, Inc. All rights reserved.
- *
- * Based on the Freescale DMA driver.
- *
- * Description:
- * The AXI DMA engine performs transfers between memory and a device. It can
- * be configured with one channel or two channels; when configured with two
- * channels, one transmits to the device and the other receives from it.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
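- /*
- * Illustrative client usage (a sketch, not part of this driver): a slave
- * device driver would typically drive a channel through the generic
- * dmaengine API, roughly:
- *
- *   dma_cap_zero(mask);
- *   dma_cap_set(DMA_SLAVE, mask);
- *   chan = dma_request_channel(mask, my_filter, &my_match);
- *   txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
- *                                 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- *   cookie = dmaengine_submit(txd);
- *   dma_async_issue_pending(chan);
- *
- * my_filter()/my_match are hypothetical; a filter would match on the
- * chan->private word that xilinx_dma_chan_probe() sets up below.
- */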
- //#define DEBUG
- #include <linux/amba/xilinx_dma.h>
- #include <linux/dmapool.h>
- #include <linux/init.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/irqdomain.h>
- #include <linux/module.h>
- #include <linux/of.h>
- #include <linux/of_address.h>
- #include <linux/of_irq.h>
- #include <linux/of_platform.h>
- #include <linux/platform_device.h>
- #include <linux/slab.h>
- #include "../dmaengine.h"
- /* Hw specific definitions */
- #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 /* Max no of channels */
- #define XILINX_DMA_MAX_TRANS_LEN 0x7FFFFF /* Max transfer length */
- /* Register Offsets */
- #define XILINX_DMA_CONTROL_OFFSET 0x00 /* Control Reg */
- #define XILINX_DMA_STATUS_OFFSET 0x04 /* Status Reg */
- #define XILINX_DMA_CDESC_OFFSET 0x08 /* Current descriptor Reg */
- #define XILINX_DMA_TDESC_OFFSET 0x10 /* Tail descriptor Reg */
- #define XILINX_DMA_SRCADDR_OFFSET 0x18 /* Source Address Reg */
- #define XILINX_DMA_DSTADDR_OFFSET 0x20 /* Dest Address Reg */
- #define XILINX_DMA_BTT_OFFSET 0x28 /* Bytes to transfer Reg */
- /* General register bits definitions */
- #define XILINX_DMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
- #define XILINX_DMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA engine */
- #define XILINX_DMA_SR_HALTED_MASK 0x00000001 /* DMA channel halted */
- #define XILINX_DMA_SR_IDLE_MASK 0x00000002 /* DMA channel idle */
- #define XILINX_DMA_XR_IRQ_IOC_MASK 0x00001000 /* Completion interrupt */
- #define XILINX_DMA_XR_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
- #define XILINX_DMA_XR_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
- #define XILINX_DMA_XR_IRQ_ALL_MASK 0x00007000 /* All interrupts */
- #define XILINX_DMA_XR_DELAY_MASK 0xFF000000 /* Delay timeout counter */
- #define XILINX_DMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
- #define XILINX_DMA_DELAY_SHIFT 24 /* Delay timeout counter shift */
- #define XILINX_DMA_COALESCE_SHIFT 16 /* Coalesce counter shift */
- #define XILINX_DMA_DELAY_MAX 0xFF /* Maximum delay counter value */
- #define XILINX_DMA_COALESCE_MAX 0xFF /* Max coalescing counter value */
- #define XILINX_DMA_RX_CHANNEL_OFFSET 0x30 /* S2MM Channel Offset */
- /* BD definitions for AXI Dma */
- #define XILINX_DMA_HW_DESC_STATUS_CMPLIT BIT(31)
- #define XILINX_DMA_HW_DESC_STATUS_DMA_DEC_ERR BIT(30)
- #define XILINX_DMA_HW_DESC_STATUS_DMA_SLV_ERR BIT(29)
- #define XILINX_DMA_HW_DESC_STATUS_DMA_INT_ERR BIT(28)
- #define XILINX_DMA_HW_DESC_STATUS_ALL_MASK GENMASK(31, 28)
- #define XILINX_DMA_HW_DESC_STATUS_ERROR_MASK GENMASK(30, 28)
- #define XILINX_DMA_HW_DESC_SOF BIT(27) /* Start of packet bit */
- #define XILINX_DMA_HW_DESC_EOF BIT(26) /* End of packet bit */
- #define XILINX_DMA_HW_DESC_LENGTH GENMASK(22, 0)
- /* Delay loop counter to prevent hardware failure */
- #define XILINX_DMA_RESET_LOOP 1000000
- #define XILINX_DMA_HALT_LOOP 1000000
- #if defined(CONFIG_XILINX_DMATEST) || defined(CONFIG_XILINX_DMATEST_MODULE)
- # define TEST_DMA_WITH_LOOPBACK
- #endif
- /* Hardware descriptor */
- struct xilinx_dma_desc_hw {
- u32 next_desc; /* 0x00 */
- u32 pad1; /* 0x04 */
- u32 buf_addr; /* 0x08 */
- u32 pad2; /* 0x0C */
- u32 pad3; /* 0x10 */
- u32 pad4; /* 0x14 */
- u32 control; /* 0x18 */
- u32 status; /* 0x1C */
- u32 app_0; /* 0x20 */
- u32 app_1; /* 0x24 */
- u32 app_2; /* 0x28 */
- u32 app_3; /* 0x2C */
- u32 app_4; /* 0x30 */
- } __aligned(64);
- struct xilinx_dma_segment {
- struct xilinx_dma_desc_hw hw;
- struct list_head node;
- dma_addr_t phys;
- } __aligned(64);
- /* Software descriptor */
- struct xilinx_dma_desc_sw {
- struct dma_async_tx_descriptor async_tx;
- struct list_head segments;
- struct list_head node;
- };
- /* Per-channel structure; DMA-specific state and operations are embedded here */
- struct xilinx_dma_chan {
- void __iomem *regs; /* Control status registers */
- spinlock_t lock; /* Descriptor operation lock */
- struct list_head active_list; /* Active descriptors */
- struct list_head pending_list; /* Descriptors waiting */
- unsigned int pending_list_size;
- unsigned int last_pending_list_size;
- struct list_head done_list; /* Done descriptors */
- struct dma_chan common; /* DMA common channel */
- struct dma_pool *desc_pool; /* Descriptors pool */
- struct device *dev; /* The dma device */
- u32 cntrl;
- int irq; /* Channel IRQ */
- int id; /* Channel ID */
- enum dma_transfer_direction direction;
- /* Transfer direction */
- int max_len; /* Maximum data len per transfer */
- bool err; /* Channel has errors */
- struct tasklet_struct tasklet; /* Cleanup work after irq */
- u32 private; /* Match info for channel request */
- struct xilinx_dma_config config;
- /* Device configuration info */
- };
- /* DMA Device Structure */
- struct xilinx_dma_device {
- void __iomem *regs;
- struct device *dev;
- struct dma_device common;
- struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- };
- #define to_xilinx_chan(chan) \
- container_of(chan, struct xilinx_dma_chan, common)
- /* IO accessors */
- static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 val)
- {
- writel(val, chan->regs + reg);
- }
- static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
- {
- return readl(chan->regs + reg);
- }
- /* Finer-grained DMACR/DMASR bit definitions used by the register dump helpers below */
- #define XILINX_DMA_REG_DMACR 0x00
- #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
- #define XILINX_DMA_DMACR_RESET BIT(2)
- #define XILINX_DMA_DMACR_KEYHOLE BIT(3)
- #define XILINX_DMA_DMACR_CYCLIC_EN BIT(4)
- #define XILINX_DMA_DMACR_IOC_IRQ BIT(12)
- #define XILINX_DMA_DMACR_DLY_IRQ BIT(13)
- #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
- #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
- #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
- #define XILINX_DMA_DMACR_FRAME_COUNT GENMASK(23, 16)
- #define XILINX_DMA_DMACR_DELAY_COUNT_SHIFT 24
- #define XILINX_DMA_DMACR_DELAY_COUNT_MAX 0xff
- #define XILINX_DMA_DMACR_DELAY GENMASK(31, 24)
- #define XILINX_DMA_REG_DMASR 0x04
- #define XILINX_DMA_DMASR_HALTED BIT(0)
- #define XILINX_DMA_DMASR_IDLE BIT(1)
- #define XILINX_DMA_DMASR_SG_INCLD BIT(3)
- #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
- #define XILINX_DMA_DMASR_DMA_SLV_ERR BIT(5)
- #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
- #define XILINX_DMA_DMASR_SG_INT_ERR BIT(8)
- #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
- #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
- #define XILINX_DMA_DMASR_IOC_IRQ BIT(12)
- #define XILINX_DMA_DMASR_DLY_IRQ BIT(13)
- #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
- #define XILINX_DMA_DMASR_FRAME_COUNT_SHIFT 16
- #define XILINX_DMA_DMASR_FRAME_COUNT GENMASK(23, 16)
- #define XILINX_DMA_DMASR_DELAY_COUNT_SHIFT 24
- #define XILINX_DMA_DMASR_DELAY GENMASK(31, 24)
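- /* Dump one hardware descriptor's control/status fields (debug aid) */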
- void dump_desc_hw(struct xilinx_dma_chan *chan, struct xilinx_dma_desc_hw *desc)
- {
- dev_dbg(chan->dev,
- "next_desc: %x\n"
- "buf_addr: %x\n"
- "control: %x\n"
- " SOF: %d"
- " EOF: %d"
- " len: %lx\n"
- "status: %x\n"
- " CMPLIT: %d"
- " DMA_DEC_ERR: %d"
- " DMA_SLV_ERR: %d"
- " DMA_INT_ERR: %d"
- " tlen: %lx\n"
- , desc->next_desc
- , desc->buf_addr
- , desc->control
- , !!(desc->control & XILINX_DMA_HW_DESC_SOF)
- , !!(desc->control & XILINX_DMA_HW_DESC_EOF)
- , desc->control & XILINX_DMA_HW_DESC_LENGTH
- , desc->status
- , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_CMPLIT)
- , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_DMA_DEC_ERR)
- , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_DMA_SLV_ERR)
- , !!(desc->status & XILINX_DMA_HW_DESC_STATUS_DMA_INT_ERR)
- , desc->status & XILINX_DMA_HW_DESC_LENGTH
- );
- }
- void xilinx_dma_dump_ctrl(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- u32 reg = dma_read(chan, XILINX_DMA_CONTROL_OFFSET);
- dev_dbg(chan->dev,
- "id: %d ctrl: %x\n"
- " RUNSTOP: %d"
- " RESET: %d"
- " KEYHOLE: %d"
- " CYCLIC_EN: %d"
- " IOC_IRQ: %d"
- " DLY_IRQ: %d"
- " ERR_IRQ: %d"
- " FRAME_COUNT: %lu"
- " DELAY: %lu\n",
- chan->id,
- reg,
- !!(reg & XILINX_DMA_DMACR_RUNSTOP),
- !!(reg & XILINX_DMA_DMACR_RESET),
- !!(reg & XILINX_DMA_DMACR_KEYHOLE),
- !!(reg & XILINX_DMA_DMACR_CYCLIC_EN),
- !!(reg & XILINX_DMA_DMACR_IOC_IRQ),
- !!(reg & XILINX_DMA_DMACR_DLY_IRQ),
- !!(reg & XILINX_DMA_DMACR_ERR_IRQ),
- (reg & XILINX_DMA_DMACR_FRAME_COUNT) >> XILINX_DMA_DMACR_FRAME_COUNT_SHIFT,
- (reg & XILINX_DMA_DMACR_DELAY) >> XILINX_DMA_DMACR_DELAY_COUNT_SHIFT
- );
- }
- EXPORT_SYMBOL(xilinx_dma_dump_ctrl);
- void xilinx_dma_dump_status(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- u32 reg = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
- dev_dbg(chan->dev,
- "id: %d status: %x\n"
- " HALTED: %d "
- " IDLE: %d "
- " INT_ERR: %d SLV_ERR: %d DEC_ERR: %d "
- " SG_INT_ERR: %d SG_SLV_ERR: %d SG_DEC_ERR: %d "
- " IOC_IRQ: %d DLY_IRQ: %d ERR_IRQ: %d "
- " FC: %lu DLY: %lu\n",
- chan->id, reg,
- !!(reg & XILINX_DMA_DMASR_HALTED),
- !!(reg & XILINX_DMA_DMASR_IDLE),
- !!(reg & XILINX_DMA_DMASR_DMA_INT_ERR),
- !!(reg & XILINX_DMA_DMASR_DMA_SLV_ERR),
- !!(reg & XILINX_DMA_DMASR_DMA_DEC_ERR),
- !!(reg & XILINX_DMA_DMASR_SG_INT_ERR),
- !!(reg & XILINX_DMA_DMASR_SG_SLV_ERR),
- !!(reg & XILINX_DMA_DMASR_SG_DEC_ERR),
- !!(reg & XILINX_DMA_DMASR_IOC_IRQ),
- !!(reg & XILINX_DMA_DMASR_DLY_IRQ),
- !!(reg & XILINX_DMA_DMASR_ERR_IRQ),
- (reg & XILINX_DMA_DMASR_FRAME_COUNT) >> XILINX_DMA_DMASR_FRAME_COUNT_SHIFT,
- (reg & XILINX_DMA_DMASR_DELAY) >> XILINX_DMA_DMASR_DELAY_COUNT_SHIFT
- );
- }
- EXPORT_SYMBOL(xilinx_dma_dump_status);
- void xilinx_dma_dump_info(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- #if 0
- struct xilinx_dma_desc_sw *desc, *next;
- #endif
- dev_err(chan->dev,
- "active_list_empty: %d "
- "pending_list_size: %d "
- "done_list_empty: %d\n",
- list_empty(&chan->active_list),
- chan->pending_list_size,
- list_empty(&chan->done_list)
- );
- #if 0
- /* Iterate over descriptors */
- if (list_empty(&chan->active_list))
- return;
- list_for_each_entry_safe(desc, next, &chan->active_list, node) {
- struct xilinx_dma_segment *segment;
- dev_err(chan->dev, "desc: %p\n", desc);
- /* Iterate over segments */
- list_for_each_entry(segment, &desc->segments, node) {
- struct xilinx_dma_desc_hw *hw = &segment->hw;
- dev_err(chan->dev, "\tsegment: %p\n", segment);
- dump_desc_hw(chan, hw);
- }
- }
- #endif
- }
- EXPORT_SYMBOL(xilinx_dma_dump_info);
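- /* Allocate a zeroed hardware segment (buffer descriptor) from the channel's DMA pool */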
- static struct xilinx_dma_segment *
- xilinx_dma_alloc_segment(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_segment *segment;
- dma_addr_t phys;
- segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
- memset(segment, 0, sizeof(*segment));
- segment->phys = phys;
- return segment;
- }
- static void xilinx_dma_free_segment(struct xilinx_dma_chan *chan,
- struct xilinx_dma_segment *segment)
- {
- dma_pool_free(chan->desc_pool, segment, segment->phys);
- }
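- /* Allocate a software descriptor that collects a chain of hardware segments */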
- static struct xilinx_dma_desc_sw *
- xilinx_dma_alloc_descriptor(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_desc_sw *desc;
- desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
- if (!desc)
- return NULL;
- INIT_LIST_HEAD(&desc->segments);
- return desc;
- }
- static void
- xilinx_dma_free_descriptor(struct xilinx_dma_chan *chan,
- struct xilinx_dma_desc_sw *desc)
- {
- struct xilinx_dma_segment *segment, *next;
- BUG_ON(!desc);
- list_for_each_entry_safe(segment, next, &desc->segments, node) {
- list_del(&segment->node);
- xilinx_dma_free_segment(chan, segment);
- }
- kfree(desc);
- }
- static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
- struct list_head *list)
- {
- struct xilinx_dma_desc_sw *desc, *next;
- list_for_each_entry_safe(desc, next, list, node) {
- list_del(&desc->node);
- xilinx_dma_free_descriptor(chan, desc);
- }
- }
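- /* dmaengine callback: create the 64-byte aligned descriptor pool on first use */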
- static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- /* Has this channel already been allocated? */
- if (chan->desc_pool)
- return 1;
- /*
- * Descriptors must be aligned to 64 bytes to meet the Xilinx DMA
- * specification requirement.
- */
- chan->desc_pool =
- dma_pool_create("xilinx_dma_desc_pool", chan->dev,
- sizeof(struct xilinx_dma_segment),
- __alignof__(struct xilinx_dma_segment), 0);
- if (!chan->desc_pool) {
- dev_err(chan->dev,
- "unable to allocate channel %d descriptor pool\n",
- chan->id);
- return -ENOMEM;
- }
- /* There is at least one descriptor free to be allocated */
- return 1;
- }
- static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- dev_dbg(chan->dev, "Free all channel resources.\n");
- spin_lock(&chan->lock);
- xilinx_dma_free_desc_list(chan, &chan->active_list);
- xilinx_dma_free_desc_list(chan, &chan->pending_list);
- xilinx_dma_free_desc_list(chan, &chan->done_list);
- dma_pool_destroy(chan->desc_pool);
- chan->desc_pool = NULL;
- spin_unlock(&chan->lock);
- }
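- /* Run the completion callback for, then free, every descriptor on the done list */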
- static void xilinx_dma_complete_descriptors(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_desc_sw *desc;
- while (!list_empty(&chan->done_list)) {
- dma_async_tx_callback callback;
- void *callback_param;
- desc = list_first_entry(&chan->done_list,
- struct xilinx_dma_desc_sw, node);
- /* Remove from the list of done transactions */
- list_del(&desc->node);
- /* Run the link descriptor callback function */
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
- if (callback) {
- callback(callback_param);
- }
- /* Run any dependencies, then free the descriptor */
- xilinx_dma_free_descriptor(chan, desc);
- }
- }
- static enum dma_status xilinx_tx_status(struct dma_chan *dchan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
- {
- return dma_cookie_status(dchan, cookie, txstate);
- }
- /* Stop the hardware; the ongoing transfer is allowed to finish */
- static void dma_halt(struct xilinx_dma_chan *chan)
- {
- int loop = XILINX_DMA_HALT_LOOP;
- u32 status;
- chan->cntrl &= ~XILINX_DMA_CR_RUNSTOP_MASK;
- dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
- /* Wait for the hardware to halt */
- do {
- status = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
- if (status & XILINX_DMA_SR_HALTED_MASK)
- break;
- } while (--loop);
- if (!loop) {
- dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, chan->cntrl);
- chan->err = true;
- }
- }
- /* Start the hardware. Transfers are not started yet */
- static void dma_start(struct xilinx_dma_chan *chan)
- {
- int loop = XILINX_DMA_HALT_LOOP;
- chan->cntrl |= XILINX_DMA_CR_RUNSTOP_MASK;
- dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
- /* Wait for the hardware to start */
- do {
- if (!(dma_read(chan, XILINX_DMA_STATUS_OFFSET) &
- XILINX_DMA_SR_HALTED_MASK))
- break;
- } while (--loop);
- if (!loop) {
- dev_err(chan->dev, "Cannot start channel %p: %x\n",
- chan, chan->cntrl);
- chan->err = true;
- }
- }
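- /*
- * xilinx_dma_start_transfer - Push pending descriptors to the hardware
- * @chan: xilinx DMA channel
- *
- * Does nothing while descriptors are still active or when nothing is
- * pending. Otherwise, once the engine is idle or halted, program the
- * current-descriptor register with the head of the pending list, start the
- * engine, splice the pending list onto the active list and write the
- * tail-descriptor register to kick the transfer. Called with chan->lock held.
- */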
- static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_desc_sw *head_desc, *tail_desc;
- struct xilinx_dma_segment *tail_segment;
- u32 status;
- if (chan->err)
- return;
- if (!list_empty(&chan->active_list))
- return;
- if (list_empty(&chan->pending_list))
- return;
- /*
- * If the hardware is busy we cannot submit (RS && !HALTED && !IDLE -> BUSY).
- * If it is not halted, check whether it is idle: if idle, halt the engine
- * and proceed; if not idle, bail out. If it is already halted, go ahead
- * and queue:
- *
- * RS && IDLE       -> HALT -> PROCEED
- * RS && !IDLE      -> BUSY
- * !HALTED && IDLE  -> HALT -> PROCEED
- * !HALTED && !IDLE -> BUSY
- * !RS && HALTED    -> PROCEED
- */
- status = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
- if (chan->cntrl & XILINX_DMA_CR_RUNSTOP_MASK) {
- if (status & XILINX_DMA_SR_IDLE_MASK) {
- dma_halt(chan);
- if (chan->err)
- return;
- } else {
- // dev_err(chan->dev, "1 - DMA controller still busy\n");
- // dump_status(chan);
- return;
- }
- } else if (!(status & XILINX_DMA_SR_HALTED_MASK)) {
- if (status & XILINX_DMA_SR_IDLE_MASK) {
- dma_halt(chan);
- if (chan->err)
- return;
- } else {
- dev_err(chan->dev, "2 - DMA controller still busy\n");
- // dump_status(chan);
- return;
- }
- }
- /*
- * If hardware is idle, then all descriptors on active list are
- * done, start new transfers
- */
- head_desc = list_first_entry(&chan->pending_list,
- struct xilinx_dma_desc_sw, node);
- tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_dma_desc_sw, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_dma_segment, node);
- if (chan->last_pending_list_size != chan->pending_list_size) {
- /* Match the interrupt coalescing count to the number of pending descriptors */
- // pr_warn("id: %d pending_list_size: %d\n", chan->id, chan->pending_list_size);
- chan->cntrl &= ~XILINX_DMA_XR_COALESCE_MASK;
- chan->cntrl |= chan->pending_list_size << XILINX_DMA_COALESCE_SHIFT;
- dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
- chan->last_pending_list_size = chan->pending_list_size;
- }
- dma_write(chan, XILINX_DMA_CDESC_OFFSET, head_desc->async_tx.phys);
- dma_start(chan);
- if (unlikely(chan->err))
- return;
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- /* Update tail ptr register and start the transfer */
- dma_write(chan, XILINX_DMA_TDESC_OFFSET, tail_segment->phys);
- chan->pending_list_size = 0;
- }
- static void xilinx_dma_check_descriptors(struct xilinx_dma_chan *chan);
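- /* dmaengine callback: if no transfer is active, reap finished work and start pending descriptors */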
- static void xilinx_dma_issue_pending(struct dma_chan *dchan)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
- unsigned long flags;
- if (!list_empty(&chan->active_list)) {
- return;
- }
- spin_lock_irqsave(&chan->lock, flags);
- xilinx_dma_check_descriptors(chan);
- xilinx_dma_start_transfer(chan);
- spin_unlock_irqrestore(&chan->lock, flags);
- }
- /**
- * xilinx_dma_check_descriptors - Move completed descriptors to the done list
- * @chan: xilinx DMA channel
- *
- * CONTEXT: called with chan->lock held
- */
- static void xilinx_dma_check_descriptors(struct xilinx_dma_chan *chan)
- {
- struct xilinx_dma_desc_sw *desc, *next;
- if (list_empty(&chan->active_list)) {
- // dev_dbg(chan->dev, "no running descriptors\n");
- return;
- }
- /* Iterate over descriptors */
- list_for_each_entry_safe(desc, next, &chan->active_list, node) {
- #ifdef DEBUG
- int control_len;
- int status_len;
- #endif
- struct xilinx_dma_segment *segment;
- // dev_dbg(chan->dev, "desc: %p\n", desc);
- /* Iterate over segments */
- list_for_each_entry(segment, &desc->segments, node) {
- struct xilinx_dma_desc_hw *hw = &segment->hw;
- // dev_dbg(chan->dev, "segment: %p\n", segment);
- // dump_desc_hw(chan, hw);
- #ifdef DEBUG
- if (hw->status & XILINX_DMA_HW_DESC_STATUS_ERROR_MASK)
- BUG();
- #endif
- if (!(hw->status & XILINX_DMA_HW_DESC_STATUS_CMPLIT))
- return;
- #ifdef DEBUG
- control_len = hw->control & XILINX_DMA_HW_DESC_LENGTH;
- status_len = hw->status & XILINX_DMA_HW_DESC_LENGTH;
- if (control_len != status_len) {
- dev_err(chan->dev, "clen: %d slen: %d\n", control_len, status_len);
- WARN_ON(control_len != status_len);
- }
- #endif
- }
- list_del(&desc->node);
- dma_cookie_complete(&desc->async_tx);
- list_add_tail(&desc->node, &chan->done_list);
- }
- }
- /**
- * xilinx_dma_chan_config - Configure DMA Channel IRQThreshold, IRQDelay
- * and enable interrupts
- * @chan: DMA channel
- */
- static void xilinx_dma_chan_config(struct xilinx_dma_chan *chan)
- {
- chan->cntrl = dma_read(chan, XILINX_DMA_CONTROL_OFFSET);
- chan->cntrl &= ~XILINX_DMA_XR_COALESCE_MASK;
- chan->cntrl |= chan->config.coalesc << XILINX_DMA_COALESCE_SHIFT;
- chan->cntrl &= ~XILINX_DMA_XR_DELAY_MASK;
- chan->cntrl |= chan->config.delay << XILINX_DMA_DELAY_SHIFT;
- chan->cntrl |= XILINX_DMA_XR_IRQ_ALL_MASK;
- dma_write(chan, XILINX_DMA_CONTROL_OFFSET, chan->cntrl);
- }
- /* Reset hardware */
- static int dma_reset(struct xilinx_dma_chan *chan)
- {
- int loop = XILINX_DMA_RESET_LOOP;
- u32 tmp;
- dma_write(chan, XILINX_DMA_CONTROL_OFFSET,
- dma_read(chan, XILINX_DMA_CONTROL_OFFSET) |
- XILINX_DMA_CR_RESET_MASK);
- tmp = dma_read(chan, XILINX_DMA_CONTROL_OFFSET) &
- XILINX_DMA_CR_RESET_MASK;
- /* Wait for the hardware to finish reset */
- while (loop && tmp) {
- tmp = dma_read(chan, XILINX_DMA_CONTROL_OFFSET) &
- XILINX_DMA_CR_RESET_MASK;
- loop -= 1;
- }
- chan->cntrl = tmp;
- if (!loop) {
- dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
- dma_read(chan, XILINX_DMA_CONTROL_OFFSET),
- dma_read(chan, XILINX_DMA_STATUS_OFFSET));
- return -EBUSY;
- }
- return 0;
- }
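- /* Per-channel interrupt handler: ack the IRQs, latch any error, defer cleanup to the tasklet */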
- static irqreturn_t dma_intr_handler(int irq, void *data)
- {
- struct xilinx_dma_chan *chan = data;
- u32 stat;
- irqreturn_t ret = IRQ_HANDLED;
- stat = dma_read(chan, XILINX_DMA_STATUS_OFFSET);
- if (!(stat & XILINX_DMA_XR_IRQ_ALL_MASK)) {
- dev_warn(chan->dev, "channel %p: spurious irq\n", chan);
- return IRQ_NONE;
- }
- /* Ack the interrupts */
- dma_write(chan, XILINX_DMA_STATUS_OFFSET,
- XILINX_DMA_XR_IRQ_ALL_MASK);
- if (stat & XILINX_DMA_XR_IRQ_ERROR_MASK) {
- dev_err(chan->dev,
- "Channel %p has errors %x, cdr %x tdr %x\n",
- chan,
- dma_read(chan, XILINX_DMA_STATUS_OFFSET),
- dma_read(chan, XILINX_DMA_CDESC_OFFSET),
- dma_read(chan, XILINX_DMA_TDESC_OFFSET));
- // dump_ctrl(chan);
- // dump_status(chan);
- chan->err = true;
- }
- /*
- * Device takes too long to do the transfer when user requires
- * responsiveness
- */
- // if (stat & XILINX_DMA_XR_IRQ_DELAY_MASK) {
- // dev_warn(chan->dev, "id: %d: Inter-packet latency too long\n", chan->id);
- // }
- // if (stat & (XILINX_DMA_XR_IRQ_IOC_MASK | XILINX_DMA_XR_IRQ_DELAY_MASK)) {
- // xilinx_dma_check_descriptors(chan);
- // xilinx_dma_start_transfer(chan);
- // }
- tasklet_schedule(&chan->tasklet);
- return ret;
- }
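- /* Tasklet: move completed descriptors to the done list, restart the engine if needed, run client callbacks */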
- static void xilinx_dma_do_tasklet(unsigned long data)
- {
- struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
- spin_lock(&chan->lock);
- // We should empty the whole active list and append it to the done list
- xilinx_dma_check_descriptors(chan);
- xilinx_dma_start_transfer(chan);
- xilinx_dma_complete_descriptors(chan);
- spin_unlock(&chan->lock);
- }
- /* Append the descriptor list to the pending list */
- static void append_desc_queue(struct xilinx_dma_chan *chan,
- struct xilinx_dma_desc_sw *desc)
- {
- struct xilinx_dma_desc_sw *tail_desc =
- list_last_entry(&chan->pending_list,
- struct xilinx_dma_desc_sw, node);
- struct xilinx_dma_segment *segment, *tail_segment;
- if (list_empty(&chan->pending_list))
- goto append;
- /*
- * Add the hardware descriptor to the chain of hardware descriptors
- * that already exists in memory.
- */
- segment = list_first_entry(&desc->segments,
- struct xilinx_dma_segment, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_dma_segment, node);
- tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
- /*
- * Add the software descriptor and all children to the list
- * of pending transactions
- */
- append:
- list_add_tail(&desc->node, &chan->pending_list);
- chan->pending_list_size++;
- if (unlikely(chan->pending_list_size > 0xFF)) {
- pr_warn("pending_list_size: %d\n", chan->pending_list_size);
- chan->pending_list_size = 0xFF;
- BUG();
- }
- }
- /*
- * Assign cookie to each descriptor, and append the descriptors to the pending
- * list
- */
- static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
- {
- struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
- struct xilinx_dma_desc_sw *desc = container_of(tx, struct xilinx_dma_desc_sw, async_tx);
- dma_cookie_t cookie = -EBUSY;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
- if (chan->err) {
- /*
- * If reset fails, need to hard reset the system.
- * Channel is no longer functional
- */
- if (!dma_reset(chan))
- chan->err = false;
- else
- goto out_unlock;
- }
- cookie = dma_cookie_assign(tx);
- /* Put this transaction onto the tail of the pending queue */
- append_desc_queue(chan, desc);
- out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
- return cookie;
- }
- /**
- * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: transfer ack flags
- */
- static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
- struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
- {
- struct xilinx_dma_chan *chan;
- struct xilinx_dma_desc_sw *desc;
- struct xilinx_dma_segment *segment, *prev = NULL;
- struct xilinx_dma_desc_hw *hw;
- size_t copy;
- int i;
- struct scatterlist *sg;
- size_t sg_used = 0;
- dma_addr_t dma_src;
- if (!dchan)
- return NULL;
- chan = to_xilinx_chan(dchan);
- if (chan->direction != direction)
- return NULL;
- /* Allocate a transaction descriptor */
- desc = xilinx_dma_alloc_descriptor(chan);
- if (!desc)
- return NULL;
- /* Build transactions using information in the scatter gather list */
- for_each_sg(sgl, sg, sg_len, i) {
- sg_used = 0;
- /* Loop until the entire scatterlist entry is used */
- while (sg_used < sg_dma_len(sg)) {
- /* Allocate the link descriptor from DMA pool */
- segment = xilinx_dma_alloc_segment(chan);
- if (!segment) {
- dev_err(chan->dev,
- "No free memory for segment\n");
- goto fail;
- }
- hw = &segment->hw;
- /*
- * Calculate the maximum number of bytes to transfer,
- * making sure it is less than the hw limit
- */
- copy = min((size_t)(sg_dma_len(sg) - sg_used),
- (size_t)chan->max_len);
- dma_src = sg_dma_address(sg) + sg_used;
- hw->buf_addr = dma_src;
- /* Fill in the descriptor */
- hw->control = copy;
- /*
- * If this is not the first descriptor, chain the
- * current descriptor after the previous descriptor
- *
- * For the first DMA_MEM_TO_DEV transfer, set SOP
- */
- if (prev) {
- prev = list_last_entry(&desc->segments,
- struct xilinx_dma_segment, node);
- prev->hw.next_desc = segment->phys;
- } else {
- if (direction == DMA_MEM_TO_DEV) {
- hw->control |= XILINX_DMA_HW_DESC_SOF;
- }
- }
- prev = segment;
- sg_used += copy;
- /* Insert the segment into the descriptor segments list. */
- list_add_tail(&segment->node, &desc->segments);
- }
- }
- /* All scatter gather list entries have length == 0 */
- if (!sg_used) {
- BUG();
- }
- segment = list_first_entry(&desc->segments,
- struct xilinx_dma_segment, node);
- dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- desc->async_tx.phys = segment->phys;
- desc->async_tx.flags = flags;
- desc->async_tx.cookie = -EBUSY;
- async_tx_ack(&desc->async_tx);
- /* Link the last hardware descriptor with the first. */
- hw->next_desc = segment->phys;
- /* Set EOP to the last link descriptor of new list */
- hw->control |= XILINX_DMA_HW_DESC_EOF;
- return &desc->async_tx;
- fail:
- xilinx_dma_free_descriptor(chan, desc);
- return NULL;
- }
- /* Run-time device configuration for Axi DMA */
- static int xilinx_dma_device_control(struct dma_chan *dchan,
- enum dma_ctrl_cmd cmd, unsigned long arg)
- {
- struct xilinx_dma_chan *chan;
- if (!dchan)
- return -EINVAL;
- chan = to_xilinx_chan(dchan);
- if (cmd == DMA_TERMINATE_ALL) {
- /* Halt the DMA engine */
- dma_halt(chan);
- spin_lock_bh(&chan->lock);
- /* Remove and free all of the descriptors in the lists */
- xilinx_dma_free_desc_list(chan, &chan->pending_list);
- xilinx_dma_free_desc_list(chan, &chan->active_list);
- xilinx_dma_free_desc_list(chan, &chan->done_list);
- spin_unlock_bh(&chan->lock);
- return 0;
- } else if (cmd == DMA_SLAVE_CONFIG) {
- /*
- * Configure interrupt coalescing and delay counter
- * Use value XILINX_DMA_NO_CHANGE to signal no change
- */
- struct xilinx_dma_config *cfg = (struct xilinx_dma_config *)arg;
- if (cfg->coalesc <= XILINX_DMA_COALESCE_MAX)
- chan->config.coalesc = cfg->coalesc;
- if (cfg->delay <= XILINX_DMA_DELAY_MAX)
- chan->config.delay = cfg->delay;
- xilinx_dma_chan_config(chan);
- return 0;
- } else
- return -ENXIO;
- }
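- /*
- * Illustrative (a sketch, not part of this file): a client can tune
- * interrupt coalescing and the delay timer by passing a
- * struct xilinx_dma_config through DMA_SLAVE_CONFIG, e.g.:
- *
- *   struct xilinx_dma_config cfg = { .coalesc = 1, .delay = 0 };
- *   dmaengine_device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg);
- *
- * Values above XILINX_DMA_COALESCE_MAX / XILINX_DMA_DELAY_MAX are ignored.
- */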
- static void xilinx_dma_free_channels(struct xilinx_dma_device *xdev)
- {
- int i;
- for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) {
- if (!xdev->chan[i])
- continue;
- list_del(&xdev->chan[i]->common.device_node);
- tasklet_kill(&xdev->chan[i]->tasklet);
- irq_dispose_mapping(xdev->chan[i]->irq);
- }
- }
- /*
- * Probing channels
- *
- * . Get channel features from the device tree entry
- * . Initialize special channel handling routines
- */
- static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
- struct device_node *node)
- {
- struct xilinx_dma_chan *chan;
- int err;
- u32 device_id;
- /* alloc channel */
- chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- chan->max_len = XILINX_DMA_MAX_TRANS_LEN;
- chan->config.coalesc = 0x01;
- err = of_property_read_u32(node, "xlnx,device-id", &device_id);
- if (err) {
- dev_err(xdev->dev, "unable to read device id property");
- return err;
- }
- if (of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel")) {
- chan->regs = xdev->regs;
- chan->id = 0;
- chan->direction = DMA_MEM_TO_DEV;
- } else if (of_device_is_compatible(node, "xlnx,axi-dma-s2mm-channel")) {
- chan->regs = (xdev->regs + XILINX_DMA_RX_CHANNEL_OFFSET);
- chan->id = 1;
- chan->direction = DMA_DEV_TO_MEM;
- } else {
- dev_err(xdev->dev, "Invalid channel compatible node\n");
- return -EINVAL;
- }
- /*
- * Used by dmatest channel matching in slave transfers
- * Can change it to be a structure to have more matching information
- */
- chan->private = (chan->direction & 0xFF) | XILINX_DMA_IP_DMA |
- (device_id << XILINX_DMA_DEVICE_ID_SHIFT);
- chan->common.private = (void *)&(chan->private);
- dma_cookie_init(&chan->common);
- chan->dev = xdev->dev;
- xdev->chan[chan->id] = chan;
- /* Initialize the channel */
- err = dma_reset(chan);
- if (err) {
- dev_err(xdev->dev, "Reset channel failed\n");
- return err;
- }
- spin_lock_init(&chan->lock);
- INIT_LIST_HEAD(&chan->pending_list);
- chan->pending_list_size = 0;
- chan->last_pending_list_size = 1;
- INIT_LIST_HEAD(&chan->active_list);
- INIT_LIST_HEAD(&chan->done_list);
- chan->common.device = &xdev->common;
- /* find the IRQ line, if it exists in the device tree */
- chan->irq = irq_of_parse_and_map(node, 0);
- err = devm_request_irq(xdev->dev, chan->irq, dma_intr_handler,
- IRQF_NO_THREAD,
- "xilinx-dma-controller", chan);
- if (err) {
- dev_err(xdev->dev, "unable to request IRQ\n");
- return err;
- }
- tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, (unsigned long)chan);
- /* Add the channel to DMA device channel list */
- list_add_tail(&chan->common.device_node, &xdev->common.channels);
- return 0;
- }
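- /* Platform probe: map registers, probe each channel child node and register the dmaengine device */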
- static int xilinx_dma_probe(struct platform_device *pdev)
- {
- struct xilinx_dma_device *xdev;
- struct device_node *child, *node;
- struct resource *res;
- int ret;
- unsigned int i;
- node = pdev->dev.of_node;
- if (of_get_child_count(node) == 0) {
- dev_err(&pdev->dev, "no channels defined\n");
- return -ENODEV;
- }
- xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
- if (!xdev)
- return -ENOMEM;
- xdev->dev = &(pdev->dev);
- INIT_LIST_HEAD(&xdev->common.channels);
- /* iomap registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xdev->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
- /* The AXI DMA engine only does slave transfers */
- dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
- dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
- xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
- xdev->common.device_control = xilinx_dma_device_control;
- xdev->common.device_issue_pending = xilinx_dma_issue_pending;
- xdev->common.device_alloc_chan_resources =
- xilinx_dma_alloc_chan_resources;
- xdev->common.device_free_chan_resources =
- xilinx_dma_free_chan_resources;
- xdev->common.device_tx_status = xilinx_tx_status;
- xdev->common.dev = &pdev->dev;
- platform_set_drvdata(pdev, xdev);
- for_each_child_of_node(node, child) {
- ret = xilinx_dma_chan_probe(xdev, child);
- if (ret) {
- dev_err(&pdev->dev, "Probing channels failed\n");
- goto free_chan_resources;
- }
- }
- for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; ++i) {
- if (xdev->chan[i]) {
- xilinx_dma_chan_config(xdev->chan[i]);
- }
- }
- ret = dma_async_device_register(&xdev->common);
- if (ret) {
- dev_err(&pdev->dev, "DMA device registration failed\n");
- goto free_chan_resources;
- }
- dev_info(&pdev->dev, "probed\n");
- return 0;
- free_chan_resources:
- xilinx_dma_free_channels(xdev);
- return ret;
- }
- static int xilinx_dma_remove(struct platform_device *pdev)
- {
- struct xilinx_dma_device *xdev;
- xdev = platform_get_drvdata(pdev);
- dma_async_device_unregister(&xdev->common);
- xilinx_dma_free_channels(xdev);
- return 0;
- }
- static const struct of_device_id xilinx_dma_of_match[] = {
- { .compatible = "xlnx,axi-dma", },
- {}
- };
- MODULE_DEVICE_TABLE(of, xilinx_dma_of_match);
- static struct platform_driver xilinx_dma_driver = {
- .driver = {
- .name = "xilinx-dma",
- .of_match_table = xilinx_dma_of_match,
- },
- .probe = xilinx_dma_probe,
- .remove = xilinx_dma_remove,
- };
- module_platform_driver(xilinx_dma_driver);
- MODULE_AUTHOR("Xilinx, Inc.");
- MODULE_DESCRIPTION("Xilinx DMA driver");
- MODULE_LICENSE("GPL v2");