static int
rsp_setup_dma (struct rsp_dma *dma, int direction, unsigned long data, u32 size)
{
    u32 first, last;
    int ret, i;

    dma->direction = direction;
    first = (data & PAGE_MASK) >> PAGE_SHIFT;               /* first page number */
    last = ((data + size - 1) & PAGE_MASK) >> PAGE_SHIFT;   /* last page number */
    dma->offset = data & ~PAGE_MASK;        /* byte offset into the first page */
    dma->nr_pages = last - first + 1;
    dma->tail = 1 + ((data + size - 1) & ~PAGE_MASK);  /* bytes used in the last page */
    msg_dbg("dma nr_pages %d", dma->nr_pages);

    dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
    if (!dma->pages)
    {
        msg_err("NULL == dma->pages");
        return -ENOMEM;
    }
    msg_dbg("init user [0x%lx+0x%x => %d pages]", data, size, dma->nr_pages);

    /* pin the user buffer; writable only when the device writes into it */
    down_read(&current->mm->mmap_sem);
    ret = get_user_pages(current, current->mm, data & PAGE_MASK,
                         dma->nr_pages, direction == PCI_DMA_FROMDEVICE, 0,
                         dma->pages, NULL);
    up_read(&current->mm->mmap_sem);

    if (ret == dma->nr_pages)
    {
        msg_dbg("get_user_pages ok, %d pages", ret);
        return 0;
    }

    msg_err("get_user_pages failed, ret=%d must be %d", ret, dma->nr_pages);
    /* release whatever was pinned before the partial failure */
    for (i = 0; i < ret; i++)
        page_cache_release(dma->pages[i]);
    kfree(dma->pages);
    dma->pages = NULL;
    return -EINVAL;
}
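
/*
 * Worked example of the page arithmetic above, assuming 4 KiB pages:
 * data = 0x10000f00, size = 0x300
 *   first  = 0x10000, last = 0x10001  -> nr_pages = 2
 *   offset = 0xf00  (start within the first page)
 *   tail   = 0x200  (bytes occupied in the last page)
 */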

struct scatterlist*
rsp_setup_sglist(struct page **pages, int nr_pages, int offset, int tail)
{
    struct scatterlist *sglist;
    int i;

    if (pages == NULL)
    {
        msg_err("rsp_setup_sglist error pages == NULL");
        return NULL;
    }

    sglist = kmalloc(sizeof(*sglist) * nr_pages, GFP_KERNEL);
    if (NULL == sglist)
    {
        msg_err("rsp_setup_sglist kmalloc error sglist==NULL");
        return NULL;
    }

    /* zeroes the list and sets the end marker (required for sg chaining) */
    sg_init_table(sglist, nr_pages);

    for (i = 0; i < nr_pages; i++)
    {
        if (pages[i] == NULL) {
            msg_err("rsp_setup_sglist error page %d == NULL", i);
            kfree(sglist);
            return NULL;
        }
        if (nr_pages == 1) {
            /* single-page buffer: offset and tail both fall in this page */
            sg_set_page(&sglist[i], pages[i], tail - offset, offset);
        } else if (i == 0) {
            sg_set_page(&sglist[i], pages[i], PAGE_SIZE - offset, offset);
        } else if (i == nr_pages - 1) {
            sg_set_page(&sglist[i], pages[i], tail, 0);
        } else {
            sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
        }
    }

    return sglist;
}
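
/*
 * Continuing the example above (offset = 0xf00, tail = 0x200, 2 pages):
 *   sglist[0]: offset 0xf00, length 0x100 (PAGE_SIZE - offset)
 *   sglist[1]: offset 0,     length 0x200 (tail)
 * The segment lengths sum to 0x300, matching the original size.
 */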


irqreturn_t
fpga_int_handler (int irq, void * dev_id)
{
    struct rsp_device *rsp_device = (struct rsp_device *) dev_id;
    u32 job_id;
    u32 v32;

    msg_dbg("fpga_int_handler started\n");
    if (irq != rsp_device->intr)
    {
        msg_err("error in fpga_int_handler: irq:%i != intr:%i", irq, rsp_device->intr);
        return IRQ_NONE;
    }

    v32 = ioread32((void *)((unsigned long)rsp_device->bar[BAR_NUMBER].vaddr + M_MODE));
    msg_dbg("v32 = %d\n", v32);
    if ((int)v32 == -1)         /* all-ones read: device gone or bus error */
        return IRQ_NONE;
    if (!((v32 & COMPL_WR) || (v32 & COMPL_RD)))
        return IRQ_NONE;        /* not our interrupt (shared line) */

    if (rsp_device->direction == READ)
        fpga_finish_dma_write(rsp_device);
    else
        fpga_finish_dma_read(rsp_device);
    job_id = fpga_scheduler_dma_job_finished(fpga_task_scheduler);

    /* acknowledge the completion bits before writing the mode register back */
    v32 &= ~(COMPL_WR | COMPL_RD | INT_REG);

    msg_dbg("Mark job %u as done\n", job_id);
    spin_lock(&jobs_lock);
    if (job_id != 0)
        mark_job_as_done(job_id);
    spin_unlock(&jobs_lock);
    iowrite32(v32, (void *)((unsigned long)rsp_device->bar[BAR_NUMBER].vaddr + M_MODE));

    msg_dbg("fpga_int_handler finished\n");
    return IRQ_HANDLED;
}
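
/*
 * Hedged sketch: how this handler would typically be hooked up during
 * probe. IRQF_SHARED matches the IRQ_NONE bail-outs above; the helper
 * name and the "rsp_fpga" string are assumptions, not from the original.
 */
static int fpga_register_irq(struct rsp_device *rsp_device)
{
    /* pass rsp_device as dev_id so the handler can verify the line */
    return request_irq(rsp_device->intr, fpga_int_handler,
                       IRQF_SHARED, "rsp_fpga", rsp_device);
}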

int
fpga_push_data_to_device (struct rsp_device *rsp_dev, u64 host_addr, u64 size)
{
    int num;
    int i;
    u32 v32, l32;
    u32 cur_buf_addr, cur_buf_length, tail;

    rsp_dev->direction = READ;
    rsp_dev->dma_r.direction = PCI_DMA_TODEVICE;
    rsp_dev->intr_mask = 0x02 << REG_SHIFT_R;

    if (rsp_setup_dma(&(rsp_dev->dma_r), PCI_DMA_TODEVICE, host_addr, size) != 0)
        return -1;
    rsp_dev->dma_r.sglist = rsp_setup_sglist(rsp_dev->dma_r.pages,
                                             rsp_dev->dma_r.nr_pages,
                                             rsp_dev->dma_r.offset,
                                             rsp_dev->dma_r.tail);
    if (rsp_dev->dma_r.sglist == NULL)
        goto err_release_pages;

    rsp_dev->dma_r.sglen = pci_map_sg(rsp_dev->pdev,
                                      rsp_dev->dma_r.sglist,
                                      rsp_dev->dma_r.nr_pages,
                                      rsp_dev->dma_r.direction);
    if (rsp_dev->dma_r.sglen == 0)
    {
        kfree(rsp_dev->dma_r.sglist);
        rsp_dev->dma_r.sglist = NULL;
        goto err_release_pages;
    }

    pci_dma_sync_sg_for_device(rsp_dev->pdev,
                               rsp_dev->dma_r.sglist,
                               rsp_dev->dma_r.sglen,
                               rsp_dev->dma_r.direction);

    /* iterate over the mapped entries (sglen), which may be fewer than
       nr_pages if the IOMMU coalesced segments */
    num = rsp_dev->dma_r.sglen;
    if (num > MAX_DMA_PAGES)
        num = MAX_DMA_PAGES;

    for (i = 0; i < num; i++) {
        msg_dbg(" PCI_DMA_TODEVICE segment %d go", i);
        cur_buf_addr = sg_dma_address(&rsp_dev->dma_r.sglist[i]);
        cur_buf_length = sg_dma_len(&rsp_dev->dma_r.sglist[i]);
        msg_dbg(" DMA write allocated buffer v32=%x, l32=%x", cur_buf_addr, cur_buf_length);

        /* split each segment at page boundaries for the device's
           address/count register pair */
        while (cur_buf_length > 0) {
            tail = PAGE_SIZE - (cur_buf_addr & (PAGE_SIZE - 1));
            l32 = (cur_buf_length < tail) ? cur_buf_length : tail;
            v32 = cur_buf_addr;
            msg_dbg(" DMA write transaction buffer v32=%x, l32=%x, tail=%x, cbl=%x",
                    v32, l32, tail, cur_buf_length);

            iowrite32(v32, (void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr
                                    + M_RD_ADDR));
            iowrite32(l32, (void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr
                                    + M_RD_CNT));
            cur_buf_addr += l32;
            cur_buf_length -= l32;
        }
    }

    /* kick off the device-side read of host memory */
    v32 = ioread32((void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr + M_MODE));
    v32 |= START_RD;
    iowrite32(v32, (void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr + M_MODE));

    return 0;

err_release_pages:
    /* undo the get_user_pages() pinning done by rsp_setup_dma() */
    for (i = 0; i < rsp_dev->dma_r.nr_pages; i++)
        page_cache_release(rsp_dev->dma_r.pages[i]);
    kfree(rsp_dev->dma_r.pages);
    rsp_dev->dma_r.pages = NULL;
    return -1;
}
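
/*
 * Hedged usage sketch: pushing a user buffer from a write() handler.
 * The function name and the file->private_data layout are assumptions;
 * completion arrives asynchronously via fpga_int_handler().
 */
static ssize_t rsp_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
    struct rsp_device *rsp_dev = file->private_data;

    if (fpga_push_data_to_device(rsp_dev, (u64)(unsigned long)buf, count) != 0)
        return -EIO;
    return count;   /* the device consumes the data asynchronously */
}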

static void
fpga_finish_dma_write (struct rsp_device * device)
{
    int i;

    msg_dbg("Finish DMA write started\n");
    pci_dma_sync_sg_for_cpu(device->pdev,
                            device->dma_r.sglist,
                            device->dma_r.sglen,
                            device->dma_r.direction);

    /* unmap before dropping the page references */
    pci_unmap_sg(device->pdev, device->dma_r.sglist,
                 device->dma_r.nr_pages, device->dma_r.direction);

    for (i = 0; i < device->dma_r.nr_pages; i++)
    {
        if (!PageReserved(device->dma_r.pages[i]))
            SetPageDirty(device->dma_r.pages[i]);

        page_cache_release(device->dma_r.pages[i]);
    }

    kfree(device->dma_r.sglist);
    kfree(device->dma_r.pages);
    msg_dbg("Finish DMA write success\n");
}
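
/*
 * Hedged sketch: the FROMDEVICE counterpart invoked from the interrupt
 * handler. The original paste does not include fpga_finish_dma_read();
 * this mirrors fpga_finish_dma_write() under the assumption that the
 * read channel keeps its state in an analogous device->dma_w member.
 * Marking the pages dirty matters here, since the device wrote into them.
 */
static void
fpga_finish_dma_read (struct rsp_device * device)
{
    int i;

    pci_dma_sync_sg_for_cpu(device->pdev, device->dma_w.sglist,
                            device->dma_w.sglen, device->dma_w.direction);
    pci_unmap_sg(device->pdev, device->dma_w.sglist,
                 device->dma_w.nr_pages, device->dma_w.direction);

    for (i = 0; i < device->dma_w.nr_pages; i++)
    {
        if (!PageReserved(device->dma_w.pages[i]))
            SetPageDirty(device->dma_w.pages[i]);
        page_cache_release(device->dma_w.pages[i]);
    }

    kfree(device->dma_w.sglist);
    kfree(device->dma_w.pages);
}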