rsp pci driver
Feb 21st, 2012
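
/*
 * rsp_setup_dma() - pin a user-space buffer for DMA.
 *
 * Works out how many pages the range [data, data + size) spans, records the
 * offset into the first page, and pins the pages with get_user_pages().
 * For example (hypothetical values, 4 KiB pages): data = 0x00403804 and
 * size = 0x1000 span pages 0x403 and 0x404, so nr_pages = 2 and
 * dma->offset = 0x804.  Returns 0 on success or a negative errno.
 */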
int rsp_setup_dma(struct rsp_dma *dma, int direction, unsigned long data,
        u32 size)
{
    u32 first, last;
    int err;

    /*
     * PAGE_SHIFT = 12
     * PAGE_SIZE  = 4 KiB
     * PAGE_MASK  = ~(PAGE_SIZE - 1) = ~0x00000FFF = 0xFFFFF000
     */

    dma->direction = direction;
    first = (data & PAGE_MASK) >> PAGE_SHIFT;               /* first page number */
    last = ((data + size - 1) & PAGE_MASK) >> PAGE_SHIFT;   /* last page number */
    dma->offset = data & ~PAGE_MASK;                        /* offset into first page */
    dma->nr_pages = last - first + 1;
    msg_dbg("dma nr_pages %d", dma->nr_pages);

    dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
    if (!dma->pages) {
        msg_err("NULL == dma->pages");
        return -ENOMEM;
    }
    msg_dbg("init user [0x%lx+0x%x => %d pages]", data, size, dma->nr_pages);

    /* Pin the user pages so the device can DMA to/from them. */
    down_read(&current->mm->mmap_sem);
    err = get_user_pages(current, current->mm, data & PAGE_MASK,
            dma->nr_pages, direction == PCI_DMA_FROMDEVICE, 1, dma->pages,
            NULL);
    up_read(&current->mm->mmap_sem);

    if (err == dma->nr_pages) {
        msg_dbg("get_user_pages ok, %d pages", err);
        return 0;
    }

    msg_err("get_user_pages failed, err=%d must be %d", err, dma->nr_pages);
    /* Release any pages that were pinned before failing. */
    while (err > 0)
        page_cache_release(dma->pages[--err]);
    kfree(dma->pages);
    dma->pages = NULL;
    return -EINVAL;
}

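/*
 * dma_uninit_user() - free the bookkeeping for a user DMA buffer (the
 * scatterlist and the page-pointer array).  The pinned pages themselves are
 * released in the I/O path once the transfer has completed.
 */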
int dma_uninit_user(struct rsp_dma *dma)
{
    if (dma == NULL) {
        msg_err("dma_uninit_user dma==NULL");
        return 0;
    }
    kfree(dma->sglist);
    kfree(dma->pages);
    return 0;
}

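/*
 * rsp_setup_sglist() - build a scatterlist covering the pinned pages.
 * Only the first entry uses a non-zero offset; every entry runs to the end
 * of its page.
 */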
struct scatterlist *rsp_setup_sglist(struct page **pages, int nr_pages,
        int offset)
{
    struct scatterlist *sglist;
    int i, o;

    if (pages == NULL) {
        msg_err("rsp_setup_sglist error pages == NULL");
        return NULL;
    }

    sglist = kmalloc(sizeof(*sglist) * nr_pages, GFP_KERNEL);
    if (sglist == NULL) {
        msg_err("rsp_setup_sglist kmalloc error sglist==NULL");
        return NULL;
    }
    memset(sglist, 0, sizeof(*sglist) * nr_pages);

    for (i = 0; i < nr_pages; i++) {
        if (PageHighMem(pages[i]))
            msg_dbg("page in high memory\n");
        else
            msg_dbg("page in low memory\n");

        o = (i == 0) ? offset : 0;

        if (pages[i] == NULL) {
            msg_err("rsp_setup_sglist error page %d == NULL", i);
            kfree(sglist);
            return NULL;
        }
        sg_set_page(&sglist[i], pages[i], PAGE_SIZE - o, o);
//      sglist[i].page_link = (unsigned long) pages[i];
//      sglist[i].length = PAGE_SIZE - o;
    }
    return sglist;
}
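
/*
 * rsp_write() - write() handler: DMA the user buffer to the device.
 * Pins the buffer, builds and maps a scatterlist, programs the board's DMA
 * registers with each segment's bus address and length, then sleeps on the
 * interrupt wait queue until the transfer completes or TIME_MAX expires.
 */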
static ssize_t rsp_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
{
    struct rsp_device *device = file->private_data;
    ssize_t retval = 0;
    unsigned long l_addr, p_addr;
    u32 v32, l32;
    int i, n;
    DECLARE_WAITQUEUE(wait, current);

    msg_dbg(" Entering function:%s\n", __func__);

    if (device == NULL) {
        msg_err(" rsp_write device==NULL");
        return 0;
    }
    msg_dbg(" User buffer address = %p\n", buf);

    device->direction = READ;
    device->dma_r.direction = PCI_DMA_TODEVICE;
    device->intr_mask = 0x02 << REG_SHIFT_R;

    if (rsp_setup_dma(&(device->dma_r), PCI_DMA_TODEVICE, (unsigned long)buf, count) == 0) {

        msg_dbg(" mapping ok");
        msg_dbg(" offset:%X", device->dma_r.offset);

        l_addr = (unsigned long)page_address(device->dma_r.pages[0]);
        if (l_addr != 0) {
            msg_dbg(" page logical addr = 0x%lx\n", l_addr);
            p_addr = __pa(l_addr);
            if (p_addr != 0)
                msg_dbg(" page phy addr = 0x%lx\n", p_addr);
            else
                msg_dbg(" page phy addr = NULL\n");
        } else {
            msg_dbg(" page logical addr = NULL\n");
        }

        device->dma_r.sglist = rsp_setup_sglist(device->dma_r.pages,
                device->dma_r.nr_pages,
                device->dma_r.offset);
        if (device->dma_r.sglist == NULL) {
            msg_err(" error generating scatter-gather list, aborting write");
            return -EIO;
        }

        device->dma_r.sglen = pci_map_sg(device->pdev,
                device->dma_r.sglist,
                device->dma_r.nr_pages,
                device->dma_r.direction);

        if (device->dma_r.sglen == 0) {
            msg_err(" pci_map_sg failed\n");
            kfree(device->dma_r.sglist);
            device->dma_r.sglist = NULL;
            device->dma_r.sglen = 0;
            retval = -EIO;
            goto __xexit;
        }
        msg_dbg(" mapping OK: %i buffers", device->dma_r.sglen);

        pci_dma_sync_sg_for_device(device->pdev,
                device->dma_r.sglist,
                device->dma_r.sglen,
                device->dma_r.direction);

        msg_dbg(" setup dma controller for read");

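        /*
         * Program the DMA controller with each scatterlist segment.  Every
         * segment's bus address and length are written to the same
         * M_RD_ADDR/M_RD_CNT register pair, so the hardware presumably
         * queues the descriptors internally (up to MAX_DMA_PAGES of them).
         */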
        n = device->dma_r.nr_pages;
        if (n > MAX_DMA_PAGES)
            n = MAX_DMA_PAGES;

        /* Every segment must start on a 4 KiB page boundary. */
        for (i = 0; i < n; i++) {
            v32 = sg_dma_address(&device->dma_r.sglist[i]);

            if (v32 & ~PAGE_MASK) {
                msg_err("DMA buffer is not aligned to 4 KiB, v32=0x%x", v32);
                device->flag = 2;
                goto __xcleanup_dma;
            }
        }

        for (i = 0; i < n; i++) {
            msg_dbg(" PCI_DMA_TODEVICE page %d go", i);
            v32 = sg_dma_address(&device->dma_r.sglist[i]);
            l32 = sg_dma_len(&device->dma_r.sglist[i]);
            msg_dbg(" write Master Write Address %x,%x", v32, l32);

            iowrite32(v32, (void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_RD_ADDR));
            iowrite32(l32, (void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_RD_CNT));
        }

        msg_dbg(" prepare wait queue");

        add_wait_queue(&device->intr_wait, &wait);

        device->flag = 1;

        msg_dbg(" go task interruptible");
        set_current_state(TASK_INTERRUPTIBLE);

        msg_dbg(" start dma read transfer");

        /* Set the START_RD bit in the mode register to kick off the transfer. */
        v32 = ioread32((void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_MODE));
        v32 |= START_RD;
        iowrite32(v32, (void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_MODE));

        /* Sleep until the interrupt handler wakes us or the timeout expires. */
        schedule_timeout(msecs_to_jiffies(TIME_MAX));

        set_current_state(TASK_RUNNING);
        remove_wait_queue(&device->intr_wait, &wait);
        msg_dbg(" waiting finished");

        if (device->flag == 1) {
            msg_err(" write timed out, abort!!!");
            //      break;
        }

        msg_dbg(" PCI_DMA_TODEVICE loop pages %d finished", n);

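        /*
         * Common cleanup (also reached via __xcleanup_dma on an alignment
         * error): hand the buffer back to the CPU, mark the pages dirty,
         * drop the page references taken by get_user_pages(), and unmap
         * the scatterlist.
         */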
        __xcleanup_dma:

        msg_dbg(" pci_dma_sync_sg_for_cpu start");
        pci_dma_sync_sg_for_cpu(device->pdev,
                device->dma_r.sglist,
                device->dma_r.sglen,
                device->dma_r.direction);

        msg_dbg(" pci_dma_sync_sg_for_cpu ok");
        for (i = 0; i < device->dma_r.nr_pages; i++) {
            if (!PageReserved(device->dma_r.pages[i]))
                SetPageDirty(device->dma_r.pages[i]);
            page_cache_release(device->dma_r.pages[i]);
        }

        msg_dbg(" pci_unmap_sg start");
        pci_unmap_sg(device->pdev,
                device->dma_r.sglist,
                device->dma_r.nr_pages,
                device->dma_r.direction);
        msg_dbg(" pci_unmap_sg ok");

        if (device->flag == 1)
            retval = -EIO;          /* transfer timed out */
        else if (device->flag == 2)
            retval = -ENOBUFS;      /* segment not page-aligned */
        else
            retval = count;

        __xexit:
        kfree(device->dma_r.sglist);
        kfree(device->dma_r.pages);

    } else {
        msg_err(" mapping error");
    }
    return retval;
}