int rsp_setup_dma(struct rsp_dma *dma, int direction, unsigned long data, u32 size)
{
	u32 first, last;
	int err;

	// PAGE_SHIFT = 12
	// PAGE_SIZE  = 4k
	// PAGE_MASK  = ~(PAGE_SIZE - 1) = ~0x00000FFF = 0xFFFFF000
	dma->direction = direction;
	first = (data & PAGE_MASK) >> PAGE_SHIFT;               // first page number
	last  = ((data + size - 1) & PAGE_MASK) >> PAGE_SHIFT;  // last page number
	dma->offset = data & ~PAGE_MASK;
	dma->nr_pages = last - first + 1;
	msg_dbg("dma nr_pages %d", dma->nr_pages);

	dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!dma->pages) {
		msg_err("NULL == dma->pages");
		return -ENOMEM;
	}
	msg_dbg("init user [0x%lx+0x%x => %d pages]", data, size, dma->nr_pages);

	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, data & PAGE_MASK,
			     dma->nr_pages,
			     direction == PCI_DMA_FROMDEVICE, /* write access only when the device fills the pages */
			     1,                               /* force */
			     dma->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (err == dma->nr_pages) {
		msg_dbg("get_user_pages ok, %d pages", err);
		return 0;
	} else {
		msg_err("get_user_pages failed, err=%d must be %d", err, dma->nr_pages);
		return -EINVAL;
	}
}

int dma_uninit_user(struct rsp_dma *dma)
{
	if (dma == NULL) {
		msg_err("dma_uninit_user dma==NULL");
		return 0;
	}
	kfree(dma->sglist);
	kfree(dma->pages);
	return 0;
}

struct scatterlist *rsp_setup_sglist(struct page **pages, int nr_pages, int offset)
{
	struct scatterlist *sglist;
	int i, o;

	if (pages == NULL) {
		msg_err("rsp_setup_sglist error pages == NULL");
		return NULL;
	}
	sglist = kmalloc(sizeof(*sglist) * nr_pages, GFP_KERNEL);
	if (NULL == sglist) {
		msg_err("rsp_setup_sglist kmalloc error sglist==NULL");
		return NULL;
	}
	memset(sglist, 0, sizeof(*sglist) * nr_pages);

	for (i = 0; i < nr_pages; i++) {
		if (PageHighMem(pages[i]))
			msg_dbg("page in high memory\n");
		else
			msg_dbg("page in low memory\n");

		o = (i == 0) ? offset : 0;	/* only the first page starts at a non-zero offset */

		if (pages[i] == NULL) {
			msg_err("rsp_setup_sglist error page %d == NULL", i);
			kfree(sglist);
			return NULL;
		}
		sg_set_page(&sglist[i], pages[i], PAGE_SIZE - o, o);
		// sglist[i].page_link = (unsigned long) pages[i];
		// sglist[i].length    = PAGE_SIZE - o;
	}
	return sglist;
}

static ssize_t rsp_write(struct file *file, const char __user *buf, size_t count, loff_t *offset)
{
	struct rsp_device *device = file->private_data;
	ssize_t retval = 0;
	u32 v32, l32;
	int i, n;

	msg_dbg(" Entering function:%s\n", __FUNCTION__);
	if (device == NULL) {
		msg_err(" rsp_write device==NULL");
		return 0;
	}
	msg_dbg(" User buffer address = %p\n", buf);

	/* a host write is served by the board's master-read DMA engine (M_RD_* registers) */
	device->direction = READ;
	device->dma_r.direction = PCI_DMA_TODEVICE;
	device->intr_mask = 0x02;	/* the "<<" shift in the original listing is truncated */

	if (rsp_setup_dma(&(device->dma_r), PCI_DMA_TODEVICE, (unsigned long)buf, count) == 0) {
		unsigned long l_addr, p_addr;

		msg_dbg(" mapping ok");
		msg_dbg(" offset:%X", device->dma_r.offset);

		l_addr = (unsigned long)page_address(device->dma_r.pages[0]);
		if (l_addr != 0) {
			msg_dbg(" page logical addr = 0x%lx\n", l_addr);
			p_addr = __pa(l_addr);
			if (p_addr != 0)
				msg_dbg(" page phy addr = 0x%lx\n", p_addr);
			else
				msg_dbg(" page phy addr = NULL\n");
		} else
			msg_dbg(" page logical addr = NULL\n");

		device->dma_r.sglist = rsp_setup_sglist(device->dma_r.pages,
							device->dma_r.nr_pages,
							device->dma_r.offset);
		if (device->dma_r.sglist == NULL) {
			msg_err(" error generating scatter-gather list, aborting write");
			/* release the user pages pinned by rsp_setup_dma() before bailing out */
			for (i = 0; i < device->dma_r.nr_pages; i++)
				page_cache_release(device->dma_r.pages[i]);
			kfree(device->dma_r.pages);
			return -EIO;
		}
		device->dma_r.sglen = pci_map_sg(device->pdev,
						 device->dma_r.sglist,
						 device->dma_r.nr_pages,
						 device->dma_r.direction);
		if (device->dma_r.sglen == 0) {
			msg_err(" pci_map_sg failed\n");
			kfree(device->dma_r.sglist);
			device->dma_r.sglist = NULL;
			device->dma_r.sglen = 0;
			/* release the user pages pinned by rsp_setup_dma() */
			for (i = 0; i < device->dma_r.nr_pages; i++)
				page_cache_release(device->dma_r.pages[i]);
			retval = -EIO;
			goto __xexit;
		}
		msg_dbg(" mapping OK: %i buffers", device->dma_r.sglen);
buffers",device->dma_r.sglen); pci_dma_sync_sg_for_device(device->pdev, device->dma_r.sglist, device->dma_r.sglen, device->dma_r.direction); msg_dbg(" setup dma controller for read"); n=device->dma_r.nr_pages; if(n>MAX_DMA_PAGES)n=MAX_DMA_PAGES; for (i=0;idma_r.sglist[i]); if (v32 & (~PAGE_MASK) ) { msg_err("DMA buffer is not alligned to 4kb, v32=0x%x", v32); device->flag=2; goto __xclenup_dma; } } for (i=0;idma_r.sglist[i]); l32=sg_dma_len(&device->dma_r.sglist[i]); msg_dbg(" write Master Write Address %x,%x",v32,l32); iowrite32(v32, (void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_RD_ADDR)); iowrite32(l32, (void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_RD_CNT)); } msg_dbg(" prepare wait queue"); DECLARE_WAITQUEUE(wait,current); add_wait_queue(&device->intr_wait,&wait); device->flag=1; msg_dbg(" go task intmsg_erruptable"); current->state=TASK_INTERRUPTIBLE; msg_dbg(" start dma read transfer"); v32 = ioread32((void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_MODE)); v32 |= START_RD; iowrite32(v32, (void *)((unsigned long)device->bar[CNTL_BAR].vaddr + M_MODE)); schedule_timeout(msecs_to_jiffies(TIME_MAX)); current->state=TASK_RUNNING; remove_wait_queue(&device->intr_wait,&wait); msg_dbg(" waiting finished"); if(device->flag==1) { msg_err(" write timed out.., abort!!!"); // break; } msg_dbg(" PCI_DMA_TODEVICE loop pages %d finished",n); __xclenup_dma: msg_dbg(" pci_dma_sync_sg_for_cpu start"); pci_dma_sync_sg_for_cpu( device->pdev, device->dma_r.sglist, device->dma_r.sglen, device->dma_r.direction); msg_dbg(" pci_dma_sync_sg_for_cpu ok"); for (i=0;idma_r.nr_pages;i++) { if (! PageReserved(device->dma_r.pages[i])) SetPageDirty(device->dma_r.pages[i]); page_cache_release(device->dma_r.pages[i]); } msg_dbg(" pci_unmap_sg start"); pci_unmap_sg(device->pdev, device->dma_r.sglist, device->dma_r.nr_pages, device->dma_r.direction); msg_dbg(" pci_unmap_sg ok"); if (device->flag==1) retval=-EIO; else if (device->flag==2) retval = -ENOBUFS; else retval=count; __xexit: kfree(device->dma_r.sglist); kfree(device->dma_r.pages); } else { msg_err(" mapping msg_error"); } return retval; }