/*
 * Pin the user buffer [data, data + size) for DMA and describe it in @dma.
 *
 * On success: dma->pages holds dma->nr_pages pinned pages, dma->offset is
 * the byte offset into the first page, and dma->tail is the number of valid
 * bytes in the last page (1..PAGE_SIZE).  Returns 0 on success, negative
 * errno on failure; on failure no pages remain pinned and dma->pages is
 * freed and cleared, so the caller need not clean up.
 */
static int
rsp_setup_dma (struct rsp_dma *dma, int direction, unsigned long data, u32 size)
{
	u32 first, last;
	int err;	/* was "msg_err", which shadowed the msg_err() log macro */
	int i;

	dma->direction = direction;
	first = (data & PAGE_MASK) >> PAGE_SHIFT;		/* first page number */
	last = ((data + size - 1) & PAGE_MASK) >> PAGE_SHIFT;	/* last page number */
	dma->offset = data & ~PAGE_MASK;
	dma->nr_pages = last - first + 1;
	/* index of the last valid byte in the last page, plus one */
	dma->tail = 1 + ((data + size - 1) & ~PAGE_MASK);
	msg_dbg("dma nr_pages %d", dma->nr_pages);

	dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!dma->pages) {
		msg_err("NULL == dma->pages");
		return -ENOMEM;
	}

	msg_dbg("init user [0x%lx+0x%x => %d pages]", data, size, dma->nr_pages);
	/* was garbled as "¤t" (mojibake for "&current") in the original */
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, data & PAGE_MASK,
			     dma->nr_pages, direction == PCI_DMA_FROMDEVICE, 0,
			     dma->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (err == dma->nr_pages) {
		msg_dbg("get_user_pages ok, %d pages", err);
		return 0;
	}

	msg_err("get_user_pages failed, msg_err=%d must be %d", err, dma->nr_pages);
	/*
	 * Partial success still pins the first `err` pages; drop them and the
	 * page array so nothing leaks on the error path.
	 */
	for (i = 0; i < err; i++)
		page_cache_release(dma->pages[i]);
	kfree(dma->pages);
	dma->pages = NULL;
	return -EINVAL;
}
/*
 * Build a scatterlist covering @nr_pages pinned pages.
 *
 * @offset: byte offset of the data within the first page.
 * @tail:   number of valid bytes in the last page.
 *
 * Returns a kmalloc'd scatterlist (caller frees with kfree) or NULL on
 * error.  The pages themselves stay owned by the caller.
 */
struct scatterlist*
rsp_setup_sglist(struct page **pages, int nr_pages, int offset, int tail)
{
	struct scatterlist *sglist;
	int i;

	if (pages == NULL) {
		msg_err("rsp_setup_sglist error pages == NULL");
		return NULL;
	}
	sglist = kmalloc(sizeof(*sglist) * nr_pages, GFP_KERNEL);
	if (NULL == sglist) {
		msg_err("rsp_setup_sglist kmalloc error sglist==NULL");
		return NULL;
	}
	/* Zeroes the table and sets the end marker (memset alone did not). */
	sg_init_table(sglist, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		if (pages[i] == NULL) {
			msg_err("rsp_setup_sglist error page %d == NULL", i);
			kfree(sglist);
			return NULL;
		}
		if (nr_pages == 1) {
			/*
			 * Single-page buffer: the data is [offset, tail); the
			 * old code used PAGE_SIZE - offset, overstating the
			 * segment length for transfers shorter than a page.
			 */
			sg_set_page(&sglist[i], pages[i], tail - offset, offset);
		} else if (i == 0) {
			sg_set_page(&sglist[i], pages[i], PAGE_SIZE - offset, offset);
		} else if (i == nr_pages - 1) {
			sg_set_page(&sglist[i], pages[i], tail, 0);
		} else {
			sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
		}
	}
	return sglist;
}
/*
 * Interrupt handler: checks that the interrupt belongs to this device and
 * that a DMA-completion bit (COMPL_WR/COMPL_RD) is set in M_MODE, finishes
 * the in-flight DMA, marks the scheduler job done, and acknowledges the
 * interrupt by clearing the completion/interrupt bits in M_MODE.
 */
irqreturn_t
fpga_int_handler (int irq, void * dev_id)
{
	/* Declarations first: kernel builds reject declaration-after-statement. */
	struct rsp_device *rsp_device = (struct rsp_device *) dev_id;
	u32 job_id;
	u32 v32;

	msg_dbg("fpga_int_handler started\n");
	if (irq != rsp_device->intr) {
		msg_err("msg_error in fpga_int_handler:irq:%i!=intr:%i", irq, rsp_device->intr);
		return IRQ_NONE;
	}
	v32 = ioread32((void *)((unsigned long)rsp_device->bar[BAR_NUMBER].vaddr + M_MODE));
	msg_dbg("v32 = %d\n", v32);
	/* All-ones read: device gone (e.g. surprise removal) — not our interrupt. */
	if ((int)v32 == -1)
		return IRQ_NONE;
	/* Neither completion bit set — shared-line interrupt for someone else. */
	if (!((v32 & COMPL_WR) || (v32 & COMPL_RD)))
		return IRQ_NONE;

	if (rsp_device->direction == READ)
		fpga_finish_dma_write(rsp_device);
	else
		fpga_finish_dma_read(rsp_device);
	job_id = fpga_scheduler_dma_job_finished(fpga_task_scheduler);

	/* Ack: clear completion and interrupt flags before writing back. */
	v32 &= ~(COMPL_WR | COMPL_RD | INT_REG);
	msg_dbg("Mark job %u as done\n", job_id);
	spin_lock(&jobs_lock);
	if (job_id != 0)
		mark_job_as_done(job_id);
	spin_unlock(&jobs_lock);
	iowrite32(v32, (void *)((unsigned long)rsp_device->bar[BAR_NUMBER].vaddr + M_MODE));
	msg_dbg("fpga_int_handler finished\n");
	return IRQ_HANDLED;
}
/*
 * Start a host-to-device DMA of @size bytes at user address @host_addr.
 *
 * Pins the user pages, builds and maps a scatterlist, programs each
 * non-page-crossing chunk into the M_RD_ADDR/M_RD_CNT registers, then sets
 * START_RD in M_MODE.  Completion is handled in fpga_int_handler /
 * fpga_finish_dma_write.  Returns 0 on success, -1 on failure; all
 * resources acquired here are released on every failure path.
 */
int
fpga_push_data_to_device (struct rsp_device *rsp_dev, u64 host_addr, u64 size)
{
	int num;
	int i;
	u32 v32, l32;
	u32 cur_buf_addr, cur_buf_length, tail;

	rsp_dev->direction = READ;
	rsp_dev->dma_r.direction = PCI_DMA_TODEVICE;
	rsp_dev->intr_mask = 0x02 << REG_SHIFT_R;
	if (rsp_setup_dma(&(rsp_dev->dma_r), PCI_DMA_TODEVICE, host_addr, size) != 0)
		return -1;

	rsp_dev->dma_r.sglist = rsp_setup_sglist(rsp_dev->dma_r.pages,
						 rsp_dev->dma_r.nr_pages,
						 rsp_dev->dma_r.offset,
						 rsp_dev->dma_r.tail);
	if (rsp_dev->dma_r.sglist == NULL) {
		/* Drop the pinned pages too — the old code leaked them here. */
		for (i = 0; i < rsp_dev->dma_r.nr_pages; i++)
			page_cache_release(rsp_dev->dma_r.pages[i]);
		kfree(rsp_dev->dma_r.pages);
		rsp_dev->dma_r.pages = NULL;
		return -1;
	}

	rsp_dev->dma_r.sglen = pci_map_sg(rsp_dev->pdev,
					  rsp_dev->dma_r.sglist,
					  rsp_dev->dma_r.nr_pages,
					  rsp_dev->dma_r.direction);
	if (rsp_dev->dma_r.sglen == 0) {
		kfree(rsp_dev->dma_r.sglist);
		rsp_dev->dma_r.sglist = NULL;
		/* Pinned pages and the page array also leaked here before. */
		for (i = 0; i < rsp_dev->dma_r.nr_pages; i++)
			page_cache_release(rsp_dev->dma_r.pages[i]);
		kfree(rsp_dev->dma_r.pages);
		rsp_dev->dma_r.pages = NULL;
		return -1;
	}

	pci_dma_sync_sg_for_device(rsp_dev->pdev,
				   rsp_dev->dma_r.sglist,
				   rsp_dev->dma_r.sglen,
				   rsp_dev->dma_r.direction);

	/*
	 * NOTE(review): clamping to MAX_DMA_PAGES silently drops the rest of
	 * the buffer for large transfers — confirm callers never exceed it.
	 */
	num = rsp_dev->dma_r.nr_pages;
	if (num > MAX_DMA_PAGES)
		num = MAX_DMA_PAGES;
	for (i = 0; i < num; i++) {
		/* Was mislabeled "PCI_DMA_FROMDEVICE" — this is the TODEVICE path. */
		msg_dbg(" PCI_DMA_TODEVICE page %d go", i);
		cur_buf_addr = sg_dma_address(&rsp_dev->dma_r.sglist[i]);
		cur_buf_length = sg_dma_len(&rsp_dev->dma_r.sglist[i]);
		msg_dbg(" DMA write allocated buffer v32=%x, l32=%x", cur_buf_addr, cur_buf_length);
		/* Split each sg entry into chunks that do not cross a page boundary. */
		while (cur_buf_length > 0) {
			tail = PAGE_SIZE - (cur_buf_addr & (PAGE_SIZE - 1));
			l32 = (cur_buf_length < tail) ? cur_buf_length : tail;
			v32 = cur_buf_addr;
			msg_dbg(" DMA write transaction buffer v32=%x, l32=%x, tail=%x, cbl=%x",
				v32, l32, tail, cur_buf_length);
			iowrite32(v32,
				  (void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr
					   + M_RD_ADDR));
			iowrite32(l32,
				  (void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr
					   + M_RD_CNT));
			cur_buf_addr += l32;
			cur_buf_length -= l32;
		}
	}

	/* Kick off the transfer. */
	v32 = ioread32((void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr + M_MODE));
	v32 |= START_RD;
	iowrite32(v32, (void *)((unsigned long)rsp_dev->bar[BAR_NUMBER].vaddr + M_MODE));
	return 0;
}
static void
fpga_finish_dma_write (struct rsp_device * device)
{
int i;
msg_dbg("Finish DMA write started\n");
pci_dma_sync_sg_for_cpu(device->pdev,
device->dma_r.sglist,
device->dma_r.sglen,
device->dma_r.direction);
for (i = 0; i < device->dma_r.nr_pages; i++)
{
if (!PageReserved(device->dma_r.pages[i]))
SetPageDirty(device->dma_r.pages[i]);
page_cache_release(device->dma_r.pages[i]);
}
pci_unmap_sg (device->pdev, device->dma_r.sglist,
device->dma_r.nr_pages, device->dma_r.direction);
kfree(device->dma_r.sglist);
kfree(device->dma_r.pages);
msg_dbg("Finish DMA write success\n");
}