Advertisement
milanmetal

AXI DMA Lusher driver

Dec 14th, 2018
122
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 16.75 KB | None | 0 0
  1. /*  luscher.c - A char type DMA device using link list
  2.  */
  3.  
  4. // source: https://forums.xilinx.com/t5/Embedded-Linux/AXI-DMA-with-Zynq-Running-Linux/m-p/522755/highlight/true#M10649
  5.  
  6. #include <linux/kernel.h>
  7. #include <linux/init.h>
  8. #include <linux/module.h>
  9. #include <linux/slab.h>
  10. #include <linux/io.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/fs.h>
  14. #include <linux/proc_fs.h>
  15. #include <linux/errno.h>    /* error codes */
  16. #include <linux/types.h>    /* size_t */
  17. #include <linux/fcntl.h>    /* O_ACCMODE */
  18. #include <linux/seq_file.h>
  19. #include <linux/cdev.h>
  20. #include <asm/uaccess.h>
  21.  
  22. #include <linux/of_address.h>
  23. #include <linux/of_device.h>
  24. #include <linux/of_platform.h>
  25.  
  26. MODULE_LICENSE("GPL");
  27. MODULE_AUTHOR
  28.     ("Dave Warren - Luscher UK ltd");
  29. MODULE_DESCRIPTION
  30.     ("luscher - loadable module dma driver");
  31.  
  32. #define DRIVER_NAME "luscher"
  33.  
  34. /* DMA state */
  35. #define DMA_IDLE    1   // Can accept data
  36. #define DMA_STARTED 2
  37. #define DMA_DATA_COMPLETE 3 // All buffers here
  38.  
  39. /* AXI DMA defs */
  40. /* Offsets to registers (as long), simple mode */
  41. #define MM2S_DMACR  0       // Control Reg
  42. #define MM2S_DMASR  1       // Status reg
  43. #define MM2S_SA     6       // Start address
  44. #define MM2S_LENGHT 10      // DMA length
  45.  
  46. /* Extra for SG mode DMA */
  47. #define MM2S_CDESC  2       // Current descriptor ptr (write before DMA, read only while DMA active)
  48. #define MM2S_TDESC  4       // Tail descriptor ptr (write to start DMA)
  49.  
  50.  
  51. /* Control Bits */
  52.  
  53. #define DMACR_RUNSTOP       1<<0
  54. #define DMACR_RESET     1<<2
  55.  
  56. /* Status bits */
  57. #define DMASR_HALTED        1<<0
  58. #define DMASR_IDLE      1<<1
  59.  
  60.  
  61. /* Descriptor offsets */
  62. #define DESC_SIZE       0x40    // Size of a descriptor
  63. #define DESC_NXTDESC        0   // Pointer to next
  64. #define DESC_BUFFER_ADDRESS 8   // Data address
  65. #define DESC_CONTROL        0x18    //
  66. #define DESC_STATUS     0x1c    //
  67.  
  68. /* DESC_CONTROL FLAGS */
  69. #define TXSOF           1<<27   // Start of frame
  70. #define TXEOF           1<<26   // End of frame
  71.  
  72. /* DESC_STATUS FLAGS */
  73. #define CMPLT           1<<31   // Completed
  74.  
  75. /*
  76.  * Ioctl definitions
  77.  */
  78.  
  79. /* Use 0xAA as magic number */
  80. #define LUSCHER_IOC_MAGIC  0xAA
  81.  
  82. #define LUSCHER_IOCRESET        _IO(LUSCHER_IOC_MAGIC, 0)
  83. #define LUSCHER_SETSIZE     _IOW(LUSCHER_IOC_MAGIC,  1, int)
  84. #define LUSCHER_IOC_MAXNR 1
  85.  
  86.  
  87. int luscher_major = 0; /* major device node */
  88. int luscher_minor = 0;
  89.  
// Private data allocated on module load (one instance per probed device)
struct luscher_local {
    // The platform device pointer
    //struct platform_device *pdev;
    // The AXI DMA
    int irq;                    // IRQ number from the platform resource
    unsigned long mem_start;    // physical start of the register window
    unsigned long mem_end;      // physical end (inclusive) of the window
    void __iomem *base_addr;    // ioremap'd AXI DMA register block
    // The allocated DMA buffers (data buffers followed by SG descriptors)
    u32 buff_size;              // bytes per data buffer (from device tree)
    u32 buff_number;            // number of buffers/descriptors (from device tree)
    phys_addr_t b_phys;    /* buffer physical */
    void __iomem *b_virt;  /* buffer virtual */
    int bsize; /* size ask to alloc */
    // DMA info
    int dma_bytes; // Size of DMA (bytes remaining in current transfer)
    int dma_b_pos; // where we are in current buffer (byte offset)
    int dma_state; // what we are doing: DMA_IDLE/STARTED/DATA_COMPLETE
    int dma_next_in;// next buffer (index of next descriptor to fill)
    // File info
    wait_queue_head_t inq;    /* input queue for blocking writes */
    struct semaphore sem;     /* mutual exclusion semaphore     */
    struct cdev cdev;     /* Char device structure      */
};
  115.  
/* prototypes of the file_operations callbacks */
ssize_t luscher_write(struct file *filp, const char __user *buf, size_t count,
                    loff_t *f_pos);
//ssize_t luscher_read(struct file *filp, const char __user *buf, size_t count,
//                    loff_t *f_pos);
long     luscher_ioctl(struct file *filp,
                    unsigned int cmd, unsigned long arg);
int luscher_open(struct inode *inode, struct file *filp);
int luscher_release(struct inode *inode, struct file *filp);

static int luscher_init(void);
static void luscher_exit(void);

/* Char device ops: write-only DMA device, controlled via ioctl. */
struct file_operations luscher_fops = {
    .owner = THIS_MODULE,
    //.llseek = scull_llseek,
    //.read = luscher_read,
    .write = luscher_write,
    .compat_ioctl =  luscher_ioctl, /* not called */
    .unlocked_ioctl = luscher_ioctl,
    .open = luscher_open,
    .release = luscher_release,
};
  139.  
  140. /*
  141.  * Open and close
  142.  */
  143.  
  144. int luscher_open(struct inode *inode, struct file *filp)
  145. {
  146.     struct luscher_local *lp; /* device information */
  147.  
  148.     lp = container_of(inode->i_cdev, struct luscher_local, cdev);
  149.     filp->private_data = lp; /* for other methods */
  150.  
  151.     printk("open\n");
  152.  
  153.    
  154.     return 0;          /* success */
  155. }
  156.  
/*
 * release() (close): soft-reset the DMA engine, rewind the driver's
 * buffer bookkeeping and mark every descriptor free, so the next open
 * starts from a clean state.
 */
int luscher_release(struct inode *inode, struct file *filp)
{
    struct luscher_local *lp = filp->private_data;
    int y;
    int *dma_reg;   /* word-indexed view of the DMA register block */
    int *p;         /* word-indexed view of the descriptor chain */

    /* Descriptors live just past the data buffers (see probe). */
    p = (int *)(lp->b_virt + (lp->buff_size * lp->buff_number));
    dma_reg = (int *)lp->base_addr;

    /* NOTE(review): the reset bit is written and then cleared without
     * polling DMASR for reset completion -- confirm the engine
     * tolerates this against the AXI DMA spec. */
    dma_reg[MM2S_DMACR] = DMACR_RESET;
    lp->dma_next_in = 0;
    lp->dma_b_pos = 0;
    dma_reg[MM2S_DMACR] = 0;
    lp->dma_state = DMA_IDLE;
    lp->dma_bytes = 0x100000;   /* default transfer size until SETSIZE ioctl */
    printk("release\n");
    for(y=0; y < lp->buff_number; y++)
        p[(DESC_STATUS + (DESC_SIZE * y))/4] = CMPLT; // mark as empty

    return 0;
}
  179.  
  180. //ssize_t luscher_read(struct file *filp, const char __user *buf, size_t count,
  181. //                loff_t *f_pos)
  182. //{
  183. //  return count;
  184. //}
  185.  
/*
 * write(): copy user data into the next free DMA buffer; when a buffer
 * fills (last_buffer == 1) or the transfer's final bytes arrive
 * (last_buffer == 2), hand its descriptor to the AXI DMA engine by
 * advancing the tail pointer.  Blocks (unless O_NONBLOCK) while every
 * descriptor is still owned by the hardware.  The first submitted
 * buffer of a frame also starts the engine if it is halted.
 */
ssize_t luscher_write(struct file *filp, const char __user *buf, size_t count,
                loff_t *f_pos)
{
    struct luscher_local *lp = filp->private_data;
    int last_buffer = 0;  /* 0 = partial buffer, 1 = buffer full, 2 = end of transfer */
    int *p;  /* pointer to buffer mem (descriptor chain, word-indexed) */
    int *dma_reg; /* pointer to DMA registers (word-indexed) */
    int dma_status;
    int control;
    int y;   /* only used by the commented-out debug dump below */
    int start_dma = 0;

    ssize_t retval = -ENOMEM; /* value used in "goto out" statements */

    p = (int *)(lp->b_virt + (lp->buff_size * lp->buff_number));
    dma_reg = (int *)lp->base_addr;

    /* return count; -- debug hack, disabled */

    if (down_interruptible(&lp->sem))
        return -ERESTARTSYS;

    /* Whole transfer already queued: refuse more data.
     * NOTE(review): this returns -ENOMEM via "goto out". */
    if (lp->dma_state == DMA_DATA_COMPLETE)
        goto out;
    /* Do blocking write if no space in the buffer */
    /* control = p[(DESC_STATUS + (DESC_SIZE * lp->dma_next_in))/4];
    printk("control %x", control); */
    while ((p[(DESC_STATUS + (DESC_SIZE * lp->dma_next_in))/4] & CMPLT) == 0){ /* buffer not free */
        up(&lp->sem); /* release the lock */
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        /* 2-jiffy timeout: nothing visible here wakes inq, so this
         * effectively polls the descriptor's CMPLT bit. */
        wait_event_interruptible_timeout(lp->inq,
            ( p[(DESC_STATUS + (DESC_SIZE * lp->dma_next_in))/4] & CMPLT ) , 2 );
        //if (wait_event_interruptible(lp->inq,
        //  ( p[(DESC_STATUS + (DESC_SIZE * lp->dma_next_in))/4] & CMPLT ) ) )
        //  return -ERESTARTSYS; /* signal: tell the fs layer to handle it */
        /* otherwise loop, but first reacquire the lock */
        if (down_interruptible(&lp->sem))
            return -ERESTARTSYS;
    }

    /* write only up to the end of this dma */
    if ((count >= lp->dma_bytes ) && (count <= lp->buff_size - lp->dma_b_pos)){
        last_buffer = 2;
        count = lp->dma_bytes;
    }
    /* write to end of buffer */
    else if (count >= lp->buff_size - lp->dma_b_pos) {
        last_buffer = 1;
        count = lp->buff_size - lp->dma_b_pos;
    }

    if (copy_from_user(lp->b_virt + (lp->dma_next_in * lp->buff_size) + lp->dma_b_pos, buf, count)) {
        retval = -EFAULT;
        goto out;
    }
    *f_pos += count;
    lp->dma_bytes -= count;
    lp->dma_b_pos += count;
    if(last_buffer ) { // switch to next buffer, and advance DMA pointer
        //printk("dma status %x cdesc %x\n", dma_reg[MM2S_DMASR],dma_reg[MM2S_CDESC]);
        p[(DESC_STATUS + (DESC_SIZE * lp->dma_next_in))/4] = 0; /* buffer full: clear CMPLT, hardware owns it */
        control = lp->dma_b_pos;   /* low bits of DESC_CONTROL carry the byte count */
        if(lp->dma_state == DMA_IDLE) {
            control |= TXSOF ;     /* first buffer of the frame */
            lp->dma_state = DMA_STARTED;
            wmb();
            printk("DMA_STARTED\n");
            start_dma = 1;
            //for(y = 0; y < (lp->buff_number * DESC_SIZE / 4) ; y = y + 4)
            //  printk("%x %x %x %x\n", p[y], p[y + 1], p[y + 2], p[y+3]);
        }
        if(last_buffer == 2) {
            control |= TXEOF;      /* last buffer of the frame */
            lp->dma_state = DMA_DATA_COMPLETE;
            wmb();
            printk("DMA_DATA_COMPLETE\n");
        }
        p[(DESC_CONTROL + (DESC_SIZE * lp->dma_next_in))/4] = control; /* size */
        if (start_dma) {
            /* Engine halted: point CDESC at the head of the descriptor
             * chain, set RUNSTOP, and spin until it leaves HALTED. */
            dma_status = dma_reg[MM2S_DMASR];
            if(dma_status & DMASR_HALTED) {
                dma_reg[MM2S_CDESC] = (lp->b_phys + (lp->buff_size * lp->buff_number));
                wmb();
                dma_reg[MM2S_DMACR] = DMACR_RUNSTOP;  /* Start DMA */
                do {
                    dma_status = dma_reg[MM2S_DMASR];
                }
                while(dma_status & DMASR_HALTED) ;
            }
        }
        //dma_map_single(&lp->pdev->dev, lp->b_virt + (lp->dma_next_in * lp->buff_size),
        //          lp->buff_size, DMA_TO_DEVICE);
        wmb();
        /* Advancing TDESC past the new descriptor triggers its transfer. */
        dma_reg[MM2S_TDESC] = lp->b_phys + (lp->buff_size * lp->buff_number)
                    + (DESC_SIZE * lp->dma_next_in); /* tail pointer advance */
        if(++lp->dma_next_in >= lp->buff_number)
            lp->dma_next_in = 0;
        lp->dma_b_pos = 0;
        //printk("nextIn %d\n", lp->dma_next_in);
    }

    retval = count;

  out:
    up(&lp->sem);
    return retval;
}
  296.  
  297. /*
  298.  * The ioctl() implementation
  299.  */
  300.  
/*
 * ioctl(): two commands --
 *   LUSCHER_IOCRESET: reset the engine and driver state, abort the DMA
 *                     (leaves state DMA_DATA_COMPLETE so writes return)
 *   LUSCHER_SETSIZE:  reset, then set the total byte count of the next
 *                     transfer and allow writes (state DMA_IDLE)
 * NOTE(review): lp->sem is not taken here, so these race against a
 * concurrent write() -- confirm whether that is acceptable.
 */
long luscher_ioctl(/*struct inode *inode, */ struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
    struct luscher_local *lp = filp->private_data;
    int *p;  /* pointer to buffer mem (descriptor chain, word-indexed) */
    int *dma_reg; /* pointer to DMA registers (word-indexed) */
    int err = 0, y;
    long retval = 0;

    /* printk("ioctl %d\n", cmd); -- debug ioctl, disabled */

    /*
     * extract the type and number bitfields, and don't decode
     * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
     */
    if (_IOC_TYPE(cmd) != LUSCHER_IOC_MAGIC) return -ENOTTY;
    if (_IOC_NR(cmd) > LUSCHER_IOC_MAXNR) return -ENOTTY;

    /*
     * the direction is a bitmask, and VERIFY_WRITE catches R/W
     * transfers. `Type' is user-oriented, while
     * access_ok is kernel-oriented, so the concept of "read" and
     * "write" is reversed
     * NOTE(review): 3-argument access_ok()/VERIFY_* is the pre-5.0
     * kernel API; kernels >= 5.0 use access_ok(addr, size).
     */
    if (_IOC_DIR(cmd) & _IOC_READ)
        err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
    else if (_IOC_DIR(cmd) & _IOC_WRITE)
        err =  !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
    if (err) return -EFAULT;

    p = (int *)(lp->b_virt + (lp->buff_size * lp->buff_number));
    dma_reg = (int *)lp->base_addr;

    switch(cmd) {

      case LUSCHER_IOCRESET: /* Reset hardware and pointers, abort the DMA */
        dma_reg[MM2S_DMACR] = DMACR_RESET;
        lp->dma_next_in = 0;
        lp->dma_b_pos = 0;
        dma_reg[MM2S_DMACR] = 0;    /* NOTE(review): reset completion is not polled */
        lp->dma_state = DMA_DATA_COMPLETE; /* any write returns */
        lp->dma_bytes = 0x100000;
        for(y=0; y < lp->buff_number; y++)
            p[(DESC_STATUS + (DESC_SIZE * y))/4] = CMPLT; // mark as empty
        //for(y = 0; y < (lp->buff_number * DESC_SIZE / 4) ; y = y + 4)
        //  printk("%x %x %x %x\n", p[y], p[y + 1], p[y + 2], p[y+3]);
        /* printk("ioctl - reset\n"); */
        break;

      case LUSCHER_SETSIZE: /* Set: size of transfer */
        dma_reg[MM2S_DMACR] = DMACR_RESET;
        lp->dma_next_in = 0;
        lp->dma_b_pos = 0;
        dma_reg[MM2S_DMACR] = 0;
        lp->dma_state = DMA_IDLE;
        for(y=0; y < lp->buff_number; y++)
            p[(DESC_STATUS + (DESC_SIZE * y))/4] = CMPLT; // mark as empty
        /* for(y = 0; y < (lp->buff_number * DESC_SIZE / 4) ; y = y + 4)
            printk("%x %x %x %x\n", p[y], p[y + 1], p[y + 2], p[y+3]); */
        retval = __get_user(lp->dma_bytes, (int __user *)arg);
        /* printk("ioctl - DMA %d bytes\n", lp->dma_bytes); */
        break;


      default:  /* redundant, as cmd was checked against MAXNR */
        return -ENOTTY;
    }
    return retval;

}
  371.  
  372.  
  373.  
  374. static irqreturn_t luscher_irq(int irq, void *lp)
  375. {  // int service
  376.     printk("luscher interrupt\n");
  377.     return IRQ_HANDLED;
  378. }
  379.  
  380. static int luscher_probe(struct platform_device *pdev)
  381. {
  382.     struct resource *r_irq; /* Interrupt resources */
  383.     struct resource *r_mem; /* IO mem resources */
  384.     struct device *dev = &pdev->dev;
  385.     struct luscher_local *lp = NULL;
  386.     struct device_node *np = pdev->dev.of_node;
  387.     dev_t devno = 0;
  388.  
  389.     int y;
  390.     int *p;
  391.     unsigned int x;
  392.     int rc = 0;
  393.  
  394.     rc = LUSCHER_IOCRESET;
  395.     y = LUSCHER_SETSIZE;
  396.     /* printk(" IOCTL %d %d\n",rc,y); */
  397.  
  398.  
  399.     /* Dynamic major minor device numbers */
  400.     rc = alloc_chrdev_region(&devno, luscher_minor, 1,
  401.                 "luscher");
  402.     luscher_major = MAJOR(devno);
  403.  
  404.     if (rc < 0) {
  405.         printk(KERN_WARNING "luscher: can't get major %d\n", luscher_major);
  406.         return rc;
  407.         }
  408.    
  409.     dev_info(dev, "Device Tree Probing\n");
  410.  
  411.     /* Get iospace for the device */
  412.     r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  413.     if (!r_mem) {
  414.         dev_err(dev, "invalid address\n");
  415.         return -ENODEV;
  416.     }
  417.    
  418.     lp = (struct luscher_local *) kmalloc(sizeof(struct luscher_local), GFP_KERNEL);
  419.     if (!lp) {
  420.         dev_err(dev, "Cound not allocate luscher local memory\n");
  421.         return -ENOMEM;
  422.     }
  423.    
  424.     dev_set_drvdata(dev, lp); /* set private data */
  425.    
  426.     lp->mem_start = r_mem->start;
  427.     lp->mem_end = r_mem->end;
  428.  
  429.     if (!request_mem_region(lp->mem_start,
  430.                 lp->mem_end - lp->mem_start + 1,
  431.                 DRIVER_NAME)) {
  432.         dev_err(dev, "Couldn't lock memory region at %p\n",
  433.             (void *)lp->mem_start);
  434.         rc = -EBUSY;
  435.         goto error1;
  436.     }
  437.  
  438.     lp->base_addr = ioremap(lp->mem_start, lp->mem_end - lp->mem_start + 1);
  439.     if (!lp->base_addr) {
  440.         dev_err(dev, "luscher: Could not allocate iomem\n");
  441.         rc = -EIO;
  442.         goto error2;
  443.     }
  444.  
  445.     /* Get IRQ for the device */
  446.     r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  447.     if (!r_irq) {
  448.         dev_info(dev, "no IRQ found\n");
  449.         dev_info(dev, "luscher at 0x%08x mapped to 0x%08x\n",
  450.             (unsigned int __force)lp->mem_start,
  451.             (unsigned int __force)lp->base_addr);
  452.         return 0;
  453.     }
  454.     lp->irq = r_irq->start;
  455.    
  456.     rc = request_irq(lp->irq, &luscher_irq, 0, DRIVER_NAME, lp);
  457.     if (rc) {
  458.         dev_err(dev, "testmodule: Could not allocate interrupt %d.\n",
  459.             lp->irq);
  460.         goto error3;
  461.     }
  462.  
  463.     dev_info(dev,"luscher at 0x%08x mapped to 0x%08x, irq=%d\n",
  464.         (unsigned int __force)lp->mem_start,
  465.         (unsigned int __force)lp->base_addr,
  466.         lp->irq);
  467.  
  468.     of_property_read_u32(np, "buff-size", &lp->buff_size);
  469.     printk(" buff size 0x%08x\n", lp->buff_size);
  470.     of_property_read_u32(np, "buff-number", &lp->buff_number);
  471.     printk(" buff number 0x%08x\n", lp->buff_number);
  472.  
  473.     /* ask for the buffers and a descriptor chain */
  474.     lp->bsize = (lp->buff_size * lp->buff_number) + (lp->buff_number * DESC_SIZE);
  475.     /* try to alloc the buffer */
  476.     lp->b_virt = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(lp->bsize),
  477.                         &lp->b_phys, GFP_KERNEL);
  478.     if (!lp->b_virt) {
  479.         dev_err(&pdev->dev,
  480.             "Buffer memory allocation failed\n");
  481.         printk("Buffer memory allocation failed\n");
  482.         rc = -ENOMEM;
  483.         goto error3;
  484.     }
  485.     printk("DMA buffer at %x size %d\n", lp->b_phys, lp->bsize);
  486.     printk("SG table at %x\n", (lp->b_phys + (lp->buff_size * lp->buff_number)));
  487.     // clear buffer
  488.     memset(lp->b_virt,0,lp->bsize);
  489.     /* fill in some addresses */
  490.     p = (int *)(lp->b_virt + (lp->buff_size * lp->buff_number));
  491.     x = (unsigned int) lp->b_phys;
  492.     for(y=0; y < lp->buff_number; y++)
  493.       { // circular linked list
  494.         p[(DESC_CONTROL + (DESC_SIZE * y))/4] = lp->buff_size;
  495.         p[(DESC_STATUS + (DESC_SIZE * y))/4] = CMPLT; // mark as empty
  496.         p[(DESC_BUFFER_ADDRESS + (DESC_SIZE * y))/4] = x + (y * lp->buff_size);  // physical address
  497.         if((y + 1) == lp->buff_number)
  498.             p[(DESC_NXTDESC + (DESC_SIZE * y))/4] = x + (lp->buff_number * lp->buff_size); // back to top
  499.         else
  500.             p[(DESC_NXTDESC + (DESC_SIZE * y))/4] = x + (lp->buff_number * lp->buff_size) + ((y + 1) * DESC_SIZE);
  501.       }
  502.     lp->dma_state = DMA_DATA_COMPLETE;
  503.     lp->dma_bytes = 0x1000;
  504.     lp->dma_b_pos = 0;
  505.     lp->dma_next_in = 0;
  506.     //lp->pdev = pdev;
  507.     sema_init(&lp->sem, 1); /* set the semaphore */
  508.     init_waitqueue_head(&lp->inq); /* init the que for blocking */
  509.     /* register the file ops (things can start) */
  510.     cdev_init(&lp->cdev, &luscher_fops);
  511.     lp->cdev.owner = THIS_MODULE;
  512.     lp->cdev.ops = &luscher_fops;
  513.     /* Fail gracefully if need be */
  514.     if ((rc = cdev_add (&lp->cdev, devno, 1)))
  515.         printk(KERN_NOTICE "Error %d adding fops", rc);
  516.     return 0; // All ok
  517. error3:
  518.     free_irq(lp->irq, lp);
  519. error2:
  520.     release_mem_region(lp->mem_start, lp->mem_end - lp->mem_start + 1);
  521. error1:
  522.     kfree(lp);
  523.     dev_set_drvdata(dev, NULL);
  524.  
  525.     unregister_chrdev_region(devno, 1);
  526.     return rc;
  527. }
  528.  
  529. static int luscher_remove(struct platform_device *pdev)
  530. {
  531.     struct device *dev = &pdev->dev;
  532.     struct luscher_local *lp = dev_get_drvdata(dev);
  533.     dev_t devno = MKDEV(luscher_major, luscher_minor);
  534.  
  535.  
  536.     dma_free_coherent(&pdev->dev, PAGE_ALIGN(lp->bsize), lp->b_virt,
  537.               lp->b_phys);
  538.    
  539.     free_irq(lp->irq, lp);
  540.     release_mem_region(lp->mem_start, lp->mem_end - lp->mem_start + 1);
  541.     kfree(lp);
  542.     dev_set_drvdata(dev, NULL);
  543.     /* cleanup_module is never called if registering failed */
  544.     unregister_chrdev_region(devno, 1);
  545.     return 0;
  546. }
  547.  
  548. #ifdef CONFIG_OF
  549. static struct of_device_id luscher_of_match[] = {
  550.     { .compatible = "vendor,luscher", },
  551.     { /* end of list */ },
  552. };
  553. MODULE_DEVICE_TABLE(of, luscher_of_match);
  554. #else
  555. # define luscher_of_match
  556. #endif
  557.  
  558.  
/* Platform driver glue: probe/remove above, matched by name or via the
 * device-tree table luscher_of_match. */
static struct platform_driver luscher_driver = {
    .driver = {
        .name = DRIVER_NAME,
        .owner = THIS_MODULE,
        .of_match_table = luscher_of_match,
    },
    .probe      = luscher_probe,
    .remove     = luscher_remove,
};
  568.  
  569. static int __init luscher_init(void)
  570. {
  571.     /* printk("<1>Hello module world.\n"); */
  572.     return platform_driver_register(&luscher_driver);
  573. }
  574.  
  575.  
  576. static void __exit luscher_exit(void)
  577. {
  578.     platform_driver_unregister(&luscher_driver);
  579.     /* printk(KERN_ALERT "Goodbye module world.\n"); */
  580. }
  581.  
  582.  
/* Entry/exit hooks: run at insmod and rmmod respectively. */
module_init(luscher_init);
module_exit(luscher_exit);
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement