  1. /*
  2.  * MUSB OTG driver host support
  3.  *
  4.  * Copyright 2005 Mentor Graphics Corporation
  5.  * Copyright (C) 2005-2006 by Texas Instruments
  6.  * Copyright (C) 2006-2007 Nokia Corporation
  7.  *
  8.  * This program is free software; you can redistribute it and/or
  9.  * modify it under the terms of the GNU General Public License
  10.  * version 2 as published by the Free Software Foundation.
  11.  *
  12.  * This program is distributed in the hope that it will be useful, but
  13.  * WITHOUT ANY WARRANTY; without even the implied warranty of
  14.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15.  * General Public License for more details.
  16.  *
  17.  * You should have received a copy of the GNU General Public License
  18.  * along with this program; if not, write to the Free Software
  19.  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  20.  * 02110-1301 USA
  21.  *
  22.  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
  23.  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  24.  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  25.  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  26.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  27.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  28.  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  29.  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  30.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  31.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  32.  *
  33.  */
  34.  
  35. #include <linux/module.h>
  36. #include <linux/kernel.h>
  37. #include <linux/delay.h>
  38. #include <linux/sched.h>
  39. #include <linux/slab.h>
  40. #include <linux/errno.h>
  41. #include <linux/init.h>
  42. #include <linux/list.h>
  43.  
  44. #include "musb_core.h"
  45. #include "musb_host.h"
  46.  
  47.  
  48. /* MUSB HOST status 22-mar-2006
  49.  *
  50.  * - There's still lots of partial code duplication for fault paths, so
  51.  *   they aren't handled as consistently as they need to be.
  52.  *
  53.  * - PIO mostly behaved when last tested.
  54.  *     + including ep0, with all usbtest cases 9, 10
  55.  *     + usbtest 14 (ep0out) doesn't seem to run at all
  56.  *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
  57.  *       configurations, but otherwise double buffering passes basic tests.
  58.  *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
  59.  *
  60.  * - DMA (CPPI) ... partially behaves, not currently recommended
  61.  *     + about 1/15 the speed of typical EHCI implementations (PCI)
  62.  *     + RX, all too often reqpkt seems to misbehave after tx
  63.  *     + TX, no known issues (other than evident silicon issue)
  64.  *
  65.  * - DMA (Mentor/OMAP) ...has at least toggle update problems
  66.  *
  67.  * - Still no traffic scheduling code to make NAKing for bulk or control
  68.  *   transfers unable to starve other requests; or to make efficient use
  69.  *   of hardware with periodic transfers.  (Note that network drivers
  70.  *   commonly post bulk reads that stay pending for a long time; these
  71.  *   would make very visible trouble.)
  72.  *
  73.  * - Not tested with HNP, but some SRP paths seem to behave.
  74.  *
  75.  * NOTE 24-August-2006:
  76.  *
  77.  * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
  78.  *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
  79.  *   mostly works, except that with "usbnet" it's easy to trigger cases
  80.  *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
  81.  *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
  82.  *   although ARP RX wins.  (That test was done with a full speed link.)
  83.  */
  84.  
  85.  
  86. /*
  87.  * NOTE on endpoint usage:
  88.  *
  89.  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
  90.  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
  91.  *
  92.  * (Yes, bulk _could_ use more of the endpoints than that, and would even
  93.  * benefit from it ... one remote device may easily be NAKing while others
  94.  * need to perform transfers in that same direction.  The same thing could
  95.  * be done in software though, assuming dma cooperates.)
  96.  *
  97.  * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
  98.  * So far that scheduling is both dumb and optimistic:  the endpoint will be
  99.  * "claimed" until its software queue is no longer refilled.  No multiplexing
  100.  * of transfers between endpoints, or anything clever.
  101.  */
  102.  
  103.  
  104. static void musb_ep_program(struct musb *musb, u8 epnum,
  105.             struct urb *urb, unsigned int is_out,
  106.             u8 *buf, u32 len);
  107.  
  108. /*
  109.  * Clear TX fifo. Needed to avoid BABBLE errors.
  110.  */
  111. static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
  112. {
  113.     void __iomem    *epio = ep->regs;
  114.     u16     csr;
  115.     u16     lastcsr = 0;
  116.     int     retries = 1000;
  117.  
  118.     csr = musb_readw(epio, MUSB_TXCSR);
  119.     while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
  120.         if (csr != lastcsr)
  121.             DBG_nonverb(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
  122.         lastcsr = csr;
  123.         csr |= MUSB_TXCSR_FLUSHFIFO;
  124.         musb_writew(epio, MUSB_TXCSR, csr);
  125.         csr = musb_readw(epio, MUSB_TXCSR);
  126.         if (WARN(retries-- < 1,
  127.                 "Could not flush host TX%d fifo: csr: %04x\n",
  128.                 ep->epnum, csr))
  129.             return;
  130.         mdelay(1);
  131.     }
  132. }
  133.  
  134. /*
  135.  * Start transmit. Caller is responsible for locking shared resources.
  136.  * musb must be locked.
  137.  */
  138. static inline void musb_h_tx_start(struct musb_hw_ep *ep)
  139. {
  140.     u16 txcsr;
  141.  
  142.     /* NOTE: no locks here; caller should lock and select EP */
  143.     if (ep->epnum) {
  144.         txcsr = musb_readw(ep->regs, MUSB_TXCSR);
  145.         txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
  146.         musb_writew(ep->regs, MUSB_TXCSR, txcsr);
  147.     } else {
  148.         txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
  149.         musb_writew(ep->regs, MUSB_CSR0, txcsr);
  150.     }
  151.  
  152. }
  153.  
  154. static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
  155. {
  156.     u16 txcsr;
  157.  
  158.     /* NOTE: no locks here; caller should lock and select EP */
  159.     txcsr = musb_readw(ep->regs, MUSB_TXCSR);
  160.     txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
  161.     musb_writew(ep->regs, MUSB_TXCSR, txcsr);
  162. }
  163.  
  164. /*
  165.  * Start the URB at the front of an endpoint's queue;
  166.  * the endpoint must already be claimed by the caller.
  167.  *
  168.  * Context: controller locked, irqs blocked
  169.  */
  170. static void
  171. musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
  172. {
  173.     u16         frame;
  174.     u32         len;
  175.     void            *buf;
  176.     void __iomem        *mbase =  musb->mregs;
  177.     struct urb      *urb = next_urb(qh);
  178.     struct musb_hw_ep   *hw_ep = qh->hw_ep;
  179.     int         epnum = hw_ep->epnum;
  180.  
  181.     /* initialize software qh state */
  182.     qh->offset = 0;
  183.     qh->segsize = 0;
  184.  
  185.     /* gather right source of data */
  186.     switch (qh->type) {
  187.     case USB_ENDPOINT_XFER_CONTROL:
  188.         /* control transfers always start with SETUP */
  189.         is_in = 0;
  190.         hw_ep->out_qh = qh;
  191.         musb->ep0_stage = MUSB_EP0_START;
  192.         buf = urb->setup_packet;
  193.         len = 8;
  194.         break;
  195.     case USB_ENDPOINT_XFER_ISOC:
  196.         qh->iso_idx = 0;
  197.         qh->frame = 0;
  198.         buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
  199.         len = urb->iso_frame_desc[0].length;
  200.         break;
  201.     default:        /* bulk, interrupt */
  202.         buf = urb->transfer_buffer;
  203.         len = urb->transfer_buffer_length;
  204.     }
  205.  
  206.     DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
  207.             qh, urb, usb_pipedevice(urb->pipe), qh->epnum,
  208.             is_in ? "in" : "out",
  209.             ({char *s; switch (qh->type) {
  210.             case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
  211.             case USB_ENDPOINT_XFER_BULK:    s = "-bulk"; break;
  212.             case USB_ENDPOINT_XFER_ISOC:    s = "-iso"; break;
  213.             default:            s = "-intr"; break;
  214.             }; s; }),
  215.             epnum, buf, len);
  216.  
  217.     /* Configure endpoint */
  218.     if (is_in || hw_ep->is_shared_fifo)
  219.         hw_ep->in_qh = qh;
  220.     else
  221.         hw_ep->out_qh = qh;
  222.     musb_ep_program(musb, epnum, urb, !is_in, buf, len);
  223.  
  224.     /* transmit may have more work: start it when it is time */
  225.     if (is_in)
  226.         return;
  227.  
  228.     /* determine if the time is right for a periodic transfer */
  229.     switch (qh->type) {
  230.     case USB_ENDPOINT_XFER_ISOC:
  231.     case USB_ENDPOINT_XFER_INT:
  232.         DBG(3, "check whether there's still time for periodic Tx\n");
  233.         qh->iso_idx = 0;
  234.         frame = musb_readw(mbase, MUSB_FRAME);
  235.         /* FIXME this doesn't implement that scheduling policy ...
  236.          * or handle framecounter wrapping
  237.          */
  238.         if ((urb->transfer_flags & URB_ISO_ASAP)
  239.                 || (frame >= urb->start_frame)) {
  240.             /* REVISIT the SOF irq handler shouldn't duplicate
  241.              * this code; and we don't init urb->start_frame...
  242.              */
  243.             qh->frame = 0;
  244.             goto start;
  245.         } else {
  246.             qh->frame = urb->start_frame;
  247.             /* enable SOF interrupt so we can count down */
  248.             DBG(1, "SOF for %d\n", epnum);
  249. #if 1 /* ifndef CONFIG_ARCH_DAVINCI */
  250.             musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
  251. #endif
  252.         }
  253.         break;
  254.     default:
  255. start:
  256.         DBG(4, "Start TX%d %s\n", epnum,
  257.             hw_ep->tx_channel ? "dma" : "pio");
  258.  
  259.         if (!hw_ep->tx_channel)
  260.             musb_h_tx_start(hw_ep);
  261.         else if (cppi_ti_dma() || tusb_dma_omap())
  262.             cppi_host_txdma_start(hw_ep);
  263.     }
  264. }
  265.  
  266. /* caller owns controller lock, irqs are blocked */
  267. static void
  268. __musb_giveback(struct musb *musb, struct urb *urb, int status)
  269. __releases(musb->lock)
  270. __acquires(musb->lock)
  271. {
  272.     DBG(({ int level; switch (status) {
  273.                 case 0:
  274.                     level = 4;
  275.                     break;
  276.                 /* common/boring faults */
  277.                 case -EREMOTEIO:
  278.                 case -ESHUTDOWN:
  279.                 case -ECONNRESET:
  280.                 case -EPIPE:
  281.                     level = 3;
  282.                     break;
  283.                 default:
  284.                     level = 2;
  285.                     break;
  286.                 }; level; }),
  287.             "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
  288.             urb, urb->complete, status,
  289.             usb_pipedevice(urb->pipe),
  290.             usb_pipeendpoint(urb->pipe),
  291.             usb_pipein(urb->pipe) ? "in" : "out",
  292.             urb->actual_length, urb->transfer_buffer_length
  293.             );
  294.  
  295.     usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
  296.     spin_unlock(&musb->lock);
  297.     usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
  298.     spin_lock(&musb->lock);
  299. }
  300.  
  301. /* for bulk/interrupt endpoints only */
  302. static inline void
  303. musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
  304. {
  305.     struct usb_device   *udev = urb->dev;
  306.     u16         csr;
  307.     void __iomem        *epio = ep->regs;
  308.     struct musb_qh      *qh;
  309.  
  310.     /* FIXME:  the current Mentor DMA code seems to have
  311.      * problems getting toggle correct.
  312.      */
  313.  
  314.     if (is_in || ep->is_shared_fifo)
  315.         qh = ep->in_qh;
  316.     else
  317.         qh = ep->out_qh;
  318.  
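    /* NOTE: usb_settoggle()'s third argument selects the direction in the
     * USB core's convention: 1 = OUT/TX, 0 = IN/RX, matching the TXCSR and
     * RXCSR reads below.
     */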
  319.     if (!is_in) {
  320.         csr = musb_readw(epio, MUSB_TXCSR);
  321.         usb_settoggle(udev, qh->epnum, 1,
  322.             (csr & MUSB_TXCSR_H_DATATOGGLE)
  323.                 ? 1 : 0);
  324.     } else {
  325.         csr = musb_readw(epio, MUSB_RXCSR);
  326.         usb_settoggle(udev, qh->epnum, 0,
  327.             (csr & MUSB_RXCSR_H_DATATOGGLE)
  328.                 ? 1 : 0);
  329.     }
  330. }
  331.  
  332. /* caller owns controller lock, irqs are blocked */
  333. static struct musb_qh *
  334. musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
  335. {
  336.     struct musb_hw_ep   *ep = qh->hw_ep;
  337.     struct musb     *musb = ep->musb;
  338.     int         is_in = usb_pipein(urb->pipe);
  339.     int         ready = qh->is_ready;
  340.  
  341.     /* save toggle eagerly, for paranoia */
  342.     switch (qh->type) {
  343.     case USB_ENDPOINT_XFER_BULK:
  344.     case USB_ENDPOINT_XFER_INT:
  345.         musb_save_toggle(ep, is_in, urb);
  346.         break;
  347.     case USB_ENDPOINT_XFER_ISOC:
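        /* -EXDEV is the USB core's "iso transfer only partially completed"
         * status; report it if any packet in this URB had an error.
         */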
  348.         if (status == 0 && urb->error_count)
  349.             status = -EXDEV;
  350.         break;
  351.     }
  352.  
  353.     qh->is_ready = 0;
  354.     __musb_giveback(musb, urb, status);
  355.     qh->is_ready = ready;
  356.  
  357.     /* reclaim resources (and bandwidth) ASAP; deschedule it, and
  358.      * invalidate qh as soon as list_empty(&hep->urb_list)
  359.      */
  360.     if (list_empty(&qh->hep->urb_list)) {
  361.         struct list_head    *head;
  362.  
  363.         if (is_in)
  364.             ep->rx_reinit = 1;
  365.         else
  366.             ep->tx_reinit = 1;
  367.  
  368.         /* clobber old pointers to this qh */
  369.         if (is_in || ep->is_shared_fifo)
  370.             ep->in_qh = NULL;
  371.         else
  372.             ep->out_qh = NULL;
  373.         qh->hep->hcpriv = NULL;
  374.  
  375.         switch (qh->type) {
  376.  
  377.         case USB_ENDPOINT_XFER_CONTROL:
  378.         case USB_ENDPOINT_XFER_BULK:
  379.             /* fifo policy for these lists, except that NAKing
  380.              * should rotate a qh to the end (for fairness).
  381.              */
  382.             if (qh->mux == 1) {
  383.                 head = qh->ring.prev;
  384.                 list_del(&qh->ring);
  385.                 kfree(qh);
  386.                 qh = first_qh(head);
  387.                 break;
  388.             }
  389.  
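        /* FALLTHROUGH: a control/bulk qh that is not on the multiplexed
         * ring (qh->mux != 1) is freed below, just like a periodic qh.
         */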
  390.         case USB_ENDPOINT_XFER_ISOC:
  391.         case USB_ENDPOINT_XFER_INT:
  392.             /* this is where periodic bandwidth should be
  393.              * de-allocated if it's tracked and allocated;
  394.              * and where we'd update the schedule tree...
  395.              */
  396.             musb->periodic[ep->epnum] = NULL;
  397.             kfree(qh);
  398.             qh = NULL;
  399.             break;
  400.         }
  401.     }
  402.     return qh;
  403. }
  404.  
  405. /*
  406.  * Advance this hardware endpoint's queue, completing the specified urb and
  407.  * advancing to either the next urb queued to that qh, or else invalidating
  408.  * that qh and advancing to the next qh scheduled after the current one.
  409.  *
  410.  * Context: caller owns controller lock, irqs are blocked
  411.  */
  412. static void
  413. musb_advance_schedule(struct musb *musb, struct urb *urb,
  414.         struct musb_hw_ep *hw_ep, int is_in)
  415. {
  416.     struct musb_qh  *qh;
  417.  
  418.     if (is_in || hw_ep->is_shared_fifo)
  419.         qh = hw_ep->in_qh;
  420.     else
  421.         qh = hw_ep->out_qh;
  422.  
  423.     if (urb->status == -EINPROGRESS)
  424.         qh = musb_giveback(qh, urb, 0);
  425.     else
  426.         qh = musb_giveback(qh, urb, urb->status);
  427.  
  428.     if (qh != NULL && qh->is_ready) {
  429.         DBG(4, "... next ep%d %cX urb %p\n",
  430.                 hw_ep->epnum, is_in ? 'R' : 'T',
  431.                 next_urb(qh));
  432.         musb_start_urb(musb, is_in, qh);
  433.     }
  434. }
  435.  
  436. static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
  437. {
  438.     /* we don't want fifo to fill itself again;
  439.      * ignore dma (various models),
  440.      * leave toggle alone (may not have been saved yet)
  441.      */
  442.     csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
  443.     csr &= ~(MUSB_RXCSR_H_REQPKT
  444.         | MUSB_RXCSR_H_AUTOREQ
  445.         | MUSB_RXCSR_AUTOCLEAR);
  446.  
  447.     /* write 2x to allow double buffering */
  448.     musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  449.     musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  450.  
  451.     /* flush writebuffer */
  452.     return musb_readw(hw_ep->regs, MUSB_RXCSR);
  453. }
  454.  
  455. /*
  456.  * PIO RX for a packet (or part of it).
  457.  */
  458. static bool
  459. musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
  460. {
  461.     u16         rx_count;
  462.     u8          *buf;
  463.     u16         csr;
  464.     bool            done = false;
  465.     u32         length;
  466.     int         do_flush = 0;
  467.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  468.     void __iomem        *epio = hw_ep->regs;
  469.     struct musb_qh      *qh = hw_ep->in_qh;
  470.     int         pipe = urb->pipe;
  471.     void            *buffer = urb->transfer_buffer;
  472.  
  473.     /* musb_ep_select(mbase, epnum); */
  474.     rx_count = musb_readw(epio, MUSB_RXCOUNT);
  475.     DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
  476.             urb->transfer_buffer, qh->offset,
  477.             urb->transfer_buffer_length);
  478.  
  479.     /* unload FIFO */
  480.     if (usb_pipeisoc(pipe)) {
  481.         int                 status = 0;
  482.         struct usb_iso_packet_descriptor    *d;
  483.  
  484.         if (iso_err) {
  485.             status = -EILSEQ;
  486.             urb->error_count++;
  487.         }
  488.  
  489.         d = urb->iso_frame_desc + qh->iso_idx;
  490.         buf = buffer + d->offset;
  491.         length = d->length;
  492.         if (rx_count > length) {
  493.             if (status == 0) {
  494.                 status = -EOVERFLOW;
  495.                 urb->error_count++;
  496.             }
  497.             DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
  498.             do_flush = 1;
  499.         } else
  500.             length = rx_count;
  501.         urb->actual_length += length;
  502.         d->actual_length = length;
  503.  
  504.         d->status = status;
  505.  
  506.         /* see if we are done */
  507.         done = (++qh->iso_idx >= urb->number_of_packets);
  508.     } else {
  509.         /* non-isoch */
  510.         buf = buffer + qh->offset;
  511.         length = urb->transfer_buffer_length - qh->offset;
  512.         if (rx_count > length) {
  513.             if (urb->status == -EINPROGRESS)
  514.                 urb->status = -EOVERFLOW;
  515.             DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
  516.             do_flush = 1;
  517.         } else
  518.             length = rx_count;
  519.         urb->actual_length += length;
  520.         qh->offset += length;
  521.  
  522.         /* see if we are done */
  523.         done = (urb->actual_length == urb->transfer_buffer_length)
  524.             || (rx_count < qh->maxpacket)
  525.             || (urb->status != -EINPROGRESS);
  526.         if (done
  527.                 && (urb->status == -EINPROGRESS)
  528.                 && (urb->transfer_flags & URB_SHORT_NOT_OK)
  529.                 && (urb->actual_length
  530.                     < urb->transfer_buffer_length))
  531.             urb->status = -EREMOTEIO;
  532.     }
  533.  
  534.     musb_read_fifo(hw_ep, length, buf);
  535.  
  536.     csr = musb_readw(epio, MUSB_RXCSR);
  537.     csr |= MUSB_RXCSR_H_WZC_BITS;
  538.     if (unlikely(do_flush))
  539.         musb_h_flush_rxfifo(hw_ep, csr);
  540.     else {
  541.         /* REVISIT this assumes AUTOCLEAR is never set */
  542.         csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
  543.         if (!done)
  544.             csr |= MUSB_RXCSR_H_REQPKT;
  545.         musb_writew(epio, MUSB_RXCSR, csr);
  546.     }
  547.  
  548.     return done;
  549. }
  550.  
  551. /* we don't always need to reinit a given side of an endpoint...
  552.  * when we do, use tx/rx reinit routine and then construct a new CSR
  553.  * to address data toggle, NYET, and DMA or PIO.
  554.  *
  555.  * it's possible that driver bugs (especially for DMA) or aborting a
  556.  * transfer might have left the endpoint busier than it should be.
  557.  * the busy/not-empty tests are basically paranoia.
  558.  */
  559. static void
  560. musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
  561. {
  562.     u16 csr;
  563.  
  564.     /* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
  565.      * That always uses tx_reinit since ep0 repurposes TX register
  566.      * offsets; the initial SETUP packet is also a kind of OUT.
  567.      */
  568.  
  569.     /* if programmed for Tx, put it in RX mode */
  570.     if (ep->is_shared_fifo) {
  571.         csr = musb_readw(ep->regs, MUSB_TXCSR);
  572.         if (csr & MUSB_TXCSR_MODE) {
  573.             musb_h_tx_flush_fifo(ep);
  574.             musb_writew(ep->regs, MUSB_TXCSR,
  575.                     MUSB_TXCSR_FRCDATATOG);
  576.         }
  577.         /* clear mode (and everything else) to enable Rx */
  578.         musb_writew(ep->regs, MUSB_TXCSR, 0);
  579.  
  580.     /* scrub all previous state, clearing toggle */
  581.     } else {
  582.         csr = musb_readw(ep->regs, MUSB_RXCSR);
  583.         if (csr & MUSB_RXCSR_RXPKTRDY)
  584.             WARNING("rx%d, packet/%d ready?\n", ep->epnum,
  585.                 musb_readw(ep->regs, MUSB_RXCOUNT));
  586.  
  587.         musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
  588.     }
  589.  
  590.     /* target addr and (for multipoint) hub addr/port */
  591.     if (musb->is_multipoint) {
  592.         musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
  593.         musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
  594.         musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
  595.  
  596.     } else
  597.         musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
  598.  
  599.     /* protocol/endpoint, interval/NAKlimit, i/o size */
  600.     musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
  601.     musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
  602.     /* NOTE: bulk combining rewrites high bits of maxpacket */
  603.     musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
  604.  
  605.     ep->rx_reinit = 0;
  606. }
  607.  
  608.  
  609. /*
  610.  * Program an HDRC endpoint as per the given URB
  611.  * Context: irqs blocked, controller lock held
  612.  */
  613. static void musb_ep_program(struct musb *musb, u8 epnum,
  614.             struct urb *urb, unsigned int is_out,
  615.             u8 *buf, u32 len)
  616. {
  617.     struct dma_controller   *dma_controller;
  618.     struct dma_channel  *dma_channel;
  619.     u8          dma_ok;
  620.     void __iomem        *mbase = musb->mregs;
  621.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  622.     void __iomem        *epio = hw_ep->regs;
  623.     struct musb_qh      *qh;
  624.     u16         packet_sz;
  625.  
  626.     if (!is_out || hw_ep->is_shared_fifo)
  627.         qh = hw_ep->in_qh;
  628.     else
  629.         qh = hw_ep->out_qh;
  630.  
  631.     packet_sz = qh->maxpacket;
  632.  
  633.     DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
  634.                 "h_addr%02x h_port%02x bytes %d\n",
  635.             is_out ? "-->" : "<--",
  636.             epnum, urb, urb->dev->speed,
  637.             qh->addr_reg, qh->epnum, is_out ? "out" : "in",
  638.             qh->h_addr_reg, qh->h_port_reg,
  639.             len);
  640.  
  641.     musb_ep_select(mbase, epnum);
  642.  
  643.     /* candidate for DMA? */
  644.     dma_controller = musb->dma_controller;
  645.     if (is_dma_capable() && epnum && dma_controller) {
  646.         dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
  647.         if (!dma_channel) {
  648.             dma_channel = dma_controller->channel_alloc(
  649.                     dma_controller, hw_ep, is_out);
  650.             if (is_out)
  651.                 hw_ep->tx_channel = dma_channel;
  652.             else
  653.                 hw_ep->rx_channel = dma_channel;
  654.         }
  655.     } else
  656.         dma_channel = NULL;
  657.  
  658.     /* make sure we clear DMAEnab, autoSet bits from previous run */
  659.  
  660.     /* OUT/transmit/EP0 or IN/receive? */
  661.     if (is_out) {
  662.         u16 csr;
  663.         u16 int_txe;
  664.         u16 load_count;
  665.  
  666.         csr = musb_readw(epio, MUSB_TXCSR);
  667.  
  668.         /* disable interrupt in case we flush */
  669.         int_txe = musb_readw(mbase, MUSB_INTRTXE);
  670.         musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
  671.  
  672.         /* general endpoint setup */
  673.         if (epnum) {
  674.             /* ASSERT:  TXCSR_DMAENAB was already cleared */
  675.  
  676.             /* flush all old state, set default */
  677.             musb_h_tx_flush_fifo(hw_ep);
  678.             csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
  679.                     | MUSB_TXCSR_DMAMODE
  680.                     | MUSB_TXCSR_FRCDATATOG
  681.                     | MUSB_TXCSR_H_RXSTALL
  682.                     | MUSB_TXCSR_H_ERROR
  683.                     | MUSB_TXCSR_TXPKTRDY
  684.                     );
  685.             csr |= MUSB_TXCSR_MODE;
  686.  
  687.             if (usb_gettoggle(urb->dev,
  688.                     qh->epnum, 1))
  689.                 csr |= MUSB_TXCSR_H_WR_DATATOGGLE
  690.                     | MUSB_TXCSR_H_DATATOGGLE;
  691.             else
  692.                 csr |= MUSB_TXCSR_CLRDATATOG;
  693.  
  694.             /* twice in case of double packet buffering */
  695.             musb_writew(epio, MUSB_TXCSR, csr);
  696.             /* REVISIT may need to clear FLUSHFIFO ... */
  697.             musb_writew(epio, MUSB_TXCSR, csr);
  698.             csr = musb_readw(epio, MUSB_TXCSR);
  699.         } else {
  700.             /* endpoint 0: just flush */
  701.             musb_writew(epio, MUSB_CSR0,
  702.                 csr | MUSB_CSR0_FLUSHFIFO);
  703.             musb_writew(epio, MUSB_CSR0,
  704.                 csr | MUSB_CSR0_FLUSHFIFO);
  705.         }
  706.  
  707.         /* target addr and (for multipoint) hub addr/port */
  708.         if (musb->is_multipoint) {
  709.             musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
  710.             musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
  711.             musb_write_txhubport(mbase, epnum, qh->h_port_reg);
  712. /* FIXME if !epnum, do the same for RX ... */
  713.         } else
  714.             musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
  715.  
  716.         /* protocol/endpoint/interval/NAKlimit */
  717.         if (epnum) {
  718.             musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
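            /* NOTE: when bulk splitting, the field above the packet size
             * in TXMAXP (the "<< 11" below) holds the number of packets
             * that fit in the FIFO, minus one.
             */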
  719.             if (can_bulk_split(musb, qh->type))
  720.                 musb_writew(epio, MUSB_TXMAXP,
  721.                     packet_sz
  722.                     | ((hw_ep->max_packet_sz_tx /
  723.                         packet_sz) - 1) << 11);
  724.             else
  725.                 musb_writew(epio, MUSB_TXMAXP,
  726.                     packet_sz);
  727.             musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
  728.         } else {
  729.             musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
  730.             if (musb->is_multipoint)
  731.                 musb_writeb(epio, MUSB_TYPE0,
  732.                         qh->type_reg);
  733.         }
  734.  
  735.         if (can_bulk_split(musb, qh->type))
  736.             load_count = min((u32) hw_ep->max_packet_sz_tx,
  737.                         len);
  738.         else
  739.             load_count = min((u32) packet_sz, len);
  740.  
  741.         if (musb_inventra_dma() && dma_channel) {
  742.  
  743.             /* clear previous state */
  744.             csr = musb_readw(epio, MUSB_TXCSR);
  745.             csr &= ~(MUSB_TXCSR_AUTOSET
  746.                 | MUSB_TXCSR_DMAMODE
  747.                 | MUSB_TXCSR_DMAENAB);
  748.             csr |= MUSB_TXCSR_MODE;
  749.             musb_writew(epio, MUSB_TXCSR,
  750.                 csr | MUSB_TXCSR_MODE);
  751.  
  752.             qh->segsize = min(len, dma_channel->max_len);
  753.  
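            /* Mentor DMA mode 0 moves one packet per request, while mode 1
             * streams a multi-packet buffer (paired with AUTOSET below);
             * pick the mode by whether this segment exceeds one packet.
             */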
  754.             if (qh->segsize <= packet_sz)
  755.                 dma_channel->desired_mode = 0;
  756.             else
  757.                 dma_channel->desired_mode = 1;
  758.  
  759.  
  760.             if (dma_channel->desired_mode == 0) {
  761.                 csr &= ~(MUSB_TXCSR_AUTOSET
  762.                     | MUSB_TXCSR_DMAMODE);
  763.                 csr |= (MUSB_TXCSR_DMAENAB);
  764.                     /* against programming guide */
  765.             } else
  766.                 csr |= (MUSB_TXCSR_AUTOSET
  767.                     | MUSB_TXCSR_DMAENAB
  768.                     | MUSB_TXCSR_DMAMODE);
  769.  
  770.             musb_writew(epio, MUSB_TXCSR, csr);
  771.  
  772.             dma_ok = dma_controller->channel_program(
  773.                     dma_channel, packet_sz,
  774.                     dma_channel->desired_mode,
  775.                     urb->transfer_dma,
  776.                     qh->segsize);
  777.             if (dma_ok) {
  778.                 load_count = 0;
  779.             } else {
  780.                 dma_controller->channel_release(dma_channel);
  781.                 if (is_out)
  782.                     hw_ep->tx_channel = NULL;
  783.                 else
  784.                     hw_ep->rx_channel = NULL;
  785.                 dma_channel = NULL;
  786.             }
  787.         }
  788.  
  789.         /* candidate for DMA */
  790.         if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {
  791.  
  792.             /* program endpoint CSRs first, then setup DMA.
  793.              * assume CPPI setup succeeds.
  794.              * defer enabling dma.
  795.              */
  796.             csr = musb_readw(epio, MUSB_TXCSR);
  797.             csr &= ~(MUSB_TXCSR_AUTOSET
  798.                     | MUSB_TXCSR_DMAMODE
  799.                     | MUSB_TXCSR_DMAENAB);
  800.             csr |= MUSB_TXCSR_MODE;
  801.             musb_writew(epio, MUSB_TXCSR,
  802.                 csr | MUSB_TXCSR_MODE);
  803.  
  804.             dma_channel->actual_len = 0L;
  805.             qh->segsize = len;
  806.  
  807.             /* TX uses "rndis" mode automatically, but needs help
  808.              * to identify the zero-length-final-packet case.
  809.              */
  810.             dma_ok = dma_controller->channel_program(
  811.                     dma_channel, packet_sz,
  812.                     (urb->transfer_flags
  813.                             & URB_ZERO_PACKET)
  814.                         == URB_ZERO_PACKET,
  815.                     urb->transfer_dma,
  816.                     qh->segsize);
  817.             if (dma_ok) {
  818.                 load_count = 0;
  819.             } else {
  820.                 dma_controller->channel_release(dma_channel);
  821.                 hw_ep->tx_channel = NULL;
  822.                 dma_channel = NULL;
  823.  
  824.                 /* REVISIT there's an error path here that
  825.                  * needs handling:  can't do dma, but
  826.                  * there's no pio buffer address...
  827.                  */
  828.             }
  829.         }
  830.  
  831.         if (load_count) {
  832.             /* ASSERT:  TXCSR_DMAENAB was already cleared */
  833.  
  834.             /* PIO to load FIFO */
  835.             qh->segsize = load_count;
  836.             musb_write_fifo(hw_ep, load_count, buf);
  837.             csr = musb_readw(epio, MUSB_TXCSR);
  838.             csr &= ~(MUSB_TXCSR_DMAENAB
  839.                 | MUSB_TXCSR_DMAMODE
  840.                 | MUSB_TXCSR_AUTOSET);
  841.             /* write CSR */
  842.             csr |= MUSB_TXCSR_MODE;
  843.  
  844.             if (epnum)
  845.                 musb_writew(epio, MUSB_TXCSR, csr);
  846.         }
  847.  
  848.         /* re-enable interrupt */
  849.         musb_writew(mbase, MUSB_INTRTXE, int_txe);
  850.  
  851.     /* IN/receive */
  852.     } else {
  853.         u16 csr;
  854.  
  855.         if (hw_ep->rx_reinit) {
  856.             musb_rx_reinit(musb, qh, hw_ep);
  857.  
  858.             /* init new state: toggle and NYET, maybe DMA later */
  859.             if (usb_gettoggle(urb->dev, qh->epnum, 0))
  860.                 csr = MUSB_RXCSR_H_WR_DATATOGGLE
  861.                     | MUSB_RXCSR_H_DATATOGGLE;
  862.             else
  863.                 csr = 0;
  864.             if (qh->type == USB_ENDPOINT_XFER_INT)
  865.                 csr |= MUSB_RXCSR_DISNYET;
  866.  
  867.         } else {
  868.             csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
  869.  
  870.             if (csr & (MUSB_RXCSR_RXPKTRDY
  871.                     | MUSB_RXCSR_DMAENAB
  872.                     | MUSB_RXCSR_H_REQPKT))
  873.                 ERR("broken !rx_reinit, ep%d csr %04x\n",
  874.                         hw_ep->epnum, csr);
  875.  
  876.             /* scrub any stale state, leaving toggle alone */
  877.             csr &= MUSB_RXCSR_DISNYET;
  878.         }
  879.  
  880.         /* kick things off */
  881.  
  882.         if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {
  883.             /* candidate for DMA */
  884.             if (dma_channel) {
  885.                 dma_channel->actual_len = 0L;
  886.                 qh->segsize = len;
  887.  
  888.                 /* AUTOREQ is in a DMA register */
  889.                 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  890.                 csr = musb_readw(hw_ep->regs,
  891.                         MUSB_RXCSR);
  892.  
  893.                 /* unless caller treats short rx transfers as
  894.                  * errors, we dare not queue multiple transfers.
  895.                  */
  896.                 dma_ok = dma_controller->channel_program(
  897.                         dma_channel, packet_sz,
  898.                         !(urb->transfer_flags
  899.                             & URB_SHORT_NOT_OK),
  900.                         urb->transfer_dma,
  901.                         qh->segsize);
  902.                 if (!dma_ok) {
  903.                     dma_controller->channel_release(
  904.                             dma_channel);
  905.                     hw_ep->rx_channel = NULL;
  906.                     dma_channel = NULL;
  907.                 } else
  908.                     csr |= MUSB_RXCSR_DMAENAB;
  909.             }
  910.         }
  911.  
  912.         csr |= MUSB_RXCSR_H_REQPKT;
  913.         DBG(7, "RXCSR%d := %04x\n", epnum, csr);
  914.         musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  915.         csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
  916.     }
  917. }
  918.  
  919.  
  920. /*
  921.  * Service the default endpoint (ep0) as host.
  922.  * Return true until it's time to start the status stage.
  923.  */
  924. static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
  925. {
  926.     bool             more = false;
  927.     u8          *fifo_dest = NULL;
  928.     u16         fifo_count = 0;
  929.     struct musb_hw_ep   *hw_ep = musb->control_ep;
  930.     struct musb_qh      *qh = hw_ep->in_qh;
  931.     struct usb_ctrlrequest  *request;
  932.  
  933.     switch (musb->ep0_stage) {
  934.     case MUSB_EP0_IN:
  935.         fifo_dest = urb->transfer_buffer + urb->actual_length;
  936.         fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
  937.                    urb->actual_length);
  938.         if (fifo_count < len)
  939.             urb->status = -EOVERFLOW;
  940.  
  941.         musb_read_fifo(hw_ep, fifo_count, fifo_dest);
  942.  
  943.         urb->actual_length += fifo_count;
  944.         if (len < qh->maxpacket) {
  945.             /* always terminate on short read; it's
  946.              * rarely reported as an error.
  947.              */
  948.         } else if (urb->actual_length <
  949.                 urb->transfer_buffer_length)
  950.             more = true;
  951.         break;
  952.     case MUSB_EP0_START:
  953.         request = (struct usb_ctrlrequest *) urb->setup_packet;
  954.  
  955.         if (!request->wLength) {
  956.             DBG(4, "start no-DATA\n");
  957.             break;
  958.         } else if (request->bRequestType & USB_DIR_IN) {
  959.             DBG(4, "start IN-DATA\n");
  960.             musb->ep0_stage = MUSB_EP0_IN;
  961.             more = true;
  962.             break;
  963.         } else {
  964.             DBG(4, "start OUT-DATA\n");
  965.             musb->ep0_stage = MUSB_EP0_OUT;
  966.             more = true;
  967.         }
  968.         /* FALLTHROUGH */
  969.     case MUSB_EP0_OUT:
  970.         fifo_count = min_t(size_t, qh->maxpacket,
  971.                    urb->transfer_buffer_length -
  972.                    urb->actual_length);
  973.         if (fifo_count) {
  974.             fifo_dest = (u8 *) (urb->transfer_buffer
  975.                     + urb->actual_length);
  976.             DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
  977.                     fifo_count,
  978.                     (fifo_count == 1) ? "" : "s",
  979.                     fifo_dest);
  980.             musb_write_fifo(hw_ep, fifo_count, fifo_dest);
  981.  
  982.             urb->actual_length += fifo_count;
  983.             more = true;
  984.         }
  985.         break;
  986.     default:
  987.         ERR("bogus ep0 stage %d\n", musb->ep0_stage);
  988.         break;
  989.     }
  990.  
  991.     return more;
  992. }
  993.  
  994. /*
  995.  * Handle default endpoint interrupt as host. Only called in IRQ time
  996.  * from musb_interrupt().
  997.  *
  998.  * called with controller irqlocked
  999.  */
  1000. irqreturn_t musb_h_ep0_irq(struct musb *musb)
  1001. {
  1002.     struct urb      *urb;
  1003.     u16         csr, len;
  1004.     int         status = 0;
  1005.     void __iomem        *mbase = musb->mregs;
  1006.     struct musb_hw_ep   *hw_ep = musb->control_ep;
  1007.     void __iomem        *epio = hw_ep->regs;
  1008.     struct musb_qh      *qh = hw_ep->in_qh;
  1009.     bool            complete = false;
  1010.     irqreturn_t     retval = IRQ_NONE;
  1011.  
  1012.     /* ep0 only has one queue, "in" */
  1013.     urb = next_urb(qh);
  1014.  
  1015.     musb_ep_select(mbase, 0);
  1016.     csr = musb_readw(epio, MUSB_CSR0);
  1017.     len = (csr & MUSB_CSR0_RXPKTRDY)
  1018.             ? musb_readb(epio, MUSB_COUNT0)
  1019.             : 0;
  1020.  
  1021.     DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
  1022.         csr, qh, len, urb, musb->ep0_stage);
  1023.  
  1024.     /* if we just did status stage, we are done */
  1025.     if (MUSB_EP0_STATUS == musb->ep0_stage) {
  1026.         retval = IRQ_HANDLED;
  1027.         complete = true;
  1028.     }
  1029.  
  1030.     /* prepare status */
  1031.     if (csr & MUSB_CSR0_H_RXSTALL) {
  1032.         DBG(6, "STALLING ENDPOINT\n");
  1033.         status = -EPIPE;
  1034.  
  1035.     } else if (csr & MUSB_CSR0_H_ERROR) {
  1036.         DBG(2, "no response, csr0 %04x\n", csr);
  1037.         status = -EPROTO;
  1038.  
  1039.     } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
  1040.         DBG(2, "control NAK timeout\n");
  1041.  
  1042.         /* NOTE:  this code path would be a good place to PAUSE a
  1043.          * control transfer, if another one is queued, so that
  1044.          * ep0 is more likely to stay busy.
  1045.          *
  1046.          * if (qh->ring.next != &musb->control), then
  1047.          * we have a candidate... NAKing is *NOT* an error
  1048.          */
  1049.         musb_writew(epio, MUSB_CSR0, 0);
  1050.         retval = IRQ_HANDLED;
  1051.     }
  1052.  
  1053.     if (status) {
  1054.         DBG(6, "aborting\n");
  1055.         retval = IRQ_HANDLED;
  1056.         if (urb)
  1057.             urb->status = status;
  1058.         complete = true;
  1059.  
  1060.         /* use the proper sequence to abort the transfer */
  1061.         if (csr & MUSB_CSR0_H_REQPKT) {
  1062.             csr &= ~MUSB_CSR0_H_REQPKT;
  1063.             musb_writew(epio, MUSB_CSR0, csr);
  1064.             csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
  1065.             musb_writew(epio, MUSB_CSR0, csr);
  1066.         } else {
  1067.             csr |= MUSB_CSR0_FLUSHFIFO;
  1068.             musb_writew(epio, MUSB_CSR0, csr);
  1069.             musb_writew(epio, MUSB_CSR0, csr);
  1070.             csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
  1071.             musb_writew(epio, MUSB_CSR0, csr);
  1072.         }
  1073.  
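        /* a NAKLimit0 value of 0 disables the EP0 NAK timeout while the
         * transfer is torn down
         */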
  1074.         musb_writeb(epio, MUSB_NAKLIMIT0, 0);
  1075.  
  1076.         /* clear it */
  1077.         musb_writew(epio, MUSB_CSR0, 0);
  1078.     }
  1079.  
  1080.     if (unlikely(!urb)) {
  1081.         /* stop endpoint since we have no place for its data, this
  1082.          * SHOULD NEVER HAPPEN! */
  1083.         ERR("no URB for end 0\n");
  1084.  
  1085.         musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
  1086.         musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
  1087.         musb_writew(epio, MUSB_CSR0, 0);
  1088.  
  1089.         goto done;
  1090.     }
  1091.  
  1092.     if (!complete) {
  1093.         /* call common logic and prepare response */
  1094.         if (musb_h_ep0_continue(musb, len, urb)) {
  1095.             /* more packets required */
  1096.             csr = (MUSB_EP0_IN == musb->ep0_stage)
  1097.                 ?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
  1098.         } else {
  1099.             /* data transfer complete; perform status phase */
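            /* the status stage runs opposite to the data stage: an IN
             * status (REQPKT) follows OUT or no-data transfers, an OUT
             * status (TXPKTRDY) follows IN transfers
             */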
  1100.             if (usb_pipeout(urb->pipe)
  1101.                     || !urb->transfer_buffer_length)
  1102.                 csr = MUSB_CSR0_H_STATUSPKT
  1103.                     | MUSB_CSR0_H_REQPKT;
  1104.             else
  1105.                 csr = MUSB_CSR0_H_STATUSPKT
  1106.                     | MUSB_CSR0_TXPKTRDY;
  1107.  
  1108.             /* flag status stage */
  1109.             musb->ep0_stage = MUSB_EP0_STATUS;
  1110.  
  1111.             DBG(5, "ep0 STATUS, csr %04x\n", csr);
  1112.  
  1113.         }
  1114.         musb_writew(epio, MUSB_CSR0, csr);
  1115.         retval = IRQ_HANDLED;
  1116.     } else
  1117.         musb->ep0_stage = MUSB_EP0_IDLE;
  1118.  
  1119.     /* call completion handler if done */
  1120.     if (complete)
  1121.         musb_advance_schedule(musb, urb, hw_ep, 1);
  1122. done:
  1123.     return retval;
  1124. }
  1125.  
  1126.  
  1127. #ifdef CONFIG_USB_INVENTRA_DMA
  1128.  
  1129. /* Host side TX (OUT) using Mentor DMA works as follows:
  1130.     submit_urb ->
  1131.         - if queue was empty, Program Endpoint
  1132.         - ... which starts DMA to fifo in mode 1 or 0
  1133.  
  1134.     DMA Isr (transfer complete) -> TxAvail()
  1135.         - Stop DMA (~DmaEnab)   (<--- Alert ... currently happens
  1136.                     only in musb_cleanup_urb)
  1137.         - TxPktRdy has to be set in mode 0 or for
  1138.             short packets in mode 1.
  1139. */
  1140.  
  1141. #endif
  1142.  
  1143. /* Service a Tx-Available or dma completion irq for the endpoint */
  1144. void musb_host_tx(struct musb *musb, u8 epnum)
  1145. {
  1146.     int         pipe;
  1147.     bool            done = false;
  1148.     u16         tx_csr;
  1149.     size_t          wLength = 0;
  1150.     u8          *buf = NULL;
  1151.     struct urb      *urb;
  1152.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  1153.     void __iomem        *epio = hw_ep->regs;
  1154.     struct musb_qh      *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
  1155.                                 : hw_ep->out_qh;
  1156.     u32         status = 0;
  1157.     void __iomem        *mbase = musb->mregs;
  1158.     struct dma_channel  *dma;
  1159.  
  1160.     urb = next_urb(qh);
  1161.  
  1162.     musb_ep_select(mbase, epnum);
  1163.     tx_csr = musb_readw(epio, MUSB_TXCSR);
  1164.  
  1165.     /* with CPPI, DMA sometimes triggers "extra" irqs */
  1166.     if (!urb) {
  1167.         DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
  1168.         goto finish;
  1169.     }
  1170.  
  1171.     pipe = urb->pipe;
  1172.     dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
  1173.     DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
  1174.             dma ? ", dma" : "");
  1175.  
  1176.     /* check for errors */
  1177.     if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
  1178.         /* dma was disabled, fifo flushed */
  1179.         DBG(3, "TX end %d stall\n", epnum);
  1180.  
  1181.         /* stall; record URB status */
  1182.         status = -EPIPE;
  1183.  
  1184.     } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
  1185.         /* (NON-ISO) dma was disabled, fifo flushed */
  1186.         DBG(3, "TX 3strikes on ep=%d\n", epnum);
  1187.  
  1188.         status = -ETIMEDOUT;
  1189.  
  1190.     } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
  1191.         DBG(6, "TX end=%d device not responding\n", epnum);
  1192.  
  1193.         /* NOTE:  this code path would be a good place to PAUSE a
  1194.          * transfer, if there's some other (nonperiodic) tx urb
  1195.          * that could use this fifo.  (dma complicates it...)
  1196.          *
  1197.          * if (bulk && qh->ring.next != &hw_ep->out_list), then
  1198.          * we have a candidate... NAKing is *NOT* an error
  1199.          */
  1200.         musb_ep_select(mbase, epnum);
  1201.         musb_writew(epio, MUSB_TXCSR,
  1202.                 MUSB_TXCSR_H_WZC_BITS
  1203.                 | MUSB_TXCSR_TXPKTRDY);
  1204.         goto finish;
  1205.     }
  1206.  
  1207.     if (status) {
  1208.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1209.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1210.             (void) musb->dma_controller->channel_abort(dma);
  1211.         }
  1212.  
  1213.         /* do the proper sequence to abort the transfer in the
  1214.          * usb core; the dma engine should already be stopped.
  1215.          */
  1216.         musb_h_tx_flush_fifo(hw_ep);
  1217.         tx_csr &= ~(MUSB_TXCSR_AUTOSET
  1218.                 | MUSB_TXCSR_DMAENAB
  1219.                 | MUSB_TXCSR_H_ERROR
  1220.                 | MUSB_TXCSR_H_RXSTALL
  1221.                 | MUSB_TXCSR_H_NAKTIMEOUT
  1222.                 );
  1223.  
  1224.         musb_ep_select(mbase, epnum);
  1225.         musb_writew(epio, MUSB_TXCSR, tx_csr);
  1226.         /* REVISIT may need to clear FLUSHFIFO ... */
  1227.         musb_writew(epio, MUSB_TXCSR, tx_csr);
  1228.         musb_writeb(epio, MUSB_TXINTERVAL, 0);
  1229.  
  1230.         done = true;
  1231.     }
  1232.  
  1233.     /* second CPPI case: the DMA channel is still busy, so ignore this irq and wait for DMA completion */
  1234.     if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1235.         DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
  1236.         goto finish;
  1237.  
  1238.     }
  1239.  
  1240.     /* REVISIT this looks wrong... */
  1241.     if (!status || dma || usb_pipeisoc(pipe)) {
  1242.         if (dma)
  1243.             wLength = dma->actual_len;
  1244.         else
  1245.             wLength = qh->segsize;
  1246.         qh->offset += wLength;
  1247.  
  1248.         if (usb_pipeisoc(pipe)) {
  1249.             struct usb_iso_packet_descriptor    *d;
  1250.  
  1251.             d = urb->iso_frame_desc + qh->iso_idx;
  1252.             d->actual_length = qh->segsize;
  1253.             if (++qh->iso_idx >= urb->number_of_packets) {
  1254.                 done = true;
  1255.             } else {
  1256.                 d++;
  1257.                 buf = urb->transfer_buffer + d->offset;
  1258.                 wLength = d->length;
  1259.             }
  1260.         } else if (dma) {
  1261.             done = true;
  1262.         } else {
  1263.             /* see if we need to send more data, or ZLP */
  1264.             if (qh->segsize < qh->maxpacket)
  1265.                 done = true;
  1266.             else if (qh->offset == urb->transfer_buffer_length
  1267.                     && !(urb->transfer_flags
  1268.                         & URB_ZERO_PACKET))
  1269.                 done = true;
  1270.             if (!done) {
  1271.                 buf = urb->transfer_buffer
  1272.                         + qh->offset;
  1273.                 wLength = urb->transfer_buffer_length
  1274.                         - qh->offset;
  1275.             }
  1276.         }
  1277.     }
  1278.  
  1279.     /* urb->status != -EINPROGRESS means request has been faulted,
  1280.      * so we must abort this transfer after cleanup
  1281.      */
  1282.     if (urb->status != -EINPROGRESS) {
  1283.         done = true;
  1284.         if (status == 0)
  1285.             status = urb->status;
  1286.     }
  1287.  
  1288.     if (done) {
  1289.         /* set status */
  1290.         urb->status = status;
  1291.         urb->actual_length = qh->offset;
  1292.         musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
  1293.  
  1294.     } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
  1295.         /* WARN_ON(!buf); */
  1296.  
  1297.         /* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
  1298.          * (and presumably, fifo is not half-full) we should write TWO
  1299.          * packets before updating TXCSR ... other docs disagree ...
  1300.          */
  1301.         /* PIO:  start next packet in this URB */
  1302.         if (wLength > qh->maxpacket)
  1303.             wLength = qh->maxpacket;
  1304.         musb_write_fifo(hw_ep, wLength, buf);
  1305.         qh->segsize = wLength;
  1306.  
  1307.         musb_ep_select(mbase, epnum);
  1308.         musb_writew(epio, MUSB_TXCSR,
  1309.                 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
  1310.     } else
  1311.         DBG(1, "not complete, but dma enabled?\n");
  1312.  
  1313. finish:
  1314.     return;
  1315. }
  1316.  
  1317.  
  1318. #ifdef CONFIG_USB_INVENTRA_DMA
  1319.  
  1320. /* Host side RX (IN) using Mentor DMA works as follows:
  1321.     submit_urb ->
  1322.         - if queue was empty, ProgramEndpoint
  1323.         - first IN token is sent out (by setting ReqPkt)
  1324.     LinuxIsr -> RxReady()
  1325.     /\  => first packet is received
  1326.     |   - Set in mode 0 (DmaEnab, ~ReqPkt)
  1327.     |       -> DMA Isr (transfer complete) -> RxReady()
  1328.     |           - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
  1329.     |           - if urb not complete, send next IN token (ReqPkt)
  1330.     |              |        else complete urb.
  1331.     |              |
  1332.     ---------------------------
  1333.  *
  1334.  * Nuances of mode 1:
  1335.  *  For short packets, no ack (+RxPktRdy) is sent automatically
  1336.  *  (even if AutoClear is ON)
  1337.  *  For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
  1338.  *  automatically => major problem, as collecting the next packet becomes
  1339.  *  difficult. Hence mode 1 is not used.
  1340.  *
  1341.  * REVISIT
  1342.  *  All we care about at this driver level is that
  1343.  *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
  1344.  *       (b) termination conditions are: short RX, or buffer full;
  1345.  *       (c) fault modes include
  1346.  *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
  1347.  *             (and that endpoint's dma queue stops immediately)
  1348.  *           - overflow (full, PLUS more bytes in the terminal packet)
  1349.  *
  1350.  *  So for example, usb-storage sets URB_SHORT_NOT_OK, and would
  1351.  *  thus be a great candidate for using mode 1 ... for all but the
  1352.  *  last packet of one URB's transfer.
  1353.  */
  1354.  
  1355. #endif
  1356.  
  1357. /* Move the current qh to the tail of ep->in_list and schedule the next
  1358.  * qh from the list, to avoid endpoint starvation.
  1359.  */
  1360. static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
  1361. {
  1362.     struct dma_channel  *dma;
  1363.     struct urb  *urb;
  1364.     void __iomem    *mbase = musb->mregs;
  1365.     void __iomem    *epio = ep->regs;
  1366.     struct musb_qh  *cur_qh, *next_qh;
  1367.     u16 rx_csr;
  1368.  
  1369.     musb_ep_select(mbase, ep->epnum);
  1370.     dma = is_dma_capable() ? ep->rx_channel : NULL;
  1371.  
  1372.     /* clear nak timeout bit */
  1373.     rx_csr = musb_readw(epio, MUSB_RXCSR);
  1374.     rx_csr &= ~MUSB_RXCSR_DATAERROR;
  1375.     musb_writew(epio, MUSB_RXCSR, rx_csr);
  1376.  
  1377.     cur_qh = first_qh(&ep->in_list);
  1378.     if (cur_qh) {
  1379.         urb = next_urb(cur_qh);
  1380.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1381.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1382.             musb->dma_controller->channel_abort(dma);
  1383.             urb->actual_length += dma->actual_len;
  1384.             dma->actual_len = 0L;
  1385.         }
  1386.         musb_save_toggle(ep, 1, urb);
  1387.  
  1388.         /* remove cur_qh and add it back at the tail of ep->in_list */
  1389.         list_del(&cur_qh->ring);
  1390.         list_add_tail(&cur_qh->ring, &ep->in_list);
  1391.  
  1392.         /* get the next qh from ep->in_list */
  1393.         next_qh = first_qh(&ep->in_list);
  1394.  
  1395.         /* set rx_reinit and schedule the next qh */
  1396.         ep->rx_reinit = 1;
  1397.         musb_start_urb(musb, 1, next_qh);
  1398.     }
  1399. }
  1400.  
  1401. /*
  1402.  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
  1403.  * and high-bandwidth IN transfer cases.
  1404.  */
  1405. void musb_host_rx(struct musb *musb, u8 epnum)
  1406. {
  1407.     struct urb      *urb;
  1408.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  1409.     void __iomem        *epio = hw_ep->regs;
  1410.     struct musb_qh      *qh = hw_ep->in_qh;
  1411.     size_t          xfer_len;
  1412.     void __iomem        *mbase = musb->mregs;
  1413.     int         pipe;
  1414.     u16         rx_csr, val;
  1415.     bool            iso_err = false;
  1416.     bool            done = false;
  1417.     u32         status;
  1418.     struct dma_channel  *dma;
  1419.  
  1420.     musb_ep_select(mbase, epnum);
  1421.  
  1422.     urb = next_urb(qh);
  1423.     dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
  1424.     status = 0;
  1425.     xfer_len = 0;
  1426.  
  1427.     rx_csr = musb_readw(epio, MUSB_RXCSR);
  1428.     val = rx_csr;
  1429.  
  1430.     if (unlikely(!urb)) {
  1431.         /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
  1432.          * usbtest #11 (unlinks) triggers it regularly, sometimes
  1433.          * with fifo full.  (Only with DMA??)
  1434.          */
  1435.         DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
  1436.             musb_readw(epio, MUSB_RXCOUNT));
  1437.         musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
  1438.         return;
  1439.     }
  1440.  
  1441.     pipe = urb->pipe;
  1442.  
  1443.     DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
  1444.         epnum, rx_csr, urb->actual_length,
  1445.         dma ? dma->actual_len : 0);
  1446.  
  1447.     /* check for errors, concurrent stall & unlink is not really
  1448.      * handled yet! */
  1449.     if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
  1450.         DBG(3, "RX end %d STALL\n", epnum);
  1451.  
  1452.         /* stall; record URB status */
  1453.         status = -EPIPE;
  1454.  
  1455.     } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
  1456.         DBG(3, "end %d RX proto error\n", epnum);
  1457.  
  1458.         status = -EPROTO;
  1459.         musb_writeb(epio, MUSB_RXINTERVAL, 0);
  1460.  
  1461.     } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
  1462.  
  1463.         if (USB_ENDPOINT_XFER_ISOC != qh->type) {
  1464.             /* NOTE this code path would be a good place to PAUSE a
  1465.              * transfer, if there's some other (nonperiodic) rx urb
  1466.              * that could use this fifo.  (dma complicates it...)
  1467.              *
  1468.              * if (bulk && qh->ring.next != &hw_ep->in_list), then
  1469.              * we have a candidate... NAKing is *NOT* an error
  1470.              */
  1471.             DBG(6, "RX end %d NAK timeout\n", epnum);
  1472.             if (usb_pipebulk(urb->pipe) && qh->mux == 1 &&
  1473.                 !list_is_singular(&hw_ep->in_list)) {
  1474.                 musb_bulk_nak_timeout(musb, hw_ep);
  1475.                 return;
  1476.             }
  1477.             musb_ep_select(mbase, epnum);
  1478.             rx_csr &= ~MUSB_RXCSR_DATAERROR;
  1479.             musb_writew(epio, MUSB_RXCSR, rx_csr);
  1480.  
  1481.             goto finish;
  1482.         } else {
  1483.             DBG(4, "RX end %d ISO data error\n", epnum);
  1484.             /* packet error reported later */
  1485.             iso_err = true;
  1486.         }
  1487.     } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
  1488.         DBG(3, "end %d high bandwidth incomplete ISO packet received\n",
  1489.                 epnum);
  1490.         status = -EPROTO;
  1491.     }
  1492.  
  1493.     /* faults abort the transfer */
  1494.     if (status) {
  1495.         /* clean up dma and collect transfer count */
  1496.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1497.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1498.             (void) musb->dma_controller->channel_abort(dma);
  1499.             xfer_len = dma->actual_len;
  1500.         }
  1501.         musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
  1502.         musb_writeb(epio, MUSB_RXINTERVAL, 0);
  1503.         done = true;
  1504.         goto finish;
  1505.     }
  1506.  
  1507.     if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
  1508.         /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
  1509.         ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
  1510.         goto finish;
  1511.     }
  1512.  
  1513.     /* thorough shutdown for now ... given more precise fault handling
  1514.      * and better queueing support, we might keep a DMA pipeline going
  1515.      * while processing this irq for earlier completions.
  1516.      */
  1517.  
  1518.     /* FIXME this is _way_ too much in-line logic for Mentor DMA */
  1519.  
  1520.     if (!musb_inventra_dma() && (rx_csr & MUSB_RXCSR_H_REQPKT))  {
  1521.         /* REVISIT this happened for a while on some short reads...
  1522.          * the cleanup still needs investigation... looks bad...
  1523.          * and also duplicates dma cleanup code above ... plus,
  1524.          * shouldn't this be the "half full" double buffer case?
  1525.          */
  1526.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1527.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1528.             (void) musb->dma_controller->channel_abort(dma);
  1529.             xfer_len = dma->actual_len;
  1530.             done = true;
  1531.         }
  1532.  
  1533.         DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
  1534.                 xfer_len, dma ? ", dma" : "");
  1535.         rx_csr &= ~MUSB_RXCSR_H_REQPKT;
  1536.  
  1537.         musb_ep_select(mbase, epnum);
  1538.         musb_writew(epio, MUSB_RXCSR,
  1539.                 MUSB_RXCSR_H_WZC_BITS | rx_csr);
  1540.     }
  1541.  
  1542.     if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
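        /* A DMA transfer just completed on this endpoint: record the byte
         * count and clear the DMA-related CSR bits before deciding whether
         * the URB is done.
         */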
  1543.         xfer_len = dma->actual_len;
  1544.  
  1545.         val &= ~(MUSB_RXCSR_DMAENAB
  1546.             | MUSB_RXCSR_H_AUTOREQ
  1547.             | MUSB_RXCSR_AUTOCLEAR
  1548.             | MUSB_RXCSR_RXPKTRDY);
  1549.         musb_writew(hw_ep->regs, MUSB_RXCSR, val);
  1550.  
  1551. #ifdef CONFIG_USB_INVENTRA_DMA
  1552.         if (usb_pipeisoc(pipe)) {
  1553.             struct usb_iso_packet_descriptor *d;
  1554.  
  1555.             d = urb->iso_frame_desc + qh->iso_idx;
  1556.             d->actual_length = xfer_len;
  1557.  
  1558.             /* even if there was an error, we did the dma
  1559.              * for iso_frame_desc->length
  1560.              */
  1561.             if (d->status != -EILSEQ && d->status != -EOVERFLOW)
  1562.                 d->status = 0;
  1563.  
  1564.             if (++qh->iso_idx >= urb->number_of_packets)
  1565.                 done = true;
  1566.             else
  1567.                 done = false;
  1568.  
  1569.         } else {
  1570.             /* done if urb buffer is full or short packet is recd */
  1571.             done = (urb->actual_length + xfer_len >=
  1572.                     urb->transfer_buffer_length
  1573.                 || dma->actual_len < qh->maxpacket);
  1574.         }
  1575.  
  1576.         /* send IN token for next packet, without AUTOREQ */
  1577.         if (!done) {
  1578.             val |= MUSB_RXCSR_H_REQPKT;
  1579.             musb_writew(epio, MUSB_RXCSR,
  1580.                 MUSB_RXCSR_H_WZC_BITS | val);
  1581.         }
  1582.  
  1583.         DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
  1584.             done ? "off" : "reset",
  1585.             musb_readw(epio, MUSB_RXCSR),
  1586.             musb_readw(epio, MUSB_RXCOUNT));
  1587. #else
  1588.         done = true;
  1589. #endif
  1590.     } else if (urb->status == -EINPROGRESS) {
  1591.         /* if no errors, be sure a packet is ready for unloading */
  1592.         if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
  1593.             status = -EPROTO;
  1594.             ERR("Rx interrupt with no errors or packet!\n");
  1595.  
  1596.             /* FIXME this is another "SHOULD NEVER HAPPEN" */
  1597.  
  1598. /* SCRUB (RX) */
  1599.             /* do the proper sequence to abort the transfer */
  1600.             musb_ep_select(mbase, epnum);
  1601.             val &= ~MUSB_RXCSR_H_REQPKT;
  1602.             musb_writew(epio, MUSB_RXCSR, val);
  1603.             goto finish;
  1604.         }
  1605.  
  1606.         /* we are expecting IN packets */
  1607.         if (musb_inventra_dma() && dma) {
  1608.             struct dma_controller   *c;
  1609.             u16         rx_count;
  1610.             int         ret, length;
  1611.             dma_addr_t      buf;
  1612.  
  1613.             rx_count = musb_readw(epio, MUSB_RXCOUNT);
  1614.  
  1615.             DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
  1616.                     epnum, rx_count,
  1617.                     urb->transfer_dma
  1618.                         + urb->actual_length,
  1619.                     qh->offset,
  1620.                     urb->transfer_buffer_length);
  1621.  
  1622.             c = musb->dma_controller;
  1623.  
  1624.             if (usb_pipeisoc(pipe)) {
  1625.                 int status = 0;
  1626.                 struct usb_iso_packet_descriptor *d;
  1627.  
  1628.                 d = urb->iso_frame_desc + qh->iso_idx;
  1629.  
  1630.                 if (iso_err) {
  1631.                     status = -EILSEQ;
  1632.                     urb->error_count++;
  1633.                 }
  1634.                 if (rx_count > d->length) {
  1635.                     if (status == 0) {
  1636.                         status = -EOVERFLOW;
  1637.                         urb->error_count++;
  1638.                     }
  1639.                     DBG(2, "** OVERFLOW %d into %d\n",
  1640.                         rx_count, d->length);
  1641.  
  1642.                     length = d->length;
  1643.                 } else
  1644.                     length = rx_count;
  1645.                 d->status = status;
  1646.                 buf = urb->transfer_dma + d->offset;
  1647.             } else {
  1648.                 length = rx_count;
  1649.                 buf = urb->transfer_dma +
  1650.                         urb->actual_length;
  1651.             }
  1652.  
  1653.             dma->desired_mode = 0;
  1654. #ifdef USE_MODE1
  1655.             /* because of the issue below, mode 1 will
  1656.              * only rarely behave with correct semantics.
  1657.              */
  1658.             if ((urb->transfer_flags &
  1659.                         URB_SHORT_NOT_OK)
  1660.                 && (urb->transfer_buffer_length -
  1661.                         urb->actual_length)
  1662.                     > qh->maxpacket)
  1663.                 dma->desired_mode = 1;
  1664.             if (rx_count < hw_ep->max_packet_sz_rx) {
  1665.                 length = rx_count;
  1666.                 dma->desired_mode = 0;
  1667.             } else {
  1668.                 length = urb->transfer_buffer_length;
  1669.             }
  1670. #endif
  1671.  
  1672. /* Disadvantage of using mode 1:
  1673.  *  It's basically usable only for mass storage class; essentially all
  1674.  *  other protocols also terminate transfers on short packets.
  1675.  *
  1676.  * Details:
  1677.  *  An extra IN token is sent at the end of the transfer (due to AUTOREQ).
  1678.  *  If you try to use mode 1 for (transfer_buffer_length - 512), and try
  1679.  *  to use the extra IN token to grab the last packet using mode 0, the
  1680.  *  problem is that you cannot be sure when the device will send the last
  1681.  *  packet and when RxPktRdy will be set.  Sometimes the packet arrives too
  1682.  *  soon, so it gets lost when RxCSR is re-set at the end of the mode 1
  1683.  *  transfer; sometimes it arrives just a little too late, so that if you
  1684.  *  try to configure for mode 0 soon after the mode 1 transfer completes,
  1685.  *  you will find rxcount 0.  Nor can you simply wait for an interrupt
  1686.  *  when the packet arrives: you won't get one!
  1687.  */
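            /* Reprogram RXCSR for the DMA transfer: drop REQPKT, use
             * AUTOREQ only for mode 1, and enable DMA (with AUTOCLEAR
             * except for high bandwidth iso, where it doesn't work).
             */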
  1688.  
  1689.             val = musb_readw(epio, MUSB_RXCSR);
  1690.             val &= ~MUSB_RXCSR_H_REQPKT;
  1691.  
  1692.             if (dma->desired_mode == 0)
  1693.                 val &= ~MUSB_RXCSR_H_AUTOREQ;
  1694.             else
  1695.                 val |= MUSB_RXCSR_H_AUTOREQ;
  1696.  
  1697.             if (qh->maxpacket & ~0x7ff)
  1698.                 /* Autoclear doesn't work in high bandwidth iso */
  1699.                 val |= MUSB_RXCSR_DMAENAB;
  1700.             else
  1701.                 val |= MUSB_RXCSR_AUTOCLEAR
  1702.                     | MUSB_RXCSR_DMAENAB;
  1703.  
  1704.             musb_writew(epio, MUSB_RXCSR,
  1705.                 MUSB_RXCSR_H_WZC_BITS | val);
  1706.  
  1707.             /* REVISIT: if/when actual_length != 0,
  1708.              * transfer_buffer_length needs to be
  1709.              * adjusted first...
  1710.              */
  1711.             ret = c->channel_program(
  1712.                 dma, qh->maxpacket,
  1713.                 dma->desired_mode, buf, length);
  1714.  
  1715.             if (!ret) {
  1716.                 c->channel_release(dma);
  1717.                 hw_ep->rx_channel = NULL;
  1718.                 dma = NULL;
  1719.                 /* REVISIT reset CSR */
  1720.             }
  1721.         }
  1722.  
  1723.         if (!dma) {
  1724.             done = musb_host_packet_rx(musb, urb,
  1725.                     epnum, iso_err);
  1726.             DBG(6, "read %spacket\n", done ? "last " : "");
  1727.         }
  1728.     }
  1729.  
  1730. finish:
  1731.     urb->actual_length += xfer_len;
  1732.     qh->offset += xfer_len;
  1733.     if (done) {
  1734.         if (urb->status == -EINPROGRESS)
  1735.             urb->status = status;
  1736.         musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
  1737.     }
  1738. }
  1739.  
  1740. /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
  1741.  * the software schedule associates multiple such nodes with a given
  1742.  * host side hardware endpoint + direction; scheduling may activate
  1743.  * that hardware endpoint.
  1744.  */
  1745. static int musb_schedule(
  1746.     struct musb     *musb,
  1747.     struct musb_qh      *qh,
  1748.     int         is_in)
  1749. {
  1750.     int         idle;
  1751.     int         best_diff;
  1752.     int         best_end, epnum;
  1753.     struct musb_hw_ep   *hw_ep = NULL;
  1754.     struct list_head    *head = NULL;
  1755.     u16         maxpacket;
  1756.  
  1757.     /* use fixed hardware for control and bulk */
  1758.     if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
  1759.         hw_ep = musb->control_ep;
  1760.         head = &hw_ep->in_list;
  1761.         goto success;
  1762.     }
  1763.  
  1764.     /* else, periodic transfers get muxed to other endpoints */
  1765.  
  1766.     /* FIXME this doesn't consider direction, so it can only
  1767.      * work for one half of the endpoint hardware, and assumes
  1768.      * the previous cases handled all non-shared endpoints...
  1769.      */
  1770.  
  1771.     /* we know this qh hasn't been scheduled, so all we need to do
  1772.      * is choose which hardware endpoint to put it on ...
  1773.      *
  1774.      * REVISIT what we really want here is a regular schedule tree
  1775.      * like e.g. OHCI uses, but for now musb->periodic is just an
  1776.      * array of the _single_ logical endpoint associated with a
  1777.      * given physical one (identity mapping logical->physical).
  1778.      *
  1779.      * that simplistic approach makes TT scheduling a lot simpler;
  1780.      * there is none, and thus none of its complexity...
  1781.      */
  1782.     best_diff = 4096;
  1783.     best_end = -1;
  1784.  
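    /* Bits 12:11 of wMaxPacketSize encode additional (high bandwidth)
     * transactions per microframe; fold them into the byte count we
     * need to fit into a hardware FIFO.
     */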
  1785.     if (qh->maxpacket & (1 << 11))
  1786.         maxpacket = 2 * (qh->maxpacket & 0x7ff);
  1787.     else if (qh->maxpacket & (1 << 12))
  1788.         maxpacket = 3 * (qh->maxpacket & 0x7ff);
  1789.     else
  1790.         maxpacket = (qh->maxpacket & 0x7ff);
  1791.  
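    /* Best-fit search: among hardware endpoints not already claimed,
     * pick the one whose FIFO size most closely fits this maxpacket.
     */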
  1792.     for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
  1793.         int diff;
  1794.  
  1795.         if (musb->periodic[epnum])
  1796.             continue;
  1797.         hw_ep = &musb->endpoints[epnum];
  1798.         if (hw_ep == musb->bulk_ep)
  1799.             continue;
  1800.  
  1801.         if (is_in)
  1802.             diff = hw_ep->max_packet_sz_rx - maxpacket;
  1803.         else
  1804.             diff = hw_ep->max_packet_sz_tx - maxpacket;
  1805.  
  1806.         if (diff >= 0 && best_diff > diff) {
  1807.             best_diff = diff;
  1808.             best_end = epnum;
  1809.         }
  1810.     }
  1811.     /* use bulk reserved ep1 if no other ep is free */
  1812.     if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
  1813.         hw_ep = musb->bulk_ep;
  1814.         if (is_in)
  1815.             head = &hw_ep->in_list;
  1816.         else
  1817.             head = &hw_ep->out_list;
  1818.         /* Enable the bulk NAK timeout scheme when bulk requests are
  1819.          * multiplexed.  This scheme doesn't work in the high speed to
  1820.          * full speed case, since NAK interrupts are not generated for a
  1821.          * full speed device connected to a high speed device.
  1822.          * The NAK limit uses the logarithmic encoding: 8 gives 2^7 = 128
  1823.          * uframes (16 ms) for HS, 4 gives 2^3 = 8 frames (8 ms) for FS.
  1824.          */
  1825.         if (is_in && qh->dev)
  1826.             qh->intv_reg =
  1827.                 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
  1828.         goto success;
  1829.     } else if (best_end < 0) {
  1830.         return -ENOSPC;
  1831.     }
  1832.  
  1833.     idle = 1;
  1834.     qh->mux = 0;
  1835.     hw_ep = musb->endpoints + best_end;
  1836.     musb->periodic[best_end] = qh;
  1837.     DBG(4, "qh %p periodic slot %d\n", qh, best_end);
  1838. success:
  1839.     if (head) {
  1840.         idle = list_empty(head);
  1841.         list_add_tail(&qh->ring, head);
  1842.         qh->mux = 1;
  1843.     }
  1844.     qh->hw_ep = hw_ep;
  1845.     qh->hep->hcpriv = qh;
  1846.     if (idle)
  1847.         musb_start_urb(musb, is_in, qh);
  1848.     return 0;
  1849. }
  1850.  
  1851. static int musb_urb_enqueue(
  1852.     struct usb_hcd          *hcd,
  1853.     struct urb          *urb,
  1854.     gfp_t               mem_flags)
  1855. {
  1856.     unsigned long           flags;
  1857.     struct musb         *musb = hcd_to_musb(hcd);
  1858.     struct usb_host_endpoint    *hep = urb->ep;
  1859.     struct musb_qh          *qh;
  1860.     struct usb_endpoint_descriptor  *epd = &hep->desc;
  1861.     int             ret;
  1862.     unsigned            type_reg;
  1863.     unsigned            interval;
  1864.  
  1865.     /* host role must be active */
  1866.     if (!is_host_active(musb) || !musb->is_active)
  1867.         return -ENODEV;
  1868.  
  1869.     spin_lock_irqsave(&musb->lock, flags);
  1870.     ret = usb_hcd_link_urb_to_ep(hcd, urb);
  1871.     qh = ret ? NULL : hep->hcpriv;
  1872.     if (qh)
  1873.         urb->hcpriv = qh;
  1874.     spin_unlock_irqrestore(&musb->lock, flags);
  1875.  
  1876.     /* DMA mapping was already done, if needed, and this urb is on
  1877.      * hep->urb_list now ... so we're done, unless hep wasn't yet
  1878.      * scheduled onto a live qh.
  1879.      *
  1880.      * REVISIT best to keep urb->hcpriv valid until the endpoint gets
  1881.      * disabled, testing for empty qh->ring and avoiding qh setup costs
  1882.      * except for the first urb queued after a config change.
  1883.      */
  1884.     if (qh || ret)
  1885.         return ret;
  1886.  
  1887.     /* Allocate and initialize qh, minimizing the work done each time
  1888.      * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
  1889.      *
  1890.      * REVISIT consider a dedicated qh kmem_cache, so it's harder
  1891.      * for bugs in other kernel code to break this driver...
  1892.      */
  1893.     qh = kzalloc(sizeof *qh, mem_flags);
  1894.     if (!qh) {
  1895.         spin_lock_irqsave(&musb->lock, flags);
  1896.         usb_hcd_unlink_urb_from_ep(hcd, urb);
  1897.         spin_unlock_irqrestore(&musb->lock, flags);
  1898.         return -ENOMEM;
  1899.     }
  1900.  
  1901.     qh->hep = hep;
  1902.     qh->dev = urb->dev;
  1903.     INIT_LIST_HEAD(&qh->ring);
  1904.     qh->is_ready = 1;
  1905.  
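    /* NOTE: wMaxPacketSize still carries the high bandwidth bits (12:11)
     * here; musb_schedule() decodes them when sizing the FIFO.
     */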
  1906.     qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
  1907.  
  1908.     qh->epnum = usb_endpoint_num(epd);
  1909.     qh->type = usb_endpoint_type(epd);
  1910.  
  1911.     /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
  1912.     qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
  1913.  
  1914.     /* precompute rxtype/txtype/type0 register */
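    /* Speed is encoded in bits 7:6, per the MUSB multipoint TXTYPE/RXTYPE
     * layout: 0x40 = high, 0x80 = full, 0xc0 = low speed.
     */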
  1915.     type_reg = (qh->type << 4) | qh->epnum;
  1916.     switch (urb->dev->speed) {
  1917.     case USB_SPEED_LOW:
  1918.         type_reg |= 0xc0;
  1919.         break;
  1920.     case USB_SPEED_FULL:
  1921.         type_reg |= 0x80;
  1922.         break;
  1923.     default:
  1924.         type_reg |= 0x40;
  1925.     }
  1926.     qh->type_reg = type_reg;
  1927.  
  1928.     /* Precompute RXINTERVAL/TXINTERVAL register */
  1929.     switch (qh->type) {
  1930.     case USB_ENDPOINT_XFER_INT:
  1931.         /*
  1932.          * Full/low speeds use the linear encoding,
  1933.          * high speed uses the logarithmic encoding.
  1934.          */
  1935.         if (urb->dev->speed <= USB_SPEED_FULL) {
  1936.             interval = max_t(u8, epd->bInterval, 1);
  1937.             break;
  1938.         }
  1939.         /* FALLTHROUGH */
  1940.     case USB_ENDPOINT_XFER_ISOC:
  1941.         /* ISO always uses logarithmic encoding */
  1942.         interval = min_t(u8, epd->bInterval, 16);
  1943.         break;
  1944.     default:
  1945.         /* REVISIT we actually want to use NAK limits, hinting to the
  1946.          * transfer scheduling logic to try some other qh, e.g. try
  1947.          * for 2 msec first:
  1948.          *
  1949.          * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
  1950.          *
  1951.          * The downside of disabling this is that transfer scheduling
  1952.          * gets VERY unfair for nonperiodic transfers; a misbehaving
  1953.          * peripheral could make that hurt.  Or for reads, one that's
  1954.          * perfectly normal:  network and other drivers keep reads
  1955.          * posted at all times, having one pending for a week should
  1956.          * be perfectly safe.
  1957.          *
  1958.          * The upside of disabling it is that we avoid writing transfer
  1959.          * scheduling code here, putting that work aside for a while.
  1960.          */
  1961.         interval = 0;
  1962.     }
  1963.     qh->intv_reg = interval;
  1964.  
  1965.     /* precompute addressing for external hub/tt ports */
  1966.     if (musb->is_multipoint) {
  1967.         struct usb_device   *parent = urb->dev->parent;
  1968.  
  1969.         if (parent != hcd->self.root_hub) {
  1970.             qh->h_addr_reg = (u8) parent->devnum;
  1971.  
  1972.             /* set up tt info if needed */
  1973.             if (urb->dev->tt) {
  1974.                 qh->h_port_reg = (u8) urb->dev->ttport;
  1975.                 if (urb->dev->tt->hub)
  1976.                     qh->h_addr_reg =
  1977.                         (u8) urb->dev->tt->hub->devnum;
  1978.                 if (urb->dev->tt->multi)
  1979.                     qh->h_addr_reg |= 0x80;
  1980.             }
  1981.         }
  1982.     }
  1983.  
  1984.     /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
  1985.      * until we get real dma queues (with an entry for each urb/buffer),
  1986.      * we only have work to do in the former case.
  1987.      */
  1988.     spin_lock_irqsave(&musb->lock, flags);
  1989.     if (hep->hcpriv) {
  1990.         /* some concurrent activity submitted another urb to hep...
  1991.          * odd, rare, error prone, but legal.
  1992.          */
  1993.         kfree(qh);
  1994.         ret = 0;
  1995.     } else
  1996.         ret = musb_schedule(musb, qh,
  1997.                 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
  1998.  
  1999.     if (ret == 0) {
  2000.         urb->hcpriv = qh;
  2001.         /* FIXME set urb->start_frame for iso/intr, it's tested in
  2002.          * musb_start_urb(), but otherwise only konicawc cares ...
  2003.          */
  2004.     }
  2005.     spin_unlock_irqrestore(&musb->lock, flags);
  2006.  
  2007.     if (ret != 0) {
  2008.         spin_lock_irqsave(&musb->lock, flags);
  2009.         usb_hcd_unlink_urb_from_ep(hcd, urb);
  2010.         spin_unlock_irqrestore(&musb->lock, flags);
  2011.         kfree(qh);
  2012.     }
  2013.     return ret;
  2014. }
  2015.  
  2016.  
  2017. /*
  2018.  * abort a transfer that's at the head of a hardware queue.
  2019.  * called with controller locked, irqs blocked
  2020.  * that hardware queue advances to the next transfer, unless prevented
  2021.  */
  2022. static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
  2023. {
  2024.     struct musb_hw_ep   *ep = qh->hw_ep;
  2025.     void __iomem        *epio = ep->regs;
  2026.     unsigned        hw_end = ep->epnum;
  2027.     void __iomem        *regs = ep->musb->mregs;
  2028.     u16         csr;
  2029.     int         status = 0;
  2030.  
  2031.     musb_ep_select(regs, hw_end);
  2032.  
  2033.     if (is_dma_capable()) {
  2034.         struct dma_channel  *dma;
  2035.  
  2036.         dma = is_in ? ep->rx_channel : ep->tx_channel;
  2037.         if (dma) {
  2038.             status = ep->musb->dma_controller->channel_abort(dma);
  2039.             DBG_nonverb(status ? 1 : 3,
  2040.                 "abort %cX%d DMA for urb %p --> %d\n",
  2041.                 is_in ? 'R' : 'T', ep->epnum,
  2042.                 urb, status);
  2043.             urb->actual_length += dma->actual_len;
  2044.         }
  2045.     }
  2046.  
  2047.     /* turn off DMA requests, discard state, stop polling ... */
  2048.     if (is_in) {
  2049.         /* giveback saves bulk toggle */
  2050.         csr = musb_h_flush_rxfifo(ep, 0);
  2051.  
  2052.         /* REVISIT we still get an irq; should likely clear the
  2053.          * endpoint's irq status here to avoid bogus irqs.
  2054.          * clearing that status is platform-specific...
  2055.          */
  2056.     } else {
  2057.         musb_h_tx_flush_fifo(ep);
  2058.         csr = musb_readw(epio, MUSB_TXCSR);
  2059.         csr &= ~(MUSB_TXCSR_AUTOSET
  2060.             | MUSB_TXCSR_DMAENAB
  2061.             | MUSB_TXCSR_H_RXSTALL
  2062.             | MUSB_TXCSR_H_NAKTIMEOUT
  2063.             | MUSB_TXCSR_H_ERROR
  2064.             | MUSB_TXCSR_TXPKTRDY);
  2065.         musb_writew(epio, MUSB_TXCSR, csr);
  2066.         /* REVISIT may need to clear FLUSHFIFO ... */
  2067.         musb_writew(epio, MUSB_TXCSR, csr);
  2068.         /* flush cpu writebuffer */
  2069.         csr = musb_readw(epio, MUSB_TXCSR);
  2070.     }
  2071.     if (status == 0)
  2072.         musb_advance_schedule(ep->musb, urb, ep, is_in);
  2073.     return status;
  2074. }
  2075.  
  2076. static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
  2077. {
  2078.     struct musb     *musb = hcd_to_musb(hcd);
  2079.     struct musb_qh      *qh;
  2080.     struct list_head    *sched;
  2081.     unsigned long       flags;
  2082.     int         ret;
  2083.  
  2084.     DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
  2085.             usb_pipedevice(urb->pipe),
  2086.             usb_pipeendpoint(urb->pipe),
  2087.             usb_pipein(urb->pipe) ? "in" : "out");
  2088.  
  2089.     spin_lock_irqsave(&musb->lock, flags);
  2090.     ret = usb_hcd_check_unlink_urb(hcd, urb, status);
  2091.     if (ret)
  2092.         goto done;
  2093.  
  2094.     qh = urb->hcpriv;
  2095.     if (!qh)
  2096.         goto done;
  2097.  
  2098.     /* Any URB not actively programmed into endpoint hardware can be
  2099.      * immediately given back; that's any URB not at the head of an
  2100.      * endpoint queue, unless someday we get real DMA queues.  And even
  2101.      * if it's at the head, it might not be known to the hardware...
  2102.      *
  2103.      * Otherwise abort current transfer, pending dma, etc.; urb->status
  2104.      * has already been updated.  This is a synchronous abort; it'd be
  2105.      * OK to hold off until after some IRQ, though.
  2106.      */
  2107.     if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
  2108.         ret = -EINPROGRESS;
  2109.     else {
  2110.         switch (qh->type) {
  2111.         case USB_ENDPOINT_XFER_CONTROL:
  2112.             sched = &musb->control_ep->in_list;
  2113.             break;
  2114.         case USB_ENDPOINT_XFER_BULK:
  2115.             if (qh->mux == 1) {
  2116.                 if (usb_pipein(urb->pipe))
  2117.                     sched = &musb->bulk_ep->in_list;
  2118.                 else
  2119.                     sched = &musb->bulk_ep->out_list;
  2120.                 break;
  2121.             }
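            /* else FALLTHROUGH: a non-multiplexed bulk qh owns its
             * hardware endpoint, so handle it like the default case.
             */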
  2122.         default:
  2123.             /* REVISIT when we get a schedule tree, periodic
  2124.              * transfers won't always be at the head of a
  2125.              * singleton queue...
  2126.              */
  2127.             sched = NULL;
  2128.             break;
  2129.         }
  2130.     }
  2131.  
  2132.     /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
  2133.     if (ret < 0 || (sched && qh != first_qh(sched))) {
  2134.         int ready = qh->is_ready;
  2135.  
  2136.         ret = 0;
  2137.         qh->is_ready = 0;
  2138.         __musb_giveback(musb, urb, 0);
  2139.         qh->is_ready = ready;
  2140.  
  2141.         /* If nothing else (usually musb_giveback) is using it
  2142.          * and its URB list has emptied, recycle this qh.
  2143.          */
  2144.         if (ready && list_empty(&qh->hep->urb_list)) {
  2145.             qh->hep->hcpriv = NULL;
  2146.             list_del(&qh->ring);
  2147.             kfree(qh);
  2148.         }
  2149.     } else
  2150.         ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
  2151. done:
  2152.     spin_unlock_irqrestore(&musb->lock, flags);
  2153.     return ret;
  2154. }
  2155.  
  2156. /* disable an endpoint */
  2157. static void
  2158. musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
  2159. {
  2160.     u8          epnum = hep->desc.bEndpointAddress;
  2161.     unsigned long       flags;
  2162.     struct musb     *musb = hcd_to_musb(hcd);
  2163.     u8          is_in = epnum & USB_DIR_IN;
  2164.     struct musb_qh      *qh;
  2165.     struct urb      *urb;
  2166.     struct list_head    *sched;
  2167.  
  2168.     spin_lock_irqsave(&musb->lock, flags);
  2169.  
  2170.     qh = hep->hcpriv;
  2171.     if (qh == NULL)
  2172.         goto exit;
  2173.  
  2174.     switch (qh->type) {
  2175.     case USB_ENDPOINT_XFER_CONTROL:
  2176.         sched = &musb->control_ep->in_list;
  2177.         break;
  2178.     case USB_ENDPOINT_XFER_BULK:
  2179.         if (qh->mux == 1) {
  2180.             if (is_in)
  2181.                 sched = &musb->bulk_ep->in_list;
  2182.             else
  2183.                 sched = &musb->bulk_ep->out_list;
  2184.             break;
  2185.         }
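        /* else FALLTHROUGH: a non-multiplexed bulk qh owns its hardware
         * endpoint, so handle it like the default case.
         */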
  2186.     default:
  2187.         /* REVISIT when we get a schedule tree, periodic transfers
  2188.          * won't always be at the head of a singleton queue...
  2189.          */
  2190.         sched = NULL;
  2191.         break;
  2192.     }
  2193.  
  2194.     /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
  2195.  
  2196.     /* kick first urb off the hardware, if needed */
  2197.     qh->is_ready = 0;
  2198.     if (!sched || qh == first_qh(sched)) {
  2199.         urb = next_urb(qh);
  2200.  
  2201.         /* make software (then hardware) stop ASAP */
  2202.         if (!urb->unlinked)
  2203.             urb->status = -ESHUTDOWN;
  2204.  
  2205.         /* cleanup */
  2206.         musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
  2207.  
  2208.         /* Then nuke all the others ... and advance the
  2209.          * queue on hw_ep (e.g. bulk ring) when we're done.
  2210.          */
  2211.         while (!list_empty(&hep->urb_list)) {
  2212.             urb = next_urb(qh);
  2213.             urb->status = -ESHUTDOWN;
  2214.             musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
  2215.         }
  2216.     } else {
  2217.         /* Just empty the queue; the hardware is busy with
  2218.          * other transfers, and since !qh->is_ready nothing
  2219.          * will activate any of these as it advances.
  2220.          */
  2221.         while (!list_empty(&hep->urb_list))
  2222.             __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
  2223.  
  2224.         hep->hcpriv = NULL;
  2225.         list_del(&qh->ring);
  2226.         kfree(qh);
  2227.     }
  2228. exit:
  2229.     spin_unlock_irqrestore(&musb->lock, flags);
  2230. }
  2231.  
  2232. static int musb_h_get_frame_number(struct usb_hcd *hcd)
  2233. {
  2234.     struct musb *musb = hcd_to_musb(hcd);
  2235.  
  2236.     return musb_readw(musb->mregs, MUSB_FRAME);
  2237. }
  2238.  
  2239. static int musb_h_start(struct usb_hcd *hcd)
  2240. {
  2241.     struct musb *musb = hcd_to_musb(hcd);
  2242.  
  2243.     /* NOTE: musb_start() is called when the hub driver turns
  2244.      * on port power, or when (OTG) peripheral starts.
  2245.      */
  2246.     hcd->state = HC_STATE_RUNNING;
  2247.     musb->port1_status = 0;
  2248.     return 0;
  2249. }
  2250.  
  2251. static void musb_h_stop(struct usb_hcd *hcd)
  2252. {
  2253.     musb_stop(hcd_to_musb(hcd));
  2254.     hcd->state = HC_STATE_HALT;
  2255. }
  2256.  
  2257. static int musb_bus_suspend(struct usb_hcd *hcd)
  2258. {
  2259.     struct musb *musb = hcd_to_musb(hcd);
  2260.  
  2261.     if (musb->xceiv->state == OTG_STATE_A_SUSPEND)
  2262.         return 0;
  2263.  
  2264.     if (is_host_active(musb) && musb->is_active) {
  2265.         WARNING("trying to suspend as %s is_active=%i\n",
  2266.             otg_state_string(musb), musb->is_active);
  2267.         return -EBUSY;
  2268.     } else
  2269.         return 0;
  2270. }
  2271.  
  2272. static int musb_bus_resume(struct usb_hcd *hcd)
  2273. {
  2274.     /* resuming child port does the work */
  2275.     return 0;
  2276. }
  2277.  
  2278. const struct hc_driver musb_hc_driver = {
  2279.     .description        = "musb-hcd",
  2280.     .product_desc       = "MUSB HDRC host driver",
  2281.     .hcd_priv_size      = sizeof(struct musb),
  2282.     .flags          = HCD_USB2 | HCD_MEMORY,
  2283.  
  2284.     /* not using irq handler or reset hooks from usbcore, since
  2285.      * those must be shared with peripheral code for OTG configs
  2286.      */
  2287.  
  2288.     .start          = musb_h_start,
  2289.     .stop           = musb_h_stop,
  2290.  
  2291.     .get_frame_number   = musb_h_get_frame_number,
  2292.  
  2293.     .urb_enqueue        = musb_urb_enqueue,
  2294.     .urb_dequeue        = musb_urb_dequeue,
  2295.     .endpoint_disable   = musb_h_disable,
  2296.  
  2297.     .hub_status_data    = musb_hub_status_data,
  2298.     .hub_control        = musb_hub_control,
  2299.     .bus_suspend        = musb_bus_suspend,
  2300.     .bus_resume     = musb_bus_resume,
  2301.     /* .start_port_reset    = NULL, */
  2302.     /* .hub_irq_enable  = NULL, */
  2303. };