  1. /*
  2.  * MUSB OTG driver host support
  3.  *
  4.  * Copyright 2005 Mentor Graphics Corporation
  5.  * Copyright (C) 2005-2006 by Texas Instruments
  6.  * Copyright (C) 2006-2007 Nokia Corporation
  7.  *
  8.  * This program is free software; you can redistribute it and/or
  9.  * modify it under the terms of the GNU General Public License
  10.  * version 2 as published by the Free Software Foundation.
  11.  *
  12.  * This program is distributed in the hope that it will be useful, but
  13.  * WITHOUT ANY WARRANTY; without even the implied warranty of
  14.  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15.  * General Public License for more details.
  16.  *
  17.  * You should have received a copy of the GNU General Public License
  18.  * along with this program; if not, write to the Free Software
  19.  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  20.  * 02110-1301 USA
  21.  *
  22.  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
  23.  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  24.  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  25.  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  26.  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  27.  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  28.  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  29.  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  30.  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  31.  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  32.  *
  33.  */
  34.  
  35. #include <linux/module.h>
  36. #include <linux/kernel.h>
  37. #include <linux/delay.h>
  38. #include <linux/sched.h>
  39. #include <linux/slab.h>
  40. #include <linux/errno.h>
  41. #include <linux/init.h>
  42. #include <linux/list.h>
  43.  
  44. #include "musb_core.h"
  45. #include "musb_host.h"
  46.  
  47.  
  48. /* MUSB HOST status 22-mar-2006
  49.  *
  50.  * - There's still lots of partial code duplication for fault paths, so
  51.  *   they aren't handled as consistently as they need to be.
  52.  *
  53.  * - PIO mostly behaved when last tested.
  54.  *     + including ep0, with all usbtest cases 9, 10
  55.  *     + usbtest 14 (ep0out) doesn't seem to run at all
  56.  *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
  57.  *       configurations, but otherwise double buffering passes basic tests.
  58.  *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
  59.  *
  60.  * - DMA (CPPI) ... partially behaves, not currently recommended
  61.  *     + about 1/15 the speed of typical EHCI implementations (PCI)
  62.  *     + RX, all too often reqpkt seems to misbehave after tx
  63.  *     + TX, no known issues (other than evident silicon issue)
  64.  *
  65.  * - DMA (Mentor/OMAP) ...has at least toggle update problems
  66.  *
  67.  * - Still no traffic scheduling code to keep NAKing bulk or control
  68.  *   transfers from starving other requests, or to make efficient use
  69.  *   of hardware with periodic transfers.  (Note that network drivers
  70.  *   commonly post bulk reads that stay pending for a long time; these
  71.  *   would make very visible trouble.)
  72.  *
  73.  * - Not tested with HNP, but some SRP paths seem to behave.
  74.  *
  75.  * NOTE 24-August-2006:
  76.  *
  77.  * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
  78.  *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
  79.  *   mostly works, except that with "usbnet" it's easy to trigger cases
  80.  *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
  81.  *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
  82.  *   although ARP RX wins.  (That test was done with a full speed link.)
  83.  */
  84.  
  85.  
  86. /*
  87.  * NOTE on endpoint usage:
  88.  *
  89.  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
  90.  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
  91.  *
  92.  * (Yes, bulk _could_ use more of the endpoints than that, and would even
  93.  * benefit from it ... one remote device may easily be NAKing while others
  94.  * need to perform transfers in that same direction.  The same thing could
  95.  * be done in software though, assuming dma cooperates.)
  96.  *
  97.  * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
  98.  * So far that scheduling is both dumb and optimistic:  the endpoint will be
  99.  * "claimed" until its software queue is no longer refilled.  No multiplexing
  100.  * of transfers between endpoints, or anything clever.
  101.  */
  102.  
  103.  
  104. static void musb_ep_program(struct musb *musb, u8 epnum,
  105.             struct urb *urb, unsigned int is_out,
  106.             u8 *buf, u32 len);
  107.  
  108. /*
  109.  * Clear TX fifo. Needed to avoid BABBLE errors.
  110.  */
  111. static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
  112. {
  113.     void __iomem    *epio = ep->regs;
  114.     u16     csr;
  115.     u16     lastcsr = 0;
  116.     int     retries = 1000;
  117.  
  118.     csr = musb_readw(epio, MUSB_TXCSR);
  119.     while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
  120.         if (csr != lastcsr)
  121.             DBG_nonverb(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
  122.         lastcsr = csr;
  123.         csr |= MUSB_TXCSR_FLUSHFIFO;
  124.         musb_writew(epio, MUSB_TXCSR, csr);
  125.         csr = musb_readw(epio, MUSB_TXCSR);
  126.         if (WARN(retries-- < 1,
  127.                 "Could not flush host TX%d fifo: csr: %04x\n",
  128.                 ep->epnum, csr))
  129.             return;
  130.         mdelay(1);
  131.     }
  132. }
  133.  
  134. /*
  135.  * Start transmit. Caller is responsible for locking shared resources.
  136.  * musb must be locked.
  137.  */
  138. static inline void musb_h_tx_start(struct musb_hw_ep *ep)
  139. {
  140.     u16 txcsr;
  141.  
  142.     /* NOTE: no locks here; caller should lock and select EP */
  143.     if (ep->epnum) {
  144.         txcsr = musb_readw(ep->regs, MUSB_TXCSR);
  145.         txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
  146.         musb_writew(ep->regs, MUSB_TXCSR, txcsr);
  147.     } else {
  148.         txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
  149.         musb_writew(ep->regs, MUSB_CSR0, txcsr);
  150.     }
  151.  
  152. }
  153.  
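/* Kick off a queued DMA transmit by setting DMAENAB in TXCSR; the caller
 * must already hold the controller lock and have selected this endpoint.
 */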
  154. static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
  155. {
  156.     u16 txcsr;
  157.  
  158.     /* NOTE: no locks here; caller should lock and select EP */
  159.     txcsr = musb_readw(ep->regs, MUSB_TXCSR);
  160.     txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
  161.     musb_writew(ep->regs, MUSB_TXCSR, txcsr);
  162. }
  163.  
  164. /*
  165.  * Start the URB at the front of an endpoint's queue
  166.  * the hardware endpoint must already be claimed by the caller.
  167.  *
  168.  * Context: controller locked, irqs blocked
  169.  */
  170. static void
  171. musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
  172. {
  173.     u16         frame;
  174.     u32         len;
  175.     void            *buf;
  176.     void __iomem        *mbase =  musb->mregs;
  177.     struct urb      *urb = next_urb(qh);
  178.     struct musb_hw_ep   *hw_ep = qh->hw_ep;
  179.     int         epnum = hw_ep->epnum;
  180.  
  181.     /* initialize software qh state */
  182.     qh->offset = 0;
  183.     qh->segsize = 0;
  184.  
  185.     /* gather right source of data */
  186.     switch (qh->type) {
  187.     case USB_ENDPOINT_XFER_CONTROL:
  188.         /* control transfers always start with SETUP */
  189.         is_in = 0;
  190.         hw_ep->out_qh = qh;
  191.         musb->ep0_stage = MUSB_EP0_START;
  192.         buf = urb->setup_packet;
  193.         len = 8;
  194.         break;
  195.     case USB_ENDPOINT_XFER_ISOC:
  196.         qh->iso_idx = 0;
  197.         qh->frame = 0;
  198.         buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
  199.         len = urb->iso_frame_desc[0].length;
  200.         break;
  201.     default:        /* bulk, interrupt */
  202.         buf = urb->transfer_buffer;
  203.         len = urb->transfer_buffer_length;
  204.     }
  205.  
  206.     DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
  207.             qh, urb, usb_pipedevice(urb->pipe), qh->epnum,
  208.             is_in ? "in" : "out",
  209.             ({char *s; switch (qh->type) {
  210.             case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
  211.             case USB_ENDPOINT_XFER_BULK:    s = "-bulk"; break;
  212.             case USB_ENDPOINT_XFER_ISOC:    s = "-iso"; break;
  213.             default:            s = "-intr"; break;
  214.             }; s; }),
  215.             epnum, buf, len);
  216.  
  217.     /* Configure endpoint */
  218.     if (is_in || hw_ep->is_shared_fifo)
  219.         hw_ep->in_qh = qh;
  220.     else
  221.         hw_ep->out_qh = qh;
  222.     musb_ep_program(musb, epnum, urb, !is_in, buf, len);
  223.  
  224.     /* transmit may have more work: start it when it is time */
  225.     if (is_in)
  226.         return;
  227.  
  228.     /* determine if the time is right for a periodic transfer */
  229.     switch (qh->type) {
  230.     case USB_ENDPOINT_XFER_ISOC:
  231.     case USB_ENDPOINT_XFER_INT:
  232.         DBG(3, "check whether there's still time for periodic Tx\n");
  233.         qh->iso_idx = 0;
  234.         frame = musb_readw(mbase, MUSB_FRAME);
  235.         /* FIXME this doesn't implement that scheduling policy ...
  236.          * or handle framecounter wrapping
  237.          */
  238.         if ((urb->transfer_flags & URB_ISO_ASAP)
  239.                 || (frame >= urb->start_frame)) {
  240.             /* REVISIT the SOF irq handler shouldn't duplicate
  241.              * this code; and we don't init urb->start_frame...
  242.              */
  243.             qh->frame = 0;
  244.             goto start;
  245.         } else {
  246.             qh->frame = urb->start_frame;
  247.             /* enable SOF interrupt so we can count down */
  248.             DBG(1, "SOF for %d\n", epnum);
  249. #if 1 /* ifndef CONFIG_ARCH_DAVINCI */
  250.             musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
  251. #endif
  252.         }
  253.         break;
  254.     default:
  255. start:
  256.         DBG(4, "Start TX%d %s\n", epnum,
  257.             hw_ep->tx_channel ? "dma" : "pio");
  258.  
  259.         if (!hw_ep->tx_channel)
  260.             musb_h_tx_start(hw_ep);
  261.         else if (cppi_ti_dma() || tusb_dma_omap())
  262.             cppi_host_txdma_start(hw_ep);
  263.     }
  264. }
  265.  
  266. /* caller owns controller lock, irqs are blocked */
  267. static void
  268. __musb_giveback(struct musb *musb, struct urb *urb, int status)
  269. __releases(musb->lock)
  270. __acquires(musb->lock)
  271. {
  272.     DBG(({ int level; switch (status) {
  273.                 case 0:
  274.                     level = 4;
  275.                     break;
  276.                 /* common/boring faults */
  277.                 case -EREMOTEIO:
  278.                 case -ESHUTDOWN:
  279.                 case -ECONNRESET:
  280.                 case -EPIPE:
  281.                     level = 3;
  282.                     break;
  283.                 default:
  284.                     level = 2;
  285.                     break;
  286.                 }; level; }),
  287.             "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
  288.             urb, urb->complete, status,
  289.             usb_pipedevice(urb->pipe),
  290.             usb_pipeendpoint(urb->pipe),
  291.             usb_pipein(urb->pipe) ? "in" : "out",
  292.             urb->actual_length, urb->transfer_buffer_length
  293.             );
  294.  
  295.     usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
  296.     spin_unlock(&musb->lock);
  297.     usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
  298.     spin_lock(&musb->lock);
  299. }
  300.  
  301. /* for bulk/interrupt endpoints only */
  302. static inline void
  303. musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
  304. {
  305.     struct usb_device   *udev = urb->dev;
  306.     u16         csr;
  307.     void __iomem        *epio = ep->regs;
  308.     struct musb_qh      *qh;
  309.  
  310.     /* FIXME:  the current Mentor DMA code seems to have
  311.      * problems getting toggle correct.
  312.      */
  313.  
  314.     if (is_in || ep->is_shared_fifo)
  315.         qh = ep->in_qh;
  316.     else
  317.         qh = ep->out_qh;
  318.  
  319.     if (!is_in) {
  320.         csr = musb_readw(epio, MUSB_TXCSR);
  321.         usb_settoggle(udev, qh->epnum, 1,
  322.             (csr & MUSB_TXCSR_H_DATATOGGLE)
  323.                 ? 1 : 0);
  324.     } else {
  325.         csr = musb_readw(epio, MUSB_RXCSR);
  326.         usb_settoggle(udev, qh->epnum, 0,
  327.             (csr & MUSB_RXCSR_H_DATATOGGLE)
  328.                 ? 1 : 0);
  329.     }
  330. }
  331.  
  332. /* caller owns controller lock, irqs are blocked */
  333. static struct musb_qh *
  334. musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
  335. {
  336.     struct musb_hw_ep   *ep = qh->hw_ep;
  337.     struct musb     *musb = ep->musb;
  338.     int         is_in = usb_pipein(urb->pipe);
  339.     int         ready = qh->is_ready;
  340.  
  341.     /* save toggle eagerly, for paranoia */
  342.     switch (qh->type) {
  343.     case USB_ENDPOINT_XFER_BULK:
  344.     case USB_ENDPOINT_XFER_INT:
  345.         musb_save_toggle(ep, is_in, urb);
  346.         break;
  347.     case USB_ENDPOINT_XFER_ISOC:
  348.         if (status == 0 && urb->error_count)
  349.             status = -EXDEV;
  350.         break;
  351.     }
  352.  
  353.     qh->is_ready = 0;
  354.     __musb_giveback(musb, urb, status);
  355.     qh->is_ready = ready;
  356.  
  357.     /* reclaim resources (and bandwidth) ASAP; deschedule it, and
  358.      * invalidate qh as soon as list_empty(&hep->urb_list)
  359.      */
  360.     if (list_empty(&qh->hep->urb_list)) {
  361.         struct list_head    *head;
  362.  
  363.         if (is_in)
  364.             ep->rx_reinit = 1;
  365.         else
  366.             ep->tx_reinit = 1;
  367.  
  368.         /* clobber old pointers to this qh */
  369.         if (is_in || ep->is_shared_fifo)
  370.             ep->in_qh = NULL;
  371.         else
  372.             ep->out_qh = NULL;
  373.         qh->hep->hcpriv = NULL;
  374.  
  375.         switch (qh->type) {
  376.  
  377.         case USB_ENDPOINT_XFER_CONTROL:
  378.         case USB_ENDPOINT_XFER_BULK:
  379.             /* fifo policy for these lists, except that NAKing
  380.              * should rotate a qh to the end (for fairness).
  381.              */
  382.             if (qh->mux == 1) {
  383.                 head = qh->ring.prev;
  384.                 list_del(&qh->ring);
  385.                 kfree(qh);
  386.                 qh = first_qh(head);
  387.                 break;
  388.             }
  389.  
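            /* else (qh->mux != 1): this qh had the endpoint to itself,
             * so fall through and release it like a periodic endpoint
             */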
  390.         case USB_ENDPOINT_XFER_ISOC:
  391.         case USB_ENDPOINT_XFER_INT:
  392.             /* this is where periodic bandwidth should be
  393.              * de-allocated if it's tracked and allocated;
  394.              * and where we'd update the schedule tree...
  395.              */
  396.             musb->periodic[ep->epnum] = NULL;
  397.             kfree(qh);
  398.             qh = NULL;
  399.             break;
  400.         }
  401.     }
  402.     return qh;
  403. }
  404.  
  405. /*
  406.  * Advance this hardware endpoint's queue, completing the specified urb and
  407.  * advancing to either the next urb queued to that qh, or else invalidating
  408.  * that qh and advancing to the next qh scheduled after the current one.
  409.  *
  410.  * Context: caller owns controller lock, irqs are blocked
  411.  */
  412. static void
  413. musb_advance_schedule(struct musb *musb, struct urb *urb,
  414.         struct musb_hw_ep *hw_ep, int is_in)
  415. {
  416.     struct musb_qh  *qh;
  417.  
  418.     if (is_in || hw_ep->is_shared_fifo)
  419.         qh = hw_ep->in_qh;
  420.     else
  421.         qh = hw_ep->out_qh;
  422.  
  423.     if (urb->status == -EINPROGRESS)
  424.         qh = musb_giveback(qh, urb, 0);
  425.     else
  426.         qh = musb_giveback(qh, urb, urb->status);
  427.  
  428.     if (qh != NULL && qh->is_ready) {
  429.         DBG(4, "... next ep%d %cX urb %p\n",
  430.                 hw_ep->epnum, is_in ? 'R' : 'T',
  431.                 next_urb(qh));
  432.         musb_start_urb(musb, is_in, qh);
  433.     }
  434. }
  435.  
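/* Flush the RX FIFO (written twice to cover double buffering) while clearing
 * REQPKT/AUTOREQ/AUTOCLEAR; extra bits passed in csr, such as CLRDATATOG, are
 * written as well.  Returns the RXCSR value read back afterwards.
 */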
  436. static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
  437. {
  438.     /* we don't want fifo to fill itself again;
  439.      * ignore dma (various models),
  440.      * leave toggle alone (may not have been saved yet)
  441.      */
  442.     csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
  443.     csr &= ~(MUSB_RXCSR_H_REQPKT
  444.         | MUSB_RXCSR_H_AUTOREQ
  445.         | MUSB_RXCSR_AUTOCLEAR);
  446.  
  447.     /* write 2x to allow double buffering */
  448.     musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  449.     musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  450.  
  451.     /* flush writebuffer */
  452.     return musb_readw(hw_ep->regs, MUSB_RXCSR);
  453. }
  454.  
  455. /*
  456.  * PIO RX for a packet (or part of it).
  457.  */
  458. static bool
  459. musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
  460. {
  461.     u16         rx_count;
  462.     u8          *buf;
  463.     u16         csr;
  464.     bool            done = false;
  465.     u32         length;
  466.     int         do_flush = 0;
  467.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  468.     void __iomem        *epio = hw_ep->regs;
  469.     struct musb_qh      *qh = hw_ep->in_qh;
  470.     int         pipe = urb->pipe;
  471.     void            *buffer = urb->transfer_buffer;
  472.  
  473.     /* musb_ep_select(mbase, epnum); */
  474.     rx_count = musb_readw(epio, MUSB_RXCOUNT);
  475.     DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
  476.             urb->transfer_buffer, qh->offset,
  477.             urb->transfer_buffer_length);
  478.  
  479.     /* unload FIFO */
  480.     if (usb_pipeisoc(pipe)) {
  481.         int                 status = 0;
  482.         struct usb_iso_packet_descriptor    *d;
  483.  
  484.         if (iso_err) {
  485.             status = -EILSEQ;
  486.             urb->error_count++;
  487.         }
  488.  
  489.         d = urb->iso_frame_desc + qh->iso_idx;
  490.         buf = buffer + d->offset;
  491.         length = d->length;
  492.         if (rx_count > length) {
  493.             if (status == 0) {
  494.                 status = -EOVERFLOW;
  495.                 urb->error_count++;
  496.             }
  497.             DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
  498.             do_flush = 1;
  499.         } else
  500.             length = rx_count;
  501.         urb->actual_length += length;
  502.         d->actual_length = length;
  503.  
  504.         d->status = status;
  505.  
  506.         /* see if we are done */
  507.         done = (++qh->iso_idx >= urb->number_of_packets);
  508.     } else {
  509.         /* non-isoch */
  510.         buf = buffer + qh->offset;
  511.         length = urb->transfer_buffer_length - qh->offset;
  512.         if (rx_count > length) {
  513.             if (urb->status == -EINPROGRESS)
  514.                 urb->status = -EOVERFLOW;
  515.             DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
  516.             do_flush = 1;
  517.         } else
  518.             length = rx_count;
  519.         urb->actual_length += length;
  520.         qh->offset += length;
  521.  
  522.         /* see if we are done */
  523.         done = (urb->actual_length == urb->transfer_buffer_length)
  524.             || (rx_count < qh->maxpacket)
  525.             || (urb->status != -EINPROGRESS);
  526.         if (done
  527.                 && (urb->status == -EINPROGRESS)
  528.                 && (urb->transfer_flags & URB_SHORT_NOT_OK)
  529.                 && (urb->actual_length
  530.                     < urb->transfer_buffer_length))
  531.             urb->status = -EREMOTEIO;
  532.     }
  533.  
  534.     musb_read_fifo(hw_ep, length, buf);
  535.  
  536.     csr = musb_readw(epio, MUSB_RXCSR);
  537.     csr |= MUSB_RXCSR_H_WZC_BITS;
  538.     if (unlikely(do_flush))
  539.         musb_h_flush_rxfifo(hw_ep, csr);
  540.     else {
  541.         /* REVISIT this assumes AUTOCLEAR is never set */
  542.         csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
  543.         if (!done)
  544.             csr |= MUSB_RXCSR_H_REQPKT;
  545.         musb_writew(epio, MUSB_RXCSR, csr);
  546.     }
  547.  
  548.     return done;
  549. }
  550.  
  551. /* we don't always need to reinit a given side of an endpoint...
  552.  * when we do, use tx/rx reinit routine and then construct a new CSR
  553.  * to address data toggle, NYET, and DMA or PIO.
  554.  *
  555.  * it's possible that driver bugs (especially for DMA) or aborting a
  556.  * transfer might have left the endpoint busier than it should be.
  557.  * the busy/not-empty tests are basically paranoia.
  558.  */
  559. static void
  560. musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
  561. {
  562.     u16 csr;
  563.  
  564.     /* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
  565.      * That always uses tx_reinit since ep0 repurposes TX register
  566.      * offsets; the initial SETUP packet is also a kind of OUT.
  567.      */
  568.  
  569.     /* if programmed for Tx, put it in RX mode */
  570.     if (ep->is_shared_fifo) {
  571.         csr = musb_readw(ep->regs, MUSB_TXCSR);
  572.         if (csr & MUSB_TXCSR_MODE) {
  573.             musb_h_tx_flush_fifo(ep);
  574.             musb_writew(ep->regs, MUSB_TXCSR,
  575.                     MUSB_TXCSR_FRCDATATOG);
  576.         }
  577.         /* clear mode (and everything else) to enable Rx */
  578.         musb_writew(ep->regs, MUSB_TXCSR, 0);
  579.  
  580.     /* scrub all previous state, clearing toggle */
  581.     } else {
  582.         csr = musb_readw(ep->regs, MUSB_RXCSR);
  583.         if (csr & MUSB_RXCSR_RXPKTRDY)
  584.             WARNING("rx%d, packet/%d ready?\n", ep->epnum,
  585.                 musb_readw(ep->regs, MUSB_RXCOUNT));
  586.  
  587.         musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
  588.     }
  589.  
  590.     /* target addr and (for multipoint) hub addr/port */
  591.     if (musb->is_multipoint) {
  592.         musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
  593.         musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
  594.         musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
  595.  
  596.     } else
  597.         musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
  598.  
  599.     /* protocol/endpoint, interval/NAKlimit, i/o size */
  600.     musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
  601.     musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
  602.     /* NOTE: bulk combining rewrites high bits of maxpacket */
  603.     musb_writew(ep->regs, MUSB_RXMAXP,
  604.             qh->maxpacket | ((qh->hb_mult - 1) << 11));
  605.  
  606.     ep->rx_reinit = 0;
  607. }
  608.  
  609.  
  610. /*
  611.  * Program an HDRC endpoint as per the given URB
  612.  * Context: irqs blocked, controller lock held
  613.  */
  614. static void musb_ep_program(struct musb *musb, u8 epnum,
  615.             struct urb *urb, unsigned int is_out,
  616.             u8 *buf, u32 len)
  617. {
  618.     struct dma_controller   *dma_controller;
  619.     struct dma_channel  *dma_channel;
  620.     u8          dma_ok;
  621.     void __iomem        *mbase = musb->mregs;
  622.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  623.     void __iomem        *epio = hw_ep->regs;
  624.     struct musb_qh      *qh;
  625.     u16         packet_sz;
  626.  
  627.     if (!is_out || hw_ep->is_shared_fifo)
  628.         qh = hw_ep->in_qh;
  629.     else
  630.         qh = hw_ep->out_qh;
  631.  
  632.     packet_sz = qh->maxpacket;
  633.  
  634.     DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
  635.                 "h_addr%02x h_port%02x bytes %d\n",
  636.             is_out ? "-->" : "<--",
  637.             epnum, urb, urb->dev->speed,
  638.             qh->addr_reg, qh->epnum, is_out ? "out" : "in",
  639.             qh->h_addr_reg, qh->h_port_reg,
  640.             len);
  641.  
  642.     musb_ep_select(mbase, epnum);
  643.  
  644.     /* candidate for DMA? */
  645.     dma_controller = musb->dma_controller;
  646.     if (is_dma_capable() && epnum && dma_controller) {
  647.         dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
  648.         if (!dma_channel) {
  649.             dma_channel = dma_controller->channel_alloc(
  650.                     dma_controller, hw_ep, is_out);
  651.             if (is_out)
  652.                 hw_ep->tx_channel = dma_channel;
  653.             else
  654.                 hw_ep->rx_channel = dma_channel;
  655.         }
  656.     } else
  657.         dma_channel = NULL;
  658.  
  659.     /* make sure we clear DMAEnab, autoSet bits from previous run */
  660.  
  661.     /* OUT/transmit/EP0 or IN/receive? */
  662.     if (is_out) {
  663.         u16 csr;
  664.         u16 int_txe;
  665.         u16 load_count;
  666.  
  667.         csr = musb_readw(epio, MUSB_TXCSR);
  668.  
  669.         /* disable interrupt in case we flush */
  670.         int_txe = musb_readw(mbase, MUSB_INTRTXE);
  671.         musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
  672.  
  673.         /* general endpoint setup */
  674.         if (epnum) {
  675.             /* ASSERT:  TXCSR_DMAENAB was already cleared */
  676.  
  677.             /* flush all old state, set default */
  678.             musb_h_tx_flush_fifo(hw_ep);
  679.             csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
  680.                     | MUSB_TXCSR_DMAMODE
  681.                     | MUSB_TXCSR_FRCDATATOG
  682.                     | MUSB_TXCSR_H_RXSTALL
  683.                     | MUSB_TXCSR_H_ERROR
  684.                     | MUSB_TXCSR_TXPKTRDY
  685.                     );
  686.             csr |= MUSB_TXCSR_MODE;
  687.  
  688.             if (usb_gettoggle(urb->dev,
  689.                     qh->epnum, 1))
  690.                 csr |= MUSB_TXCSR_H_WR_DATATOGGLE
  691.                     | MUSB_TXCSR_H_DATATOGGLE;
  692.             else
  693.                 csr |= MUSB_TXCSR_CLRDATATOG;
  694.  
  695.             /* twice in case of double packet buffering */
  696.             musb_writew(epio, MUSB_TXCSR, csr);
  697.             /* REVISIT may need to clear FLUSHFIFO ... */
  698.             musb_writew(epio, MUSB_TXCSR, csr);
  699.             csr = musb_readw(epio, MUSB_TXCSR);
  700.         } else {
  701.             /* endpoint 0: just flush */
  702.             musb_writew(epio, MUSB_CSR0,
  703.                 csr | MUSB_CSR0_FLUSHFIFO);
  704.             musb_writew(epio, MUSB_CSR0,
  705.                 csr | MUSB_CSR0_FLUSHFIFO);
  706.         }
  707.  
  708.         /* target addr and (for multipoint) hub addr/port */
  709.         if (musb->is_multipoint) {
  710.             musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
  711.             musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
  712.             musb_write_txhubport(mbase, epnum, qh->h_port_reg);
  713. /* FIXME if !epnum, do the same for RX ... */
  714.         } else
  715.             musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
  716.  
  717.         /* protocol/endpoint/interval/NAKlimit */
  718.         if (epnum) {
  719.             musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
  720.             if (can_bulk_split(musb, qh->type))
  721.                 musb_writew(epio, MUSB_TXMAXP,
  722.                     packet_sz
  723.                     | ((hw_ep->max_packet_sz_tx /
  724.                         packet_sz) - 1) << 11);
  725.             else
  726.                 musb_writew(epio, MUSB_TXMAXP,
  727.                     packet_sz);
  728.             musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
  729.         } else {
  730.             musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
  731.             if (musb->is_multipoint)
  732.                 musb_writeb(epio, MUSB_TYPE0,
  733.                         qh->type_reg);
  734.         }
  735.  
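        /* with bulk splitting the FIFO can be preloaded with more than one
         * max-size packet, up to the endpoint's full TX FIFO size
         */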
  736.         if (can_bulk_split(musb, qh->type))
  737.             load_count = min((u32) hw_ep->max_packet_sz_tx,
  738.                         len);
  739.         else
  740.             load_count = min((u32) packet_sz, len);
  741.  
  742.         if (musb_inventra_dma() && dma_channel) {
  743.  
  744.             /* clear previous state */
  745.             csr = musb_readw(epio, MUSB_TXCSR);
  746.             csr &= ~(MUSB_TXCSR_AUTOSET
  747.                 | MUSB_TXCSR_DMAMODE
  748.                 | MUSB_TXCSR_DMAENAB);
  749.             csr |= MUSB_TXCSR_MODE;
  750.             musb_writew(epio, MUSB_TXCSR,
  751.                 csr | MUSB_TXCSR_MODE);
  752.  
  753.             qh->segsize = min(len, dma_channel->max_len);
  754.  
  755.             if (qh->segsize <= packet_sz)
  756.                 dma_channel->desired_mode = 0;
  757.             else
  758.                 dma_channel->desired_mode = 1;
  759.  
  760.  
  761.             if (dma_channel->desired_mode == 0) {
  762.                 csr &= ~(MUSB_TXCSR_AUTOSET
  763.                     | MUSB_TXCSR_DMAMODE);
  764.                 csr |= (MUSB_TXCSR_DMAENAB);
  765.                     /* against programming guide */
  766.             } else
  767.                 csr |= (MUSB_TXCSR_AUTOSET
  768.                     | MUSB_TXCSR_DMAENAB
  769.                     | MUSB_TXCSR_DMAMODE);
  770.  
  771.             musb_writew(epio, MUSB_TXCSR, csr);
  772.  
  773.             dma_ok = dma_controller->channel_program(
  774.                     dma_channel, packet_sz,
  775.                     dma_channel->desired_mode,
  776.                     urb->transfer_dma,
  777.                     qh->segsize);
  778.             if (dma_ok) {
  779.                 load_count = 0;
  780.             } else {
  781.                 dma_controller->channel_release(dma_channel);
  782.                 if (is_out)
  783.                     hw_ep->tx_channel = NULL;
  784.                 else
  785.                     hw_ep->rx_channel = NULL;
  786.                 dma_channel = NULL;
  787.             }
  788.         }
  789.  
  790.         /* candidate for DMA */
  791.         if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {
  792.  
  793.             /* program endpoint CSRs first, then setup DMA.
  794.              * assume CPPI setup succeeds.
  795.              * defer enabling dma.
  796.              */
  797.             csr = musb_readw(epio, MUSB_TXCSR);
  798.             csr &= ~(MUSB_TXCSR_AUTOSET
  799.                     | MUSB_TXCSR_DMAMODE
  800.                     | MUSB_TXCSR_DMAENAB);
  801.             csr |= MUSB_TXCSR_MODE;
  802.             musb_writew(epio, MUSB_TXCSR,
  803.                 csr | MUSB_TXCSR_MODE);
  804.  
  805.             dma_channel->actual_len = 0L;
  806.             qh->segsize = len;
  807.  
  808.             /* TX uses "rndis" mode automatically, but needs help
  809.              * to identify the zero-length-final-packet case.
  810.              */
  811.             dma_ok = dma_controller->channel_program(
  812.                     dma_channel, packet_sz,
  813.                     (urb->transfer_flags
  814.                             & URB_ZERO_PACKET)
  815.                         == URB_ZERO_PACKET,
  816.                     urb->transfer_dma,
  817.                     qh->segsize);
  818.             if (dma_ok) {
  819.                 load_count = 0;
  820.             } else {
  821.                 dma_controller->channel_release(dma_channel);
  822.                 hw_ep->tx_channel = NULL;
  823.                 dma_channel = NULL;
  824.  
  825.                 /* REVISIT there's an error path here that
  826.                  * needs handling:  can't do dma, but
  827.                  * there's no pio buffer address...
  828.                  */
  829.             }
  830.         }
  831.  
  832.         if (load_count) {
  833.             /* ASSERT:  TXCSR_DMAENAB was already cleared */
  834.  
  835.             /* PIO to load FIFO */
  836.             qh->segsize = load_count;
  837.             musb_write_fifo(hw_ep, load_count, buf);
  838.             csr = musb_readw(epio, MUSB_TXCSR);
  839.             csr &= ~(MUSB_TXCSR_DMAENAB
  840.                 | MUSB_TXCSR_DMAMODE
  841.                 | MUSB_TXCSR_AUTOSET);
  842.             /* write CSR */
  843.             csr |= MUSB_TXCSR_MODE;
  844.  
  845.             if (epnum)
  846.                 musb_writew(epio, MUSB_TXCSR, csr);
  847.         }
  848.  
  849.         /* re-enable interrupt */
  850.         musb_writew(mbase, MUSB_INTRTXE, int_txe);
  851.  
  852.     /* IN/receive */
  853.     } else {
  854.         u16 csr;
  855.  
  856.         if (hw_ep->rx_reinit) {
  857.             musb_rx_reinit(musb, qh, hw_ep);
  858.  
  859.             /* init new state: toggle and NYET, maybe DMA later */
  860.             if (usb_gettoggle(urb->dev, qh->epnum, 0))
  861.                 csr = MUSB_RXCSR_H_WR_DATATOGGLE
  862.                     | MUSB_RXCSR_H_DATATOGGLE;
  863.             else
  864.                 csr = 0;
  865.             if (qh->type == USB_ENDPOINT_XFER_INT)
  866.                 csr |= MUSB_RXCSR_DISNYET;
  867.  
  868.         } else {
  869.             csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
  870.  
  871.             if (csr & (MUSB_RXCSR_RXPKTRDY
  872.                     | MUSB_RXCSR_DMAENAB
  873.                     | MUSB_RXCSR_H_REQPKT))
  874.                 ERR("broken !rx_reinit, ep%d csr %04x\n",
  875.                         hw_ep->epnum, csr);
  876.  
  877.             /* scrub any stale state, leaving toggle alone */
  878.             csr &= MUSB_RXCSR_DISNYET;
  879.         }
  880.  
  881.         /* kick things off */
  882.  
  883.         if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {
  884.             /* candidate for DMA */
  885.             if (dma_channel) {
  886.                 dma_channel->actual_len = 0L;
  887.                 qh->segsize = len;
  888.  
  889.                 /* AUTOREQ is in a DMA register */
  890.                 musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  891.                 csr = musb_readw(hw_ep->regs,
  892.                         MUSB_RXCSR);
  893.  
  894.                 /* unless caller treats short rx transfers as
  895.                  * errors, we dare not queue multiple transfers.
  896.                  */
  897.                 dma_ok = dma_controller->channel_program(
  898.                         dma_channel, packet_sz,
  899.                         !(urb->transfer_flags
  900.                             & URB_SHORT_NOT_OK),
  901.                         urb->transfer_dma,
  902.                         qh->segsize);
  903.                 if (!dma_ok) {
  904.                     dma_controller->channel_release(
  905.                             dma_channel);
  906.                     hw_ep->rx_channel = NULL;
  907.                     dma_channel = NULL;
  908.                 } else
  909.                     csr |= MUSB_RXCSR_DMAENAB;
  910.             }
  911.         }
  912.  
  913.         csr |= MUSB_RXCSR_H_REQPKT;
  914.         DBG(7, "RXCSR%d := %04x\n", epnum, csr);
  915.         musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
  916.         csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
  917.     }
  918. }
  919.  
  920.  
  921. /*
  922.  * Service the default endpoint (ep0) as host.
  923.  * Return true until it's time to start the status stage.
  924.  */
  925. static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
  926. {
  927.     bool             more = false;
  928.     u8          *fifo_dest = NULL;
  929.     u16         fifo_count = 0;
  930.     struct musb_hw_ep   *hw_ep = musb->control_ep;
  931.     struct musb_qh      *qh = hw_ep->in_qh;
  932.     struct usb_ctrlrequest  *request;
  933.  
  934.     switch (musb->ep0_stage) {
  935.     case MUSB_EP0_IN:
  936.         fifo_dest = urb->transfer_buffer + urb->actual_length;
  937.         fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
  938.                    urb->actual_length);
  939.         if (fifo_count < len)
  940.             urb->status = -EOVERFLOW;
  941.  
  942.         musb_read_fifo(hw_ep, fifo_count, fifo_dest);
  943.  
  944.         urb->actual_length += fifo_count;
  945.         if (len < qh->maxpacket) {
  946.             /* always terminate on short read; it's
  947.              * rarely reported as an error.
  948.              */
  949.         } else if (urb->actual_length <
  950.                 urb->transfer_buffer_length)
  951.             more = true;
  952.         break;
  953.     case MUSB_EP0_START:
  954.         request = (struct usb_ctrlrequest *) urb->setup_packet;
  955.  
  956.         if (!request->wLength) {
  957.             DBG(4, "start no-DATA\n");
  958.             break;
  959.         } else if (request->bRequestType & USB_DIR_IN) {
  960.             DBG(4, "start IN-DATA\n");
  961.             musb->ep0_stage = MUSB_EP0_IN;
  962.             more = true;
  963.             break;
  964.         } else {
  965.             DBG(4, "start OUT-DATA\n");
  966.             musb->ep0_stage = MUSB_EP0_OUT;
  967.             more = true;
  968.         }
  969.         /* FALLTHROUGH */
  970.     case MUSB_EP0_OUT:
  971.         fifo_count = min_t(size_t, qh->maxpacket,
  972.                    urb->transfer_buffer_length -
  973.                    urb->actual_length);
  974.         if (fifo_count) {
  975.             fifo_dest = (u8 *) (urb->transfer_buffer
  976.                     + urb->actual_length);
  977.             DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
  978.                     fifo_count,
  979.                     (fifo_count == 1) ? "" : "s",
  980.                     fifo_dest);
  981.             musb_write_fifo(hw_ep, fifo_count, fifo_dest);
  982.  
  983.             urb->actual_length += fifo_count;
  984.             more = true;
  985.         }
  986.         break;
  987.     default:
  988.         ERR("bogus ep0 stage %d\n", musb->ep0_stage);
  989.         break;
  990.     }
  991.  
  992.     return more;
  993. }
  994.  
  995. /*
  996.  * Handle default endpoint interrupt as host. Only called in IRQ time
  997.  * from musb_interrupt().
  998.  *
  999.  * called with controller irqlocked
  1000.  */
  1001. irqreturn_t musb_h_ep0_irq(struct musb *musb)
  1002. {
  1003.     struct urb      *urb;
  1004.     u16         csr, len;
  1005.     int         status = 0;
  1006.     void __iomem        *mbase = musb->mregs;
  1007.     struct musb_hw_ep   *hw_ep = musb->control_ep;
  1008.     void __iomem        *epio = hw_ep->regs;
  1009.     struct musb_qh      *qh = hw_ep->in_qh;
  1010.     bool            complete = false;
  1011.     irqreturn_t     retval = IRQ_NONE;
  1012.  
  1013.     /* ep0 only has one queue, "in" */
  1014.     urb = next_urb(qh);
  1015.  
  1016.     musb_ep_select(mbase, 0);
  1017.     csr = musb_readw(epio, MUSB_CSR0);
  1018.     len = (csr & MUSB_CSR0_RXPKTRDY)
  1019.             ? musb_readb(epio, MUSB_COUNT0)
  1020.             : 0;
  1021.  
  1022.     DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
  1023.         csr, qh, len, urb, musb->ep0_stage);
  1024.  
  1025.     /* if we just did status stage, we are done */
  1026.     if (MUSB_EP0_STATUS == musb->ep0_stage) {
  1027.         retval = IRQ_HANDLED;
  1028.         complete = true;
  1029.     }
  1030.  
  1031.     /* prepare status */
  1032.     if (csr & MUSB_CSR0_H_RXSTALL) {
  1033.         DBG(6, "STALLING ENDPOINT\n");
  1034.         status = -EPIPE;
  1035.  
  1036.     } else if (csr & MUSB_CSR0_H_ERROR) {
  1037.         DBG(2, "no response, csr0 %04x\n", csr);
  1038.         status = -EPROTO;
  1039.  
  1040.     } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
  1041.         DBG(2, "control NAK timeout\n");
  1042.  
  1043.         /* NOTE:  this code path would be a good place to PAUSE a
  1044.          * control transfer, if another one is queued, so that
  1045.          * ep0 is more likely to stay busy.
  1046.          *
  1047.          * if (qh->ring.next != &musb->control), then
  1048.          * we have a candidate... NAKing is *NOT* an error
  1049.          */
  1050.         musb_writew(epio, MUSB_CSR0, 0);
  1051.         retval = IRQ_HANDLED;
  1052.     }
  1053.  
  1054.     if (status) {
  1055.         DBG(6, "aborting\n");
  1056.         retval = IRQ_HANDLED;
  1057.         if (urb)
  1058.             urb->status = status;
  1059.         complete = true;
  1060.  
  1061.         /* use the proper sequence to abort the transfer */
  1062.         if (csr & MUSB_CSR0_H_REQPKT) {
  1063.             csr &= ~MUSB_CSR0_H_REQPKT;
  1064.             musb_writew(epio, MUSB_CSR0, csr);
  1065.             csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
  1066.             musb_writew(epio, MUSB_CSR0, csr);
  1067.         } else {
  1068.             csr |= MUSB_CSR0_FLUSHFIFO;
  1069.             musb_writew(epio, MUSB_CSR0, csr);
  1070.             musb_writew(epio, MUSB_CSR0, csr);
  1071.             csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
  1072.             musb_writew(epio, MUSB_CSR0, csr);
  1073.         }
  1074.  
  1075.         musb_writeb(epio, MUSB_NAKLIMIT0, 0);
  1076.  
  1077.         /* clear it */
  1078.         musb_writew(epio, MUSB_CSR0, 0);
  1079.     }
  1080.  
  1081.     if (unlikely(!urb)) {
  1082.         /* stop endpoint since we have no place for its data, this
  1083.          * SHOULD NEVER HAPPEN! */
  1084.         ERR("no URB for end 0\n");
  1085.  
  1086.         musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
  1087.         musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
  1088.         musb_writew(epio, MUSB_CSR0, 0);
  1089.  
  1090.         goto done;
  1091.     }
  1092.  
  1093.     if (!complete) {
  1094.         /* call common logic and prepare response */
  1095.         if (musb_h_ep0_continue(musb, len, urb)) {
  1096.             /* more packets required */
  1097.             csr = (MUSB_EP0_IN == musb->ep0_stage)
  1098.                 ?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
  1099.         } else {
  1100.             /* data transfer complete; perform status phase */
  1101.             if (usb_pipeout(urb->pipe)
  1102.                     || !urb->transfer_buffer_length)
  1103.                 csr = MUSB_CSR0_H_STATUSPKT
  1104.                     | MUSB_CSR0_H_REQPKT;
  1105.             else
  1106.                 csr = MUSB_CSR0_H_STATUSPKT
  1107.                     | MUSB_CSR0_TXPKTRDY;
  1108.  
  1109.             /* flag status stage */
  1110.             musb->ep0_stage = MUSB_EP0_STATUS;
  1111.  
  1112.             DBG(5, "ep0 STATUS, csr %04x\n", csr);
  1113.  
  1114.         }
  1115.         musb_writew(epio, MUSB_CSR0, csr);
  1116.         retval = IRQ_HANDLED;
  1117.     } else
  1118.         musb->ep0_stage = MUSB_EP0_IDLE;
  1119.  
  1120.     /* call completion handler if done */
  1121.     if (complete)
  1122.         musb_advance_schedule(musb, urb, hw_ep, 1);
  1123. done:
  1124.     return retval;
  1125. }
  1126.  
  1127.  
  1128. #ifdef CONFIG_USB_INVENTRA_DMA
  1129.  
  1130. /* Host side TX (OUT) using Mentor DMA works as follows:
  1131.     submit_urb ->
  1132.         - if queue was empty, Program Endpoint
  1133.         - ... which starts DMA to fifo in mode 1 or 0
  1134.  
  1135.     DMA Isr (transfer complete) -> TxAvail()
  1136.         - Stop DMA (~DmaEnab)   (<--- Alert ... currently happens
  1137.                     only in musb_cleanup_urb)
  1138.         - TxPktRdy has to be set in mode 0 or for
  1139.             short packets in mode 1.
  1140. */
  1141.  
  1142. #endif
  1143.  
  1144. /* Service a Tx-Available or dma completion irq for the endpoint */
  1145. void musb_host_tx(struct musb *musb, u8 epnum)
  1146. {
  1147.     int         pipe;
  1148.     bool            done = false;
  1149.     u16         tx_csr;
  1150.     size_t          wLength = 0;
  1151.     u8          *buf = NULL;
  1152.     struct urb      *urb;
  1153.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  1154.     void __iomem        *epio = hw_ep->regs;
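    /* a shared-FIFO endpoint keeps its current qh on the in_qh side,
     * even when transmitting
     */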
  1155.     struct musb_qh      *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
  1156.                                 : hw_ep->out_qh;
  1157.     u32         status = 0;
  1158.     void __iomem        *mbase = musb->mregs;
  1159.     struct dma_channel  *dma;
  1160.  
  1161.     urb = next_urb(qh);
  1162.  
  1163.     musb_ep_select(mbase, epnum);
  1164.     tx_csr = musb_readw(epio, MUSB_TXCSR);
  1165.  
  1166.     /* with CPPI, DMA sometimes triggers "extra" irqs */
  1167.     if (!urb) {
  1168.         DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
  1169.         goto finish;
  1170.     }
  1171.  
  1172.     pipe = urb->pipe;
  1173.     dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
  1174.     DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
  1175.             dma ? ", dma" : "");
  1176.  
  1177.     /* check for errors */
  1178.     if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
  1179.         /* dma was disabled, fifo flushed */
  1180.         DBG(3, "TX end %d stall\n", epnum);
  1181.  
  1182.         /* stall; record URB status */
  1183.         status = -EPIPE;
  1184.  
  1185.     } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
  1186.         /* (NON-ISO) dma was disabled, fifo flushed */
  1187.         DBG(3, "TX 3strikes on ep=%d\n", epnum);
  1188.  
  1189.         status = -ETIMEDOUT;
  1190.  
  1191.     } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
  1192.         DBG(6, "TX end=%d device not responding\n", epnum);
  1193.  
  1194.         /* NOTE:  this code path would be a good place to PAUSE a
  1195.          * transfer, if there's some other (nonperiodic) tx urb
  1196.          * that could use this fifo.  (dma complicates it...)
  1197.          *
  1198.          * if (bulk && qh->ring.next != &hw_ep->out_list), then
  1199.          * we have a candidate... NAKing is *NOT* an error
  1200.          */
  1201.         musb_ep_select(mbase, epnum);
  1202.         musb_writew(epio, MUSB_TXCSR,
  1203.                 MUSB_TXCSR_H_WZC_BITS
  1204.                 | MUSB_TXCSR_TXPKTRDY);
  1205.         goto finish;
  1206.     }
  1207.  
  1208.     if (status) {
  1209.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1210.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1211.             (void) musb->dma_controller->channel_abort(dma);
  1212.         }
  1213.  
  1214.         /* do the proper sequence to abort the transfer in the
  1215.          * usb core; the dma engine should already be stopped.
  1216.          */
  1217.         musb_h_tx_flush_fifo(hw_ep);
  1218.         tx_csr &= ~(MUSB_TXCSR_AUTOSET
  1219.                 | MUSB_TXCSR_DMAENAB
  1220.                 | MUSB_TXCSR_H_ERROR
  1221.                 | MUSB_TXCSR_H_RXSTALL
  1222.                 | MUSB_TXCSR_H_NAKTIMEOUT
  1223.                 );
  1224.  
  1225.         musb_ep_select(mbase, epnum);
  1226.         musb_writew(epio, MUSB_TXCSR, tx_csr);
  1227.         /* REVISIT may need to clear FLUSHFIFO ... */
  1228.         musb_writew(epio, MUSB_TXCSR, tx_csr);
  1229.         musb_writeb(epio, MUSB_TXINTERVAL, 0);
  1230.  
  1231.         done = true;
  1232.     }
  1233.  
  1234.     /* second cppi case */
  1235.     if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1236.         DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
  1237.         goto finish;
  1238.  
  1239.     }
  1240.  
  1241.     /* REVISIT this looks wrong... */
  1242.     if (!status || dma || usb_pipeisoc(pipe)) {
  1243.         if (dma)
  1244.             wLength = dma->actual_len;
  1245.         else
  1246.             wLength = qh->segsize;
  1247.         qh->offset += wLength;
  1248.  
  1249.         if (usb_pipeisoc(pipe)) {
  1250.             struct usb_iso_packet_descriptor    *d;
  1251.  
  1252.             d = urb->iso_frame_desc + qh->iso_idx;
  1253.             d->actual_length = qh->segsize;
  1254.             if (++qh->iso_idx >= urb->number_of_packets) {
  1255.                 done = true;
  1256.             } else {
  1257.                 d++;
  1258.                 buf = urb->transfer_buffer + d->offset;
  1259.                 wLength = d->length;
  1260.             }
  1261.         } else if (dma) {
  1262.             done = true;
  1263.         } else {
  1264.             /* see if we need to send more data, or ZLP */
  1265.             if (qh->segsize < qh->maxpacket)
  1266.                 done = true;
  1267.             else if (qh->offset == urb->transfer_buffer_length
  1268.                     && !(urb->transfer_flags
  1269.                         & URB_ZERO_PACKET))
  1270.                 done = true;
  1271.             if (!done) {
  1272.                 buf = urb->transfer_buffer
  1273.                         + qh->offset;
  1274.                 wLength = urb->transfer_buffer_length
  1275.                         - qh->offset;
  1276.             }
  1277.         }
  1278.     }
  1279.  
  1280.     /* urb->status != -EINPROGRESS means request has been faulted,
  1281.      * so we must abort this transfer after cleanup
  1282.      */
  1283.     if (urb->status != -EINPROGRESS) {
  1284.         done = true;
  1285.         if (status == 0)
  1286.             status = urb->status;
  1287.     }
  1288.  
  1289.     if (done) {
  1290.         /* set status */
  1291.         urb->status = status;
  1292.         urb->actual_length = qh->offset;
  1293.         musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
  1294.  
  1295.     } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
  1296.         /* WARN_ON(!buf); */
  1297.  
  1298.         /* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
  1299.          * (and presumably, fifo is not half-full) we should write TWO
  1300.          * packets before updating TXCSR ... other docs disagree ...
  1301.          */
  1302.         /* PIO:  start next packet in this URB */
  1303.         if (wLength > qh->maxpacket)
  1304.             wLength = qh->maxpacket;
  1305.         musb_write_fifo(hw_ep, wLength, buf);
  1306.         qh->segsize = wLength;
  1307.  
  1308.         musb_ep_select(mbase, epnum);
  1309.         musb_writew(epio, MUSB_TXCSR,
  1310.                 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
  1311.     } else
  1312.         DBG(1, "not complete, but dma enabled?\n");
  1313.  
  1314. finish:
  1315.     return;
  1316. }
  1317.  
  1318.  
  1319. #ifdef CONFIG_USB_INVENTRA_DMA
  1320.  
  1321. /* Host side RX (IN) using Mentor DMA works as follows:
  1322.     submit_urb ->
  1323.         - if queue was empty, ProgramEndpoint
  1324.         - first IN token is sent out (by setting ReqPkt)
  1325.     LinuxIsr -> RxReady()
  1326.     /\  => first packet is received
  1327.     |   - Set in mode 0 (DmaEnab, ~ReqPkt)
  1328.     |       -> DMA Isr (transfer complete) -> RxReady()
  1329.     |           - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
  1330.     |           - if urb not complete, send next IN token (ReqPkt)
  1331.     |              |        else complete urb.
  1332.     |              |
  1333.     ---------------------------
  1334.  *
  1335.  * Nuances of mode 1:
  1336.  *  For short packets, no ack (+RxPktRdy) is sent automatically
  1337.  *  (even if AutoClear is ON)
  1338.  *  For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
  1339.  *  automatically => major problem, as collecting the next packet becomes
  1340.  *  difficult. Hence mode 1 is not used.
  1341.  *
  1342.  * REVISIT
  1343.  *  All we care about at this driver level is that
  1344.  *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
  1345.  *       (b) termination conditions are: short RX, or buffer full;
  1346.  *       (c) fault modes include
  1347.  *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
  1348.  *             (and that endpoint's dma queue stops immediately)
  1349.  *           - overflow (full, PLUS more bytes in the terminal packet)
  1350.  *
  1351.  *  So for example, usb-storage sets URB_SHORT_NOT_OK, and would
  1352.  *  thus be a great candidate for using mode 1 ... for all but the
  1353.  *  last packet of one URB's transfer.
  1354.  */
  1355.  
  1356. #endif
  1357.  
  1358. /* Schedule next qh from ep->in_list and add the current qh at tail
  1359.  * to avoid endpoint starvation.
  1360.  */
  1361. static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
  1362. {
  1363.     struct dma_channel  *dma;
  1364.     struct urb  *urb;
  1365.     void __iomem    *mbase = musb->mregs;
  1366.     void __iomem    *epio = ep->regs;
  1367.     struct musb_qh  *cur_qh, *next_qh;
  1368.     u16 rx_csr;
  1369.  
  1370.     musb_ep_select(mbase, ep->epnum);
  1371.     dma = is_dma_capable() ? ep->rx_channel : NULL;
  1372.  
  1373.     /* clear nak timeout bit */
  1374.     rx_csr = musb_readw(epio, MUSB_RXCSR);
  1375.     rx_csr &= ~MUSB_RXCSR_DATAERROR;
  1376.     musb_writew(epio, MUSB_RXCSR, rx_csr);
  1377.  
  1378.     cur_qh = first_qh(&ep->in_list);
  1379.     if (cur_qh) {
  1380.         urb = next_urb(cur_qh);
  1381.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1382.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1383.             musb->dma_controller->channel_abort(dma);
  1384.             urb->actual_length += dma->actual_len;
  1385.             dma->actual_len = 0L;
  1386.         }
  1387.         musb_save_toggle(ep, 1, urb);
  1388.  
  1389.         /* delete cur_qh and add it to the tail of ep->in_list */
  1390.         list_del(&cur_qh->ring);
  1391.         list_add_tail(&cur_qh->ring, &ep->in_list);
  1392.  
  1393.         /* get the next qh from ep->in_list */
  1394.         next_qh = first_qh(&ep->in_list);
  1395.  
  1396.         /* set rx_reinit and schedule the next qh */
  1397.         ep->rx_reinit = 1;
  1398.         musb_start_urb(musb, 1, next_qh);
  1399.     }
  1400. }
  1401.  
  1402. /*
  1403.  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
  1404.  * and high-bandwidth IN transfer cases.
  1405.  */
  1406. void musb_host_rx(struct musb *musb, u8 epnum)
  1407. {
  1408.     struct urb      *urb;
  1409.     struct musb_hw_ep   *hw_ep = musb->endpoints + epnum;
  1410.     void __iomem        *epio = hw_ep->regs;
  1411.     struct musb_qh      *qh = hw_ep->in_qh;
  1412.     size_t          xfer_len;
  1413.     void __iomem        *mbase = musb->mregs;
  1414.     int         pipe;
  1415.     u16         rx_csr, val;
  1416.     bool            iso_err = false;
  1417.     bool            done = false;
  1418.     u32         status;
  1419.     struct dma_channel  *dma;
  1420.  
  1421.     musb_ep_select(mbase, epnum);
  1422.  
  1423.     urb = next_urb(qh);
  1424.     dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
  1425.     status = 0;
  1426.     xfer_len = 0;
  1427.  
  1428.     rx_csr = musb_readw(epio, MUSB_RXCSR);
  1429.     val = rx_csr;
  1430.  
  1431.     if (unlikely(!urb)) {
  1432.         /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
  1433.          * usbtest #11 (unlinks) triggers it regularly, sometimes
  1434.          * with fifo full.  (Only with DMA??)
  1435.          */
  1436.         DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
  1437.             musb_readw(epio, MUSB_RXCOUNT));
  1438.         musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
  1439.         return;
  1440.     }
  1441.  
  1442.     pipe = urb->pipe;
  1443.  
  1444.     DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
  1445.         epnum, rx_csr, urb->actual_length,
  1446.         dma ? dma->actual_len : 0);
  1447.  
  1448.     /* check for errors, concurrent stall & unlink is not really
  1449.      * handled yet! */
  1450.     if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
  1451.         DBG(3, "RX end %d STALL\n", epnum);
  1452.  
  1453.         /* stall; record URB status */
  1454.         status = -EPIPE;
  1455.  
  1456.     } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
  1457.         DBG(3, "end %d RX proto error\n", epnum);
  1458.  
  1459.         status = -EPROTO;
  1460.         musb_writeb(epio, MUSB_RXINTERVAL, 0);
  1461.  
  1462.     } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
  1463.  
  1464.         if (USB_ENDPOINT_XFER_ISOC != qh->type) {
  1465.             /* NOTE this code path would be a good place to PAUSE a
  1466.              * transfer, if there's some other (nonperiodic) rx urb
  1467.              * that could use this fifo.  (dma complicates it...)
  1468.              *
  1469.              * if (bulk && qh->ring.next != &hw_ep->in_list), then
  1470.              * we have a candidate... NAKing is *NOT* an error
  1471.              */
  1472.             DBG(6, "RX end %d NAK timeout\n", epnum);
  1473.             if (usb_pipebulk(urb->pipe) && qh->mux == 1 &&
  1474.                 !list_is_singular(&hw_ep->in_list)) {
  1475.                 musb_bulk_nak_timeout(musb, hw_ep);
  1476.                 return;
  1477.             }
  1478.             musb_ep_select(mbase, epnum);
  1479.             rx_csr &= ~MUSB_RXCSR_DATAERROR;
  1480.             musb_writew(epio, MUSB_RXCSR, rx_csr);
  1481.  
  1482.             goto finish;
  1483.         } else {
  1484.             DBG(4, "RX end %d ISO data error\n", epnum);
  1485.             /* packet error reported later */
  1486.             iso_err = true;
  1487.         }
  1488.     } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
  1489.         DBG(3, "end %d high bandwidth incomplete ISO packet received\n",
  1490.                 epnum);
  1491.         status = -EPROTO;
  1492.     }
  1493.  
  1494.     /* faults abort the transfer */
  1495.     if (status) {
  1496.         /* clean up dma and collect transfer count */
  1497.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1498.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1499.             (void) musb->dma_controller->channel_abort(dma);
  1500.             xfer_len = dma->actual_len;
  1501.         }
  1502.         musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
  1503.         musb_writeb(epio, MUSB_RXINTERVAL, 0);
  1504.         done = true;
  1505.         goto finish;
  1506.     }
  1507.  
  1508.     if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
  1509.         /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
  1510.         ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
  1511.         goto finish;
  1512.     }
  1513.  
  1514.     /* thorough shutdown for now ... given more precise fault handling
  1515.      * and better queueing support, we might keep a DMA pipeline going
  1516.      * while processing this irq for earlier completions.
  1517.      */
  1518.  
  1519.     /* FIXME this is _way_ too much in-line logic for Mentor DMA */
  1520.  
  1521.     if (!musb_inventra_dma() && (rx_csr & MUSB_RXCSR_H_REQPKT))  {
  1522.         /* REVISIT this happened for a while on some short reads...
  1523.          * the cleanup still needs investigation... looks bad...
  1524.          * and also duplicates dma cleanup code above ... plus,
  1525.          * shouldn't this be the "half full" double buffer case?
  1526.          */
  1527.         if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  1528.             dma->status = MUSB_DMA_STATUS_CORE_ABORT;
  1529.             (void) musb->dma_controller->channel_abort(dma);
  1530.             xfer_len = dma->actual_len;
  1531.             done = true;
  1532.         }
  1533.  
  1534.         DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
  1535.                 xfer_len, dma ? ", dma" : "");
  1536.         rx_csr &= ~MUSB_RXCSR_H_REQPKT;
  1537.  
  1538.         musb_ep_select(mbase, epnum);
  1539.         musb_writew(epio, MUSB_RXCSR,
  1540.                 MUSB_RXCSR_H_WZC_BITS | rx_csr);
  1541.     }
  1542.  
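            /* DMA was enabled for this endpoint: note how many bytes it
             * moved, then clear the DMA-related CSR bits before deciding
             * whether this URB is finished.
             */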
  1543.     if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
  1544.         xfer_len = dma->actual_len;
  1545.  
  1546.         val &= ~(MUSB_RXCSR_DMAENAB
  1547.             | MUSB_RXCSR_H_AUTOREQ
  1548.             | MUSB_RXCSR_AUTOCLEAR
  1549.             | MUSB_RXCSR_RXPKTRDY);
  1550.         musb_writew(hw_ep->regs, MUSB_RXCSR, val);
  1551.  
  1552. #ifdef CONFIG_USB_INVENTRA_DMA
  1553.         if (usb_pipeisoc(pipe)) {
  1554.             struct usb_iso_packet_descriptor *d;
  1555.  
  1556.             d = urb->iso_frame_desc + qh->iso_idx;
  1557.             d->actual_length = xfer_len;
  1558.  
  1559.             /* even if there was an error, we did the dma
  1560.              * for iso_frame_desc->length
  1561.              */
  1562.             if (d->status != -EILSEQ && d->status != -EOVERFLOW)
  1563.                 d->status = 0;
  1564.  
  1565.             if (++qh->iso_idx >= urb->number_of_packets)
  1566.                 done = true;
  1567.             else
  1568.                 done = false;
  1569.  
  1570.         } else {
  1571.             /* done if urb buffer is full or short packet is recd */
  1572.             done = (urb->actual_length + xfer_len >=
  1573.                     urb->transfer_buffer_length
  1574.                 || dma->actual_len < qh->maxpacket);
  1575.         }
  1576.  
  1577.         /* send IN token for next packet, without AUTOREQ */
  1578.         if (!done) {
  1579.             val |= MUSB_RXCSR_H_REQPKT;
  1580.             musb_writew(epio, MUSB_RXCSR,
  1581.                 MUSB_RXCSR_H_WZC_BITS | val);
  1582.         }
  1583.  
  1584.         DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
  1585.             done ? "off" : "reset",
  1586.             musb_readw(epio, MUSB_RXCSR),
  1587.             musb_readw(epio, MUSB_RXCOUNT));
  1588. #else
  1589.         done = true;
  1590. #endif
  1591.     } else if (urb->status == -EINPROGRESS) {
  1592.         /* if no errors, be sure a packet is ready for unloading */
  1593.         if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
  1594.             status = -EPROTO;
  1595.             ERR("Rx interrupt with no errors or packet!\n");
  1596.  
  1597.             /* FIXME this is another "SHOULD NEVER HAPPEN" */
  1598.  
  1599. /* SCRUB (RX) */
  1600.             /* do the proper sequence to abort the transfer */
  1601.             musb_ep_select(mbase, epnum);
  1602.             val &= ~MUSB_RXCSR_H_REQPKT;
  1603.             musb_writew(epio, MUSB_RXCSR, val);
  1604.             goto finish;
  1605.         }
  1606.  
  1607.         /* we are expecting IN packets */
  1608.         if (musb_inventra_dma() && dma) {
  1609.             struct dma_controller   *c;
  1610.             u16         rx_count;
  1611.             int         ret, length;
  1612.             dma_addr_t      buf;
  1613.  
  1614.             rx_count = musb_readw(epio, MUSB_RXCOUNT);
  1615.  
  1616.             DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
  1617.                     epnum, rx_count,
  1618.                     urb->transfer_dma
  1619.                         + urb->actual_length,
  1620.                     qh->offset,
  1621.                     urb->transfer_buffer_length);
  1622.  
  1623.             c = musb->dma_controller;
  1624.  
  1625.             if (usb_pipeisoc(pipe)) {
  1626.                 int status = 0;
  1627.                 struct usb_iso_packet_descriptor *d;
  1628.  
  1629.                 d = urb->iso_frame_desc + qh->iso_idx;
  1630.  
  1631.                 if (iso_err) {
  1632.                     status = -EILSEQ;
  1633.                     urb->error_count++;
  1634.                 }
  1635.                 if (rx_count > d->length) {
  1636.                     if (status == 0) {
  1637.                         status = -EOVERFLOW;
  1638.                         urb->error_count++;
  1639.                     }
  1640.                     DBG(2, "** OVERFLOW %d into %d\n",
  1641.                         rx_count, d->length);
  1642.  
  1643.                     length = d->length;
  1644.                 } else
  1645.                     length = rx_count;
  1646.                 d->status = status;
  1647.                 buf = urb->transfer_dma + d->offset;
  1648.             } else {
  1649.                 length = rx_count;
  1650.                 buf = urb->transfer_dma +
  1651.                         urb->actual_length;
  1652.             }
  1653.  
  1654.             dma->desired_mode = 0;
  1655. #ifdef USE_MODE1
  1656.             /* because of the issue below, mode 1 will
  1657.              * only rarely behave with correct semantics.
  1658.              */
  1659.             if ((urb->transfer_flags &
  1660.                         URB_SHORT_NOT_OK)
  1661.                 && (urb->transfer_buffer_length -
  1662.                         urb->actual_length)
  1663.                     > qh->maxpacket)
  1664.                 dma->desired_mode = 1;
  1665.             if (rx_count < hw_ep->max_packet_sz_rx) {
  1666.                 length = rx_count;
  1667.                 dma->desired_mode = 0;
  1668.             } else {
  1669.                 length = urb->transfer_buffer_length;
  1670.             }
  1671. #endif
  1672.  
  1673. /* Disadvantage of using mode 1:
  1674.  *  It's basically usable only for mass storage class; essentially all
  1675.  *  other protocols also terminate transfers on short packets.
  1676.  *
  1677.  * Details:
  1678.  *  An extra IN token is sent at the end of the transfer (due to AUTOREQ)
  1679.  *  If you try to use mode 1 for (transfer_buffer_length - 512), and try
  1680.  *  to use the extra IN token to grab the last packet using mode 0, then
  1681.  *  the problem is that you cannot be sure when the device will send the
  1682.  *  last packet and RxPktRdy set. Sometimes the packet is recd too soon
  1683.  *  such that it gets lost when RxCSR is re-set at the end of the mode 1
  1684.  *  transfer, while sometimes it is recd just a little late so that if you
  1685.  *  try to configure for mode 0 soon after the mode 1 transfer is
  1686.  *  completed, you will find rxcount 0. Okay, so you might think why not
  1687.  *  wait for an interrupt when the pkt is recd. Well, you won't get any!
  1688.  */
  1689.  
  1690.             val = musb_readw(epio, MUSB_RXCSR);
  1691.             val &= ~MUSB_RXCSR_H_REQPKT;
  1692.  
  1693.             if (dma->desired_mode == 0)
  1694.                 val &= ~MUSB_RXCSR_H_AUTOREQ;
  1695.             else
  1696.                 val |= MUSB_RXCSR_H_AUTOREQ;
  1697.  
  1698.             val |= MUSB_RXCSR_DMAENAB;
  1699.  
  1700.             /* autoclear shouldn't be set in high bandwidth */
  1701.             if (qh->hb_mult == 1)
  1702.                 val |= MUSB_RXCSR_AUTOCLEAR;
  1703.  
  1704.             musb_writew(epio, MUSB_RXCSR,
  1705.                 MUSB_RXCSR_H_WZC_BITS | val);
  1706.  
  1707.             /* REVISIT: when actual_length != 0,
  1708.              * transfer_buffer_length needs to be
  1709.              * adjusted first...
  1710.              */
  1711.             ret = c->channel_program(
  1712.                 dma, qh->maxpacket,
  1713.                 dma->desired_mode, buf, length);
  1714.  
  1715.             if (!ret) {
  1716.                 c->channel_release(dma);
  1717.                 hw_ep->rx_channel = NULL;
  1718.                 dma = NULL;
  1719.                 /* REVISIT reset CSR */
  1720.             }
  1721.         }
  1722.  
  1723.         if (!dma) {
  1724.             done = musb_host_packet_rx(musb, urb,
  1725.                     epnum, iso_err);
  1726.             DBG(6, "read %spacket\n", done ? "last " : "");
  1727.         }
  1728.     }
  1729.  
  1730. finish:
  1731.     urb->actual_length += xfer_len;
  1732.     qh->offset += xfer_len;
  1733.     if (done) {
  1734.         if (urb->status == -EINPROGRESS)
  1735.             urb->status = status;
  1736.         musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
  1737.     }
  1738. }
  1739.  
  1740. /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
  1741.  * the software schedule associates multiple such nodes with a given
  1742.  * host side hardware endpoint + direction; scheduling may activate
  1743.  * that hardware endpoint.
  1744.  */
  1745. static int musb_schedule(
  1746.     struct musb     *musb,
  1747.     struct musb_qh      *qh,
  1748.     int         is_in)
  1749. {
  1750.     int         idle;
  1751.     int         best_diff;
  1752.     int         best_end, epnum;
  1753.     struct musb_hw_ep   *hw_ep = NULL;
  1754.     struct list_head    *head = NULL;
  1755.     u16         maxpacket;
  1756.  
  1757.     /* use fixed hardware for control and bulk */
  1758.     if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
  1759.         hw_ep = musb->control_ep;
  1760.         head = &hw_ep->in_list;
  1761.         goto success;
  1762.     }
  1763.  
  1764.     /* else, periodic transfers get muxed to other endpoints */
  1765.  
  1766.     /* FIXME this doesn't consider direction, so it can only
  1767.      * work for one half of the endpoint hardware, and assumes
  1768.      * the previous cases handled all non-shared endpoints...
  1769.      */
  1770.  
  1771.     /* we know this qh hasn't been scheduled, so all we need to do
  1772.      * is choose which hardware endpoint to put it on ...
  1773.      *
  1774.      * REVISIT what we really want here is a regular schedule tree
  1775.      * like e.g. OHCI uses, but for now musb->periodic is just an
  1776.      * array of the _single_ logical endpoint associated with a
  1777.      * given physical one (identity mapping logical->physical).
  1778.      *
  1779.      * that simplistic approach makes TT scheduling a lot simpler;
  1780.      * there is none, and thus none of its complexity...
  1781.      */
  1782.     best_diff = 4096;
  1783.     best_end = -1;
  1784.  
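            /* wMaxPacketSize bits 11..12 encode additional (high bandwidth)
             * transactions per microframe, i.e. 2x or 3x the base payload.
             */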
  1785.     if (qh->maxpacket & (1 << 11))
  1786.         maxpacket = 2 * (qh->maxpacket & 0x7ff);
  1787.     else if (qh->maxpacket & (1 << 12))
  1788.         maxpacket = 3 * (qh->maxpacket & 0x7ff);
  1789.     else
  1790.         maxpacket = (qh->maxpacket & 0x7ff);
  1791.  
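            /* Best-fit search: among hardware endpoints not already claimed
             * by a periodic qh (and skipping the shared bulk endpoint), pick
             * the one whose FIFO exceeds this qh's worst-case packet size by
             * the smallest margin.
             */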
  1792.     for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
  1793.         int diff;
  1794.  
  1795.         if (musb->periodic[epnum])
  1796.             continue;
  1797.         hw_ep = &musb->endpoints[epnum];
  1798.         if (hw_ep == musb->bulk_ep)
  1799.             continue;
  1800.  
  1801.         if (is_in)
  1802.             diff = hw_ep->max_packet_sz_rx;
  1803.         else
  1804.             diff = hw_ep->max_packet_sz_tx;
  1805.         diff -= (qh->maxpacket * qh->hb_mult);
  1806.  
  1807.         if (diff >= 0 && best_diff > diff) {
  1808.             best_diff = diff;
  1809.             best_end = epnum;
  1810.         }
  1811.     }
  1812.     /* use bulk reserved ep1 if no other ep is free */
  1813.     if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
  1814.         hw_ep = musb->bulk_ep;
  1815.         if (is_in)
  1816.             head = &hw_ep->in_list;
  1817.         else
  1818.             head = &hw_ep->out_list;
  1819.         /* Enable the bulk NAK timeout scheme when bulk requests are
  1820.          * multiplexed.  This scheme doesn't work in the high speed to
  1821.          * full speed case, because a full speed device behind a high
  1822.          * speed hub doesn't generate NAK interrupts on this controller.
  1823.          * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS
  1824.          * and 4 (8 frames or 8 ms) for FS devices.
  1825.          */
  1826.         if (is_in && qh->dev)
  1827.             qh->intv_reg =
  1828.                 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
  1829.         goto success;
  1830.     } else if (best_end < 0) {
  1831.         return -ENOSPC;
  1832.     }
  1833.  
  1834.     idle = 1;
  1835.     qh->mux = 0;
  1836.     hw_ep = musb->endpoints + best_end;
  1837.     musb->periodic[best_end] = qh;
  1838.     DBG(4, "qh %p periodic slot %d\n", qh, best_end);
  1839. success:
  1840.     if (head) {
  1841.         idle = list_empty(head);
  1842.         list_add_tail(&qh->ring, head);
  1843.         qh->mux = 1;
  1844.     }
  1845.     qh->hw_ep = hw_ep;
  1846.     qh->hep->hcpriv = qh;
  1847.     if (idle)
  1848.         musb_start_urb(musb, is_in, qh);
  1849.     return 0;
  1850. }
  1851.  
  1852. static int musb_urb_enqueue(
  1853.     struct usb_hcd          *hcd,
  1854.     struct urb          *urb,
  1855.     gfp_t               mem_flags)
  1856. {
  1857.     unsigned long           flags;
  1858.     struct musb         *musb = hcd_to_musb(hcd);
  1859.     struct usb_host_endpoint    *hep = urb->ep;
  1860.     struct musb_qh          *qh;
  1861.     struct usb_endpoint_descriptor  *epd = &hep->desc;
  1862.     int             ret;
  1863.     unsigned            type_reg;
  1864.     unsigned            interval;
  1865.  
  1866.     /* host role must be active */
  1867.     if (!is_host_active(musb) || !musb->is_active)
  1868.         return -ENODEV;
  1869.  
  1870.     spin_lock_irqsave(&musb->lock, flags);
  1871.     ret = usb_hcd_link_urb_to_ep(hcd, urb);
  1872.     qh = ret ? NULL : hep->hcpriv;
  1873.     if (qh)
  1874.         urb->hcpriv = qh;
  1875.     spin_unlock_irqrestore(&musb->lock, flags);
  1876.  
  1877.     /* DMA mapping was already done, if needed, and this urb is on
  1878.      * hep->urb_list now ... so we're done, unless hep wasn't yet
  1879.      * scheduled onto a live qh.
  1880.      *
  1881.      * REVISIT best to keep urb->hcpriv valid until the endpoint gets
  1882.      * disabled, testing for empty qh->ring and avoiding qh setup costs
  1883.      * except for the first urb queued after a config change.
  1884.      */
  1885.     if (qh || ret)
  1886.         return ret;
  1887.  
  1888.     /* Allocate and initialize qh, minimizing the work done each time
  1889.      * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
  1890.      *
  1891.      * REVISIT consider a dedicated qh kmem_cache, so it's harder
  1892.      * for bugs in other kernel code to break this driver...
  1893.      */
  1894.     qh = kzalloc(sizeof *qh, mem_flags);
  1895.     if (!qh) {
  1896.         spin_lock_irqsave(&musb->lock, flags);
  1897.         usb_hcd_unlink_urb_from_ep(hcd, urb);
  1898.         spin_unlock_irqrestore(&musb->lock, flags);
  1899.         return -ENOMEM;
  1900.     }
  1901.  
  1902.     qh->hep = hep;
  1903.     qh->dev = urb->dev;
  1904.     INIT_LIST_HEAD(&qh->ring);
  1905.     qh->is_ready = 1;
  1906.  
  1907.     qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
  1908.     qh->type = usb_endpoint_type(epd);
  1909.     /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
  1910.      * Some musb cores don't support high bandwidth ISO transfers; and
  1911.      * we don't (yet!) support high bandwidth interrupt transfers.
  1912.      */
  1913.     qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
  1914.     if (qh->hb_mult > 1) {
  1915.         int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
  1916.  
  1917.         if (ok)
  1918.             ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
  1919.                 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
  1920.         if (!ok) {
  1921.             ret = -EMSGSIZE;
  1922.             goto done;
  1923.         }
  1924.         qh->maxpacket &= 0x7ff;
  1925.     }
  1926.     qh->epnum = usb_endpoint_num(epd);
  1927.    
  1928.  
  1929.     /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
  1930.     qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
  1931.  
  1932.     /* precompute rxtype/txtype/type0 register */
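            /* The layout mirrors the TXTYPE/RXTYPE registers: target endpoint
             * number in the low nibble, transfer type in bits 4..5, and the
             * speed code (0xc0 low, 0x80 full, 0x40 high) in the top two bits.
             */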
  1933.     type_reg = (qh->type << 4) | qh->epnum;
  1934.     switch (urb->dev->speed) {
  1935.     case USB_SPEED_LOW:
  1936.         type_reg |= 0xc0;
  1937.         break;
  1938.     case USB_SPEED_FULL:
  1939.         type_reg |= 0x80;
  1940.         break;
  1941.     default:
  1942.         type_reg |= 0x40;
  1943.     }
  1944.     qh->type_reg = type_reg;
  1945.  
  1946.     /* Precompute RXINTERVAL/TXINTERVAL register */
  1947.     switch (qh->type) {
  1948.     case USB_ENDPOINT_XFER_INT:
  1949.         /*
  1950.          * Full/low speeds use the linear encoding,
  1951.          * high speed uses the logarithmic encoding.
  1952.          */
  1953.         if (urb->dev->speed <= USB_SPEED_FULL) {
  1954.             interval = max_t(u8, epd->bInterval, 1);
  1955.             break;
  1956.         }
  1957.         /* FALLTHROUGH */
  1958.     case USB_ENDPOINT_XFER_ISOC:
  1959.         /* ISO always uses logarithmic encoding */
  1960.         interval = min_t(u8, epd->bInterval, 16);
  1961.         break;
  1962.     default:
  1963.         /* REVISIT we actually want to use NAK limits, hinting to the
  1964.          * transfer scheduling logic to try some other qh, e.g. try
  1965.          * for 2 msec first:
  1966.          *
  1967.          * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
  1968.          *
  1969.          * The downside of disabling this is that transfer scheduling
  1970.          * gets VERY unfair for nonperiodic transfers; a misbehaving
  1971.          * peripheral could make that hurt.  Or for reads, one that's
  1972.          * perfectly normal: network and other drivers keep reads
  1973.          * posted at all times, so having one pending for a week should
  1974.          * be perfectly safe.
  1975.          *
  1976.          * The upside of disabling it is that we can put off writing
  1977.          * transfer scheduling code for a while.
  1978.          */
  1979.         interval = 0;
  1980.     }
  1981.     qh->intv_reg = interval;
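            /* Example: a high speed interrupt endpoint with bInterval 4 keeps
             * the logarithmic encoding above, i.e. (per the USB 2.0 encoding)
             * a polling period of 2^(4-1) = 8 microframes, or 1 ms.
             */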
  1982.  
  1983.     /* precompute addressing for external hub/tt ports */
  1984.     if (musb->is_multipoint) {
  1985.         struct usb_device   *parent = urb->dev->parent;
  1986.  
  1987.         if (parent != hcd->self.root_hub) {
  1988.             qh->h_addr_reg = (u8) parent->devnum;
  1989.  
  1990.             /* set up tt info if needed */
  1991.             if (urb->dev->tt) {
  1992.                 qh->h_port_reg = (u8) urb->dev->ttport;
  1993.                 if (urb->dev->tt->hub)
  1994.                     qh->h_addr_reg =
  1995.                         (u8) urb->dev->tt->hub->devnum;
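                        /* a multi-TT hub is flagged in the top bit of the
                         * hub address register value
                         */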
  1996.                 if (urb->dev->tt->multi)
  1997.                     qh->h_addr_reg |= 0x80;
  1998.             }
  1999.         }
  2000.     }
  2001.  
  2002.     /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
  2003.      * until we get real dma queues (with an entry for each urb/buffer),
  2004.      * we only have work to do in the former case.
  2005.      */
  2006.     spin_lock_irqsave(&musb->lock, flags);
  2007.     if (hep->hcpriv) {
  2008.         /* some concurrent activity submitted another urb to hep...
  2009.          * odd, rare, error prone, but legal.
  2010.          */
  2011.         kfree(qh);
  2012.         ret = 0;
  2013.     } else
  2014.         ret = musb_schedule(musb, qh,
  2015.                 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
  2016.  
  2017.     if (ret == 0) {
  2018.         urb->hcpriv = qh;
  2019.         /* FIXME set urb->start_frame for iso/intr, it's tested in
  2020.          * musb_start_urb(), but otherwise only konicawc cares ...
  2021.          */
  2022.     }
  2023.     spin_unlock_irqrestore(&musb->lock, flags);
  2024. done:
  2025.     if (ret != 0) {
  2026.         spin_lock_irqsave(&musb->lock, flags);
  2027.         usb_hcd_unlink_urb_from_ep(hcd, urb);
  2028.         spin_unlock_irqrestore(&musb->lock, flags);
  2029.         kfree(qh);
  2030.     }
  2031.     return ret;
  2032. }
  2033.  
  2034.  
  2035. /*
  2036.  * abort a transfer that's at the head of a hardware queue.
  2037.  * called with controller locked, irqs blocked
  2038.  * that hardware queue advances to the next transfer, unless prevented
  2039.  */
  2040. static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
  2041. {
  2042.     struct musb_hw_ep   *ep = qh->hw_ep;
  2043.     void __iomem        *epio = ep->regs;
  2044.     unsigned        hw_end = ep->epnum;
  2045.     void __iomem        *regs = ep->musb->mregs;
  2046.     u16         csr;
  2047.     int         status = 0;
  2048.  
  2049.     musb_ep_select(regs, hw_end);
  2050.  
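            /* If DMA is still active on this endpoint, abort it first and
             * credit whatever it actually transferred to the URB before
             * touching the endpoint CSRs.
             */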
  2051.     if (is_dma_capable()) {
  2052.         struct dma_channel  *dma;
  2053.  
  2054.         dma = is_in ? ep->rx_channel : ep->tx_channel;
  2055.         if (dma) {
  2056.             status = ep->musb->dma_controller->channel_abort(dma);
  2057.             DBG_nonverb(status ? 1 : 3,
  2058.                 "abort %cX%d DMA for urb %p --> %d\n",
  2059.                 is_in ? 'R' : 'T', ep->epnum,
  2060.                 urb, status);
  2061.             urb->actual_length += dma->actual_len;
  2062.         }
  2063.     }
  2064.  
  2065.     /* turn off DMA requests, discard state, stop polling ... */
  2066.     if (is_in) {
  2067.         /* giveback saves bulk toggle */
  2068.         csr = musb_h_flush_rxfifo(ep, 0);
  2069.  
  2070.         /* REVISIT we still get an irq; should likely clear the
  2071.          * endpoint's irq status here to avoid bogus irqs.
  2072.          * clearing that status is platform-specific...
  2073.          */
  2074.     } else {
  2075.         musb_h_tx_flush_fifo(ep);
  2076.         csr = musb_readw(epio, MUSB_TXCSR);
  2077.         csr &= ~(MUSB_TXCSR_AUTOSET
  2078.             | MUSB_TXCSR_DMAENAB
  2079.             | MUSB_TXCSR_H_RXSTALL
  2080.             | MUSB_TXCSR_H_NAKTIMEOUT
  2081.             | MUSB_TXCSR_H_ERROR
  2082.             | MUSB_TXCSR_TXPKTRDY);
  2083.         musb_writew(epio, MUSB_TXCSR, csr);
  2084.         /* REVISIT may need to clear FLUSHFIFO ... */
  2085.         musb_writew(epio, MUSB_TXCSR, csr);
  2086.         /* flush cpu writebuffer */
  2087.         csr = musb_readw(epio, MUSB_TXCSR);
  2088.     }
  2089.     if (status == 0)
  2090.         musb_advance_schedule(ep->musb, urb, ep, is_in);
  2091.     return status;
  2092. }
  2093.  
  2094. static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
  2095. {
  2096.     struct musb     *musb = hcd_to_musb(hcd);
  2097.     struct musb_qh      *qh;
  2098.     struct list_head    *sched;
  2099.     unsigned long       flags;
  2100.     int         ret;
  2101.  
  2102.     DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
  2103.             usb_pipedevice(urb->pipe),
  2104.             usb_pipeendpoint(urb->pipe),
  2105.             usb_pipein(urb->pipe) ? "in" : "out");
  2106.  
  2107.     spin_lock_irqsave(&musb->lock, flags);
  2108.     ret = usb_hcd_check_unlink_urb(hcd, urb, status);
  2109.     if (ret)
  2110.         goto done;
  2111.  
  2112.     qh = urb->hcpriv;
  2113.     if (!qh)
  2114.         goto done;
  2115.  
  2116.     /* Any URB not actively programmed into endpoint hardware can be
  2117.      * immediately given back; that's any URB not at the head of an
  2118.      * endpoint queue, unless someday we get real DMA queues.  And even
  2119.      * if it's at the head, it might not be known to the hardware...
  2120.      *
  2121.      * Otherwise abort current transfer, pending dma, etc.; urb->status
  2122.      * has already been updated.  This is a synchronous abort; it'd be
  2123.      * OK to hold off until after some IRQ, though.
  2124.      */
  2125.     if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
  2126.         ret = -EINPROGRESS;
  2127.     else {
  2128.         switch (qh->type) {
  2129.         case USB_ENDPOINT_XFER_CONTROL:
  2130.             sched = &musb->control_ep->in_list;
  2131.             break;
  2132.         case USB_ENDPOINT_XFER_BULK:
  2133.             if (qh->mux == 1) {
  2134.                 if (usb_pipein(urb->pipe))
  2135.                     sched = &musb->bulk_ep->in_list;
  2136.                 else
  2137.                     sched = &musb->bulk_ep->out_list;
  2138.                 break;
  2139.             }
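                    /* FALLTHROUGH: non-multiplexed bulk is treated like the
                     * periodic types below (no shared schedule list).
                     */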
  2140.         default:
  2141.             /* REVISIT when we get a schedule tree, periodic
  2142.              * transfers won't always be at the head of a
  2143.              * singleton queue...
  2144.              */
  2145.             sched = NULL;
  2146.             break;
  2147.         }
  2148.     }
  2149.  
  2150.     /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
  2151.     if (ret < 0 || (sched && qh != first_qh(sched))) {
  2152.         int ready = qh->is_ready;
  2153.  
  2154.         ret = 0;
  2155.         qh->is_ready = 0;
  2156.         __musb_giveback(musb, urb, 0);
  2157.         qh->is_ready = ready;
  2158.  
  2159.         /* If nothing else (usually musb_giveback) is using it
  2160.          * and its URB list has emptied, recycle this qh.
  2161.          */
  2162.         if (ready && list_empty(&qh->hep->urb_list)) {
  2163.             qh->hep->hcpriv = NULL;
  2164.             list_del(&qh->ring);
  2165.             kfree(qh);
  2166.         }
  2167.     } else
  2168.         ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
  2169. done:
  2170.     spin_unlock_irqrestore(&musb->lock, flags);
  2171.     return ret;
  2172. }
  2173.  
  2174. /* disable an endpoint */
  2175. static void
  2176. musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
  2177. {
  2178.     u8          epnum = hep->desc.bEndpointAddress;
  2179.     unsigned long       flags;
  2180.     struct musb     *musb = hcd_to_musb(hcd);
  2181.     u8          is_in = epnum & USB_DIR_IN;
  2182.     struct musb_qh      *qh;
  2183.     struct urb      *urb;
  2184.     struct list_head    *sched;
  2185.  
  2186.     spin_lock_irqsave(&musb->lock, flags);
  2187.  
  2188.     qh = hep->hcpriv;
  2189.     if (qh == NULL)
  2190.         goto exit;
  2191.  
  2192.     switch (qh->type) {
  2193.     case USB_ENDPOINT_XFER_CONTROL:
  2194.         sched = &musb->control_ep->in_list;
  2195.         break;
  2196.     case USB_ENDPOINT_XFER_BULK:
  2197.         if (qh->mux == 1) {
  2198.             if (is_in)
  2199.                 sched = &musb->bulk_ep->in_list;
  2200.             else
  2201.                 sched = &musb->bulk_ep->out_list;
  2202.             break;
  2203.         }
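                /* FALLTHROUGH for non-multiplexed bulk endpoints */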
  2204.     default:
  2205.         /* REVISIT when we get a schedule tree, periodic transfers
  2206.          * won't always be at the head of a singleton queue...
  2207.          */
  2208.         sched = NULL;
  2209.         break;
  2210.     }
  2211.  
  2212.     /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
  2213.  
  2214.     /* kick first urb off the hardware, if needed */
  2215.     qh->is_ready = 0;
  2216.     if (!sched || qh == first_qh(sched)) {
  2217.         urb = next_urb(qh);
  2218.  
  2219.         /* make software (then hardware) stop ASAP */
  2220.         if (!urb->unlinked)
  2221.             urb->status = -ESHUTDOWN;
  2222.  
  2223.         /* cleanup */
  2224.         musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
  2225.  
  2226.         /* Then nuke all the others ... and advance the
  2227.          * queue on hw_ep (e.g. bulk ring) when we're done.
  2228.          */
  2229.         while (!list_empty(&hep->urb_list)) {
  2230.             urb = next_urb(qh);
  2231.             urb->status = -ESHUTDOWN;
  2232.             musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
  2233.         }
  2234.     } else {
  2235.         /* Just empty the queue; the hardware is busy with
  2236.          * other transfers, and since !qh->is_ready nothing
  2237.          * will activate any of these as it advances.
  2238.          */
  2239.         while (!list_empty(&hep->urb_list))
  2240.             __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
  2241.  
  2242.         hep->hcpriv = NULL;
  2243.         list_del(&qh->ring);
  2244.         kfree(qh);
  2245.     }
  2246. exit:
  2247.     spin_unlock_irqrestore(&musb->lock, flags);
  2248. }
  2249.  
  2250. static int musb_h_get_frame_number(struct usb_hcd *hcd)
  2251. {
  2252.     struct musb *musb = hcd_to_musb(hcd);
  2253.  
  2254.     return musb_readw(musb->mregs, MUSB_FRAME);
  2255. }
  2256.  
  2257. static int musb_h_start(struct usb_hcd *hcd)
  2258. {
  2259.     struct musb *musb = hcd_to_musb(hcd);
  2260.  
  2261.     /* NOTE: musb_start() is called when the hub driver turns
  2262.      * on port power, or when (OTG) peripheral starts.
  2263.      */
  2264.     hcd->state = HC_STATE_RUNNING;
  2265.     musb->port1_status = 0;
  2266.     return 0;
  2267. }
  2268.  
  2269. static void musb_h_stop(struct usb_hcd *hcd)
  2270. {
  2271.     musb_stop(hcd_to_musb(hcd));
  2272.     hcd->state = HC_STATE_HALT;
  2273. }
  2274.  
  2275. static int musb_bus_suspend(struct usb_hcd *hcd)
  2276. {
  2277.     struct musb *musb = hcd_to_musb(hcd);
  2278.  
  2279.     if (musb->xceiv->state == OTG_STATE_A_SUSPEND)
  2280.         return 0;
  2281.  
  2282.     if (is_host_active(musb) && musb->is_active) {
  2283.         WARNING("trying to suspend as %s is_active=%i\n",
  2284.             otg_state_string(musb), musb->is_active);
  2285.         return -EBUSY;
  2286.     } else
  2287.         return 0;
  2288. }
  2289.  
  2290. static int musb_bus_resume(struct usb_hcd *hcd)
  2291. {
  2292.     /* resuming child port does the work */
  2293.     return 0;
  2294. }
  2295.  
  2296. const struct hc_driver musb_hc_driver = {
  2297.     .description        = "musb-hcd",
  2298.     .product_desc       = "MUSB HDRC host driver",
  2299.     .hcd_priv_size      = sizeof(struct musb),
  2300.     .flags          = HCD_USB2 | HCD_MEMORY,
  2301.  
  2302.     /* not using irq handler or reset hooks from usbcore, since
  2303.      * those must be shared with peripheral code for OTG configs
  2304.      */
  2305.  
  2306.     .start          = musb_h_start,
  2307.     .stop           = musb_h_stop,
  2308.  
  2309.     .get_frame_number   = musb_h_get_frame_number,
  2310.  
  2311.     .urb_enqueue        = musb_urb_enqueue,
  2312.     .urb_dequeue        = musb_urb_dequeue,
  2313.     .endpoint_disable   = musb_h_disable,
  2314.  
  2315.     .hub_status_data    = musb_hub_status_data,
  2316.     .hub_control        = musb_hub_control,
  2317.     .bus_suspend        = musb_bus_suspend,
  2318.     .bus_resume     = musb_bus_resume,
  2319.     /* .start_port_reset    = NULL, */
  2320.     /* .hub_irq_enable  = NULL, */
  2321. };