0904-backport-vanilla-eth-driver.patch
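
Backport of the upstream ("vanilla") lantiq xrx200 ethernet driver structure
to the OpenWrt driver: the shared struct xrx200_hw is folded into struct
xrx200_priv, the driver registers a single net_device with one RX and one TX
DMA channel, the TX tasklet is replaced by NAPI housekeeping using
napi_complete_done(), leftover boot loader frames are flushed from the RX DMA
ring on open, and xrx200_dma_init() requests its IRQs with devm_request_irq()
and unwinds on error.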

--- a/drivers/net/ethernet/lantiq_xrx200.c  2019-01-30 02:20:35.780993746 +0100
+++ b/drivers/net/ethernet/lantiq_xrx200.c  2019-01-30 23:15:08.147803424 +0100
@@ -39,12 +39,6 @@
 #define SW_POLLING
 #define SW_ROUTING
 
-#ifdef SW_ROUTING
-#define XRX200_MAX_DEV     2
-#else
-#define XRX200_MAX_DEV     1
-#endif
-
 #define XRX200_MAX_VLAN        64
 #define XRX200_PCE_ACTVLAN_IDX 0x01
 #define XRX200_PCE_VLANMAP_IDX 0x02
@@ -207,46 +201,42 @@
    int refcount;
    int tx_free;
 
-   struct net_device dummy_dev;
-   struct net_device *devs[XRX200_MAX_DEV];
-
    struct tasklet_struct tasklet;
    struct napi_struct napi;
    struct ltq_dma_channel dma;
    struct sk_buff *skb[LTQ_DESC_NUM];
 
+   struct xrx200_priv *priv;
    spinlock_t lock;
 };
 
-struct xrx200_hw {
-   struct clk *clk;
-   struct mii_bus *mii_bus;
-
-   struct xrx200_chan chan[XRX200_MAX_DMA];
-   u16 vlan_vid[XRX200_MAX_VLAN];
-   u16 vlan_port_map[XRX200_MAX_VLAN];
-
-   struct net_device *devs[XRX200_MAX_DEV];
-   int num_devs;
-
-   int port_map[XRX200_MAX_PORT];
-   unsigned short wan_map;
-
-   struct switch_dev swdev;
-};
-
 struct xrx200_priv {
    struct net_device_stats stats;
    int id;
+
+   struct clk *clk;
 
+   struct xrx200_chan chan_tx;
+   struct xrx200_chan chan_rx;
+   struct net_device *net_dev;
+   struct device *dev;
+
    struct xrx200_port port[XRX200_MAX_PORT];
    int num_port;
    bool wan;
    bool sw;
-   unsigned short port_map;
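+   /* local port bitmap; renamed from port_map to avoid clashing with port_map[] below */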
+   unsigned short d_port_map;
    unsigned char mac[6];
 
-   struct xrx200_hw *hw;
+   struct mii_bus *mii_bus;
+
+   u16 vlan_vid[XRX200_MAX_VLAN];
+   u16 vlan_port_map[XRX200_MAX_VLAN];
+
+   int port_map[XRX200_MAX_PORT];
+   unsigned short wan_map;
+
+   struct switch_dev swdev;
 };
 
 static __iomem void *xrx200_switch_membase;
@@ -470,14 +460,14 @@
 }
 
 // swconfig interface
-static void xrx200_hw_init(struct xrx200_hw *hw);
+static void xrx200_hw_init(struct xrx200_priv *priv);
 
 // global
 static int xrx200sw_reset_switch(struct switch_dev *dev)
 {
-   struct xrx200_hw *hw = container_of(dev, struct xrx200_hw, swdev);
+   struct xrx200_priv *priv = container_of(dev, struct xrx200_priv, swdev);
 
-   xrx200_hw_init(hw);
+   xrx200_hw_init(priv);
 
    return 0;
 }
@@ -523,7 +513,7 @@
 static int xrx200sw_set_vlan_vid(struct switch_dev *dev, const struct switch_attr *attr,
                 struct switch_val *val)
 {
-   struct xrx200_hw *hw = container_of(dev, struct xrx200_hw, swdev);
+   struct xrx200_priv *priv = container_of(dev, struct xrx200_priv, swdev);
    int i;
    struct xrx200_pce_table_entry tev;
    struct xrx200_pce_table_entry tem;
@@ -538,7 +528,7 @@
            return -EINVAL;
    }
 
-   hw->vlan_vid[val->port_vlan] = val->value.i;
+   priv->vlan_vid[val->port_vlan] = val->value.i;
 
    tev.index = val->port_vlan;
    xrx200_pce_table_entry_read(&tev);
@@ -571,7 +561,7 @@
 
 static int xrx200sw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val)
 {
-   struct xrx200_hw *hw = container_of(dev, struct xrx200_hw, swdev);
+   struct xrx200_priv *priv = container_of(dev, struct xrx200_priv, swdev);
    int i, portmap, tagmap, untagged;
    struct xrx200_pce_table_entry tem;
 
@@ -624,7 +614,7 @@
 
    ltq_switch_w32_mask(0, portmap, PCE_PMAP2);
    ltq_switch_w32_mask(0, portmap, PCE_PMAP3);
-   hw->vlan_port_map[val->port_vlan] = portmap;
+   priv->vlan_port_map[val->port_vlan] = portmap;
 
    xrx200sw_fixup_pvids();
 
@@ -834,19 +824,16 @@
 // .get_port_stats = xrx200sw_get_port_stats, //TODO
 };
 
-static int xrx200sw_init(struct xrx200_hw *hw)
+static int xrx200sw_init(struct xrx200_priv *priv)
 {
-   int netdev_num;
 
-   for (netdev_num = 0; netdev_num < hw->num_devs; netdev_num++)
-   {
        struct switch_dev *swdev;
-       struct net_device *dev = hw->devs[netdev_num];
-       struct xrx200_priv *priv = netdev_priv(dev);
-       if (!priv->sw)
-           continue;
+       if (!priv->sw) {
+           pr_info("!!!! no switch\n");
+           return -ENODEV;
+       }
 
-       swdev = &hw->swdev;
+       swdev = &priv->swdev;
 
        swdev->name = "Lantiq XRX200 Switch";
        swdev->vlans = XRX200_MAX_VLAN;
@@ -854,32 +841,49 @@
        swdev->cpu_port = 6;
        swdev->ops = &xrx200sw_ops;
 
-       register_switch(swdev, dev);
+       register_switch(swdev, priv->net_dev);
        return 0; // enough switches
+}
+
+/* drop all the packets from the DMA ring */
+static void xrx200_flush_dma(struct xrx200_chan *ch)
+{
+   int i;
+
+   for (i = 0; i < LTQ_DESC_NUM; i++) {
+       struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+
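+       /* stop at the first descriptor the hardware still owns or has not completed */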
+       if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
+           break;
+
+       desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+               XRX200_DMA_DATA_LEN;
+       ch->dma.desc++;
+       ch->dma.desc %= LTQ_DESC_NUM;
    }
-   return 0;
 }
 
 static int xrx200_open(struct net_device *dev)
 {
    struct xrx200_priv *priv = netdev_priv(dev);
-   int i;
 
-   for (i = 0; i < XRX200_MAX_DMA; i++) {
-       if (!priv->hw->chan[i].dma.irq)
-           continue;
-       spin_lock_bh(&priv->hw->chan[i].lock);
-       if (!priv->hw->chan[i].refcount) {
-           if (XRX200_DMA_IS_RX(i))
-               napi_enable(&priv->hw->chan[i].napi);
-           ltq_dma_open(&priv->hw->chan[i].dma);
-       }
-       priv->hw->chan[i].refcount++;
-       spin_unlock_bh(&priv->hw->chan[i].lock);
-   }
-   for (i = 0; i < priv->num_port; i++)
-       if (priv->port[i].phydev)
-           phy_start(priv->port[i].phydev);
+   napi_enable(&priv->chan_tx.napi);
+   ltq_dma_open(&priv->chan_tx.dma);
+   ltq_dma_enable_irq(&priv->chan_tx.dma);
+
+   napi_enable(&priv->chan_rx.napi);
+   ltq_dma_open(&priv->chan_rx.dma);
+   /* The boot loader does not always deactivate the receiving of frames
+    * on the ports and then some packets queue up in the PPE buffers.
+    * They already passed the PMAC so they do not have the tags
+    * configured here. Read these packets here and drop them.
+    * The HW should have written them into memory after 10us
+    */
+   usleep_range(20, 40);
+   xrx200_flush_dma(&priv->chan_rx);
+
+   ltq_dma_enable_irq(&priv->chan_rx.dma);
+
    netif_wake_queue(dev);
 
    return 0;
@@ -896,19 +900,11 @@
        if (priv->port[i].phydev)
            phy_stop(priv->port[i].phydev);
 
-   for (i = 0; i < XRX200_MAX_DMA; i++) {
-       if (!priv->hw->chan[i].dma.irq)
-           continue;
+   napi_disable(&priv->chan_rx.napi);
+   ltq_dma_close(&priv->chan_rx.dma);
 
-       priv->hw->chan[i].refcount--;
-       if (!priv->hw->chan[i].refcount) {
-           if (XRX200_DMA_IS_RX(i))
-               napi_disable(&priv->hw->chan[i].napi);
-           spin_lock_bh(&priv->hw->chan[i].lock);
-           ltq_dma_close(&priv->hw->chan[XRX200_DMA_RX].dma);
-           spin_unlock_bh(&priv->hw->chan[i].lock);
-       }
-   }
+   napi_disable(&priv->chan_tx.napi);
+   ltq_dma_close(&priv->chan_tx.dma);
 
    return 0;
 }
@@ -938,8 +934,8 @@
 
 static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
 {
-   struct net_device *dev = ch->devs[id];
-   struct xrx200_priv *priv = netdev_priv(dev);
+   struct xrx200_priv *priv = ch->priv;
+   struct net_device *dev = priv->net_dev;
    struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
    struct sk_buff *skb = ch->skb[ch->dma.desc];
    int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
@@ -963,15 +959,15 @@
    skb->dev = dev;
    skb->protocol = eth_type_trans(skb, dev);
    netif_receive_skb(skb);
-   priv->stats.rx_packets++;
-   priv->stats.rx_bytes+=len;
+   dev->stats.rx_packets++;
+   dev->stats.rx_bytes += len;
 }
 
 static int xrx200_poll_rx(struct napi_struct *napi, int budget)
 {
    struct xrx200_chan *ch = container_of(napi,
                struct xrx200_chan, napi);
-   struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
+   struct xrx200_priv *priv = ch->priv;
    int rx = 0;
    int complete = 0;
 
@@ -982,7 +978,7 @@
            struct sk_buff *skb = ch->skb[ch->dma.desc];
            u8 *special_tag = (u8*)skb->data;
            int port = (special_tag[7] >> SPPID_SHIFT) & SPPID_MASK;
-           xrx200_hw_receive(ch, priv->hw->port_map[port]);
+           xrx200_hw_receive(ch, priv->port_map[port]);
 #else
            xrx200_hw_receive(ch, 0);
 #endif
@@ -993,47 +989,63 @@
    }
 
    if (complete || !rx) {
-       napi_complete(&ch->napi);
-       ltq_dma_enable_irq(&ch->dma);
+       if (napi_complete_done(&ch->napi, rx)) {
+           ltq_dma_ack_irq(&ch->dma);
+           ltq_dma_enable_irq(&ch->dma);
+       }
    }
 
    return rx;
 }
 
-static void xrx200_tx_housekeeping(unsigned long ptr)
+static struct net_device_stats *xrx200_get_stats(struct net_device *dev)
 {
-   struct xrx200_chan *ch = (struct xrx200_chan *) ptr;
+   struct xrx200_priv *priv = netdev_priv(dev);
+
+   return &priv->stats;
+}
+
+static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
+{
+   struct xrx200_chan *ch = container_of(napi,
+               struct xrx200_chan, napi);
+   struct net_device *net_dev = ch->priv->net_dev;
    int pkts = 0;
-   int i;
+   int bytes = 0;
 
-   spin_lock_bh(&ch->lock);
-   ltq_dma_ack_irq(&ch->dma);
-   while ((ch->dma.desc_base[ch->tx_free].ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
-       struct sk_buff *skb = ch->skb[ch->tx_free];
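+   /* reclaim up to budget transmitted skbs, stopping at the first descriptor the HW still owns */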
+   while (pkts < budget) {
+       struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];
+
+       if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+           struct sk_buff *skb = ch->skb[ch->tx_free];
 
-       pkts++;
-       ch->skb[ch->tx_free] = NULL;
-       dev_kfree_skb(skb);
-       memset(&ch->dma.desc_base[ch->tx_free], 0,
-           sizeof(struct ltq_dma_desc));
-       ch->tx_free++;
-       ch->tx_free %= LTQ_DESC_NUM;
+           pkts++;
+           bytes += skb->len;
+           ch->skb[ch->tx_free] = NULL;
+           consume_skb(skb);
+           memset(&ch->dma.desc_base[ch->tx_free], 0,
+                  sizeof(struct ltq_dma_desc));
+           ch->tx_free++;
+           ch->tx_free %= LTQ_DESC_NUM;
+       } else {
+           break;
+       }
    }
-   ltq_dma_enable_irq(&ch->dma);
-   spin_unlock_bh(&ch->lock);
 
-   if (!pkts)
-       return;
+   net_dev->stats.tx_packets += pkts;
+   net_dev->stats.tx_bytes += bytes;
 
-   for (i = 0; i < XRX200_MAX_DEV && ch->devs[i]; i++)
-       netif_wake_queue(ch->devs[i]);
-}
-
-static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
-{
-   struct xrx200_priv *priv = netdev_priv(dev);
+   if (pkts < budget) {
+       if (napi_complete_done(&ch->napi, pkts)) {
+           ltq_dma_ack_irq(&ch->dma);
+           ltq_dma_enable_irq(&ch->dma);
+       }
+   }
 
-   return &priv->stats;
+   return pkts;
 }
 
 static void xrx200_tx_timeout(struct net_device *dev)
@@ -1043,13 +1055,17 @@
    printk(KERN_ERR "%s: transmit timed out, disable the dma channel irq\n", dev->name);
 
    priv->stats.tx_errors++;
+
+   ltq_dma_enable_irq(&priv->chan_tx.dma);  /* TODO: necessary? */
+
    netif_wake_queue(dev);
 }
 
 static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
    struct xrx200_priv *priv = netdev_priv(dev);
-   struct xrx200_chan *ch;
+   struct xrx200_chan *ch = &priv->chan_tx;
    struct ltq_dma_desc *desc;
    u32 byte_offset;
    int ret = NETDEV_TX_OK;
@@ -1057,10 +1073,6 @@
 #ifdef SW_ROUTING
    u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
 #endif
-   if(priv->id)
-       ch = &priv->hw->chan[XRX200_DMA_TX_2];
-   else
-       ch = &priv->hw->chan[XRX200_DMA_TX];
 
    desc = &ch->dma.desc_base[ch->dma.desc];
 
@@ -1069,7 +1081,7 @@
 
 #ifdef SW_ROUTING
    if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
-       u16 port_map = priv->port_map;
+       u16 port_map = priv->d_port_map;
 
        if (priv->sw && skb->protocol == htons(ETH_P_8021Q)) {
            u16 vid;
@@ -1078,9 +1090,9 @@
            port_map = 0;
            if (!__vlan_get_tag(skb, &vid)) {
                for (i = 0; i < XRX200_MAX_VLAN; i++) {
-                   if (priv->hw->vlan_vid[i] != vid)
+                   if (priv->vlan_vid[i] != vid)
                        continue;
-                   port_map = priv->hw->vlan_port_map[i];
+                   port_map = priv->vlan_port_map[i];
                    break;
                }
            }
@@ -1114,9 +1126,7 @@
 
    ch->skb[ch->dma.desc] = skb;
 
-   netif_trans_update(dev);
-
-   desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
+   desc->addr = ((unsigned int) dma_map_single(priv->dev, skb->data, len,
                        DMA_TO_DEVICE)) - byte_offset;
    wmb();
    desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
@@ -1126,71 +1136,81 @@
    if (ch->dma.desc == ch->tx_free)
        netif_stop_queue(dev);
 
-
-   priv->stats.tx_packets++;
-   priv->stats.tx_bytes+=len;
-
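+   /* tx_packets/tx_bytes are now accounted in xrx200_tx_housekeeping() */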
+   skb_tx_timestamp(skb);
+
 out:
    spin_unlock_bh(&ch->lock);
 
    return ret;
 }
 
-static irqreturn_t xrx200_dma_irq(int irq, void *priv)
+static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
 {
-   struct xrx200_hw *hw = priv;
-   int chnr = irq - XRX200_DMA_IRQ;
-   struct xrx200_chan *ch = &hw->chan[chnr];
+   struct xrx200_chan *ch = ptr;
 
    ltq_dma_disable_irq(&ch->dma);
    ltq_dma_ack_irq(&ch->dma);
-
-   if (chnr % 2)
-       tasklet_schedule(&ch->tasklet);
-   else
-       napi_schedule(&ch->napi);
+   napi_schedule(&ch->napi);
 
    return IRQ_HANDLED;
 }
 
-static int xrx200_dma_init(struct xrx200_hw *hw)
+static int xrx200_dma_init(struct xrx200_priv *priv)
 {
-   int i, err = 0;
+   int i;
+   struct xrx200_chan *ch_rx = &priv->chan_rx;
+   struct xrx200_chan *ch_tx = &priv->chan_tx;
+   int ret;
 
    ltq_dma_init_port(DMA_PORT_ETOP);
 
-   for (i = 0; i < 8 && !err; i++) {
-       int irq = XRX200_DMA_IRQ + i;
-       struct xrx200_chan *ch = &hw->chan[i];
-
-       spin_lock_init(&ch->lock);
-
-       ch->idx = ch->dma.nr = i;
-
-       if (i == XRX200_DMA_TX) {
-           ltq_dma_alloc_tx(&ch->dma);
-           err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx", hw);
-       } else if (i == XRX200_DMA_TX_2) {
-           ltq_dma_alloc_tx(&ch->dma);
-           err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx_2", hw);
-       } else if (i == XRX200_DMA_RX) {
-           ltq_dma_alloc_rx(&ch->dma);
-           for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
-                   ch->dma.desc++)
-               if (xrx200_alloc_skb(ch))
-                   err = -ENOMEM;
-           ch->dma.desc = 0;
-           err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_rx", hw);
-       } else
-           continue;
+   ch_rx->dma.nr = XRX200_DMA_RX;
+   ch_rx->priv = priv;
+
+   ltq_dma_alloc_rx(&ch_rx->dma);
+   for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
+        ch_rx->dma.desc++) {
+       ret = xrx200_alloc_skb(ch_rx);
+       if (ret)
+           goto rx_free;
+   }
+   ch_rx->dma.desc = 0;
+
+   ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
+                  "vrx200_rx", &priv->chan_rx);
+   if (ret) {
+       dev_err(priv->dev, "failed to request RX irq %d\n",
+           ch_rx->dma.irq);
+       goto rx_ring_free;
+   }
+
+   ch_tx->dma.nr = XRX200_DMA_TX;
+   ch_tx->priv = priv;
+
+   ltq_dma_alloc_tx(&ch_tx->dma);
+   ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
+                  "vrx200_tx", &priv->chan_tx);
+   if (ret) {
+       dev_err(priv->dev, "failed to request TX irq %d\n",
+           ch_tx->dma.irq);
+       goto tx_free;
+   }
 
-       if (!err)
-           ch->dma.irq = irq;
-       else
-           pr_err("net-xrx200: failed to request irq %d\n", irq);
+   return ret;
+
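+/* error unwinding, in reverse order of setup */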
+tx_free:
+   ltq_dma_free(&ch_tx->dma);
+
+rx_ring_free:
+   /* free the allocated RX ring */
+   for (i = 0; i < LTQ_DESC_NUM; i++) {
+       if (priv->chan_rx.skb[i])
+           dev_kfree_skb_any(priv->chan_rx.skb[i]);
    }
 
-   return err;
+rx_free:
+   ltq_dma_free(&ch_rx->dma);
+   return ret;
 }
 
 #ifdef SW_POLLING
@@ -1328,11 +1348,12 @@
 {
    struct net_device *netdev = phydev->attached_dev;
 
-   if (do_carrier)
+   if (do_carrier) {
        if (up)
            netif_carrier_on(netdev);
        else if (!xrx200_phy_has_link(netdev))
            netif_carrier_off(netdev);
+   }
 
    phydev->adjust_link(netdev);
 }
@@ -1343,7 +1364,7 @@
    struct phy_device *phydev = NULL;
    unsigned val;
 
-   phydev = mdiobus_get_phy(priv->hw->mii_bus, port->phy_addr);
+   phydev = mdiobus_get_phy(priv->mii_bus, port->phy_addr);
 
    if (!phydev) {
        netdev_err(dev, "no PHY found\n");
@@ -1376,10 +1397,10 @@
 #ifdef SW_POLLING
    phy_read_status(phydev);
 
-   val = xrx200_mdio_rd(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
+   val = xrx200_mdio_rd(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
    val |= ADVERTIZE_MPD;
-   xrx200_mdio_wr(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
-   xrx200_mdio_wr(priv->hw->mii_bus, 0, 0, 0x1040);
+   xrx200_mdio_wr(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
+   xrx200_mdio_wr(priv->mii_bus, 0, 0, 0x1040);
 
    phy_start_aneg(phydev);
 #endif
@@ -1522,12 +1543,12 @@
    ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
 }
 
-static void xrx200_hw_init(struct xrx200_hw *hw)
+static void xrx200_hw_init(struct xrx200_priv *priv)
 {
    int i;
 
    /* enable clock gate */
-   clk_enable(hw->clk);
+   clk_enable(priv->clk);
 
    ltq_switch_w32(1, 0);
    mdelay(100);
@@ -1595,49 +1616,45 @@
    xrx200sw_write_x(1, XRX200_BM_QUEUE_GCTRL_GL_MOD, 0);
 
    for (i = 0; i < XRX200_MAX_VLAN; i++)
-       hw->vlan_vid[i] = i;
+       priv->vlan_vid[i] = i;
 }
 
-static void xrx200_hw_cleanup(struct xrx200_hw *hw)
+static void xrx200_hw_cleanup(struct xrx200_priv *priv)
 {
    int i;
 
    /* disable the switch */
    ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
 
-   /* free the channels and IRQs */
-   for (i = 0; i < 2; i++) {
-       ltq_dma_free(&hw->chan[i].dma);
-       if (hw->chan[i].dma.irq)
-           free_irq(hw->chan[i].dma.irq, hw);
-   }
+   ltq_dma_free(&priv->chan_tx.dma);
+   ltq_dma_free(&priv->chan_rx.dma);
 
    /* free the allocated RX ring */
    for (i = 0; i < LTQ_DESC_NUM; i++)
-       dev_kfree_skb_any(hw->chan[XRX200_DMA_RX].skb[i]);
+       dev_kfree_skb_any(priv->chan_rx.skb[i]);
 
    /* clear the mdio bus */
-   mdiobus_unregister(hw->mii_bus);
-   mdiobus_free(hw->mii_bus);
+   mdiobus_unregister(priv->mii_bus);
+   mdiobus_free(priv->mii_bus);
 
    /* release the clock */
-   clk_disable(hw->clk);
-   clk_put(hw->clk);
+   clk_disable(priv->clk);
+   clk_put(priv->clk);
 }
 
-static int xrx200_of_mdio(struct xrx200_hw *hw, struct device_node *np)
+static int xrx200_of_mdio(struct xrx200_priv *priv, struct device_node *np)
 {
-   hw->mii_bus = mdiobus_alloc();
-   if (!hw->mii_bus)
+   priv->mii_bus = mdiobus_alloc();
+   if (!priv->mii_bus)
        return -ENOMEM;
 
-   hw->mii_bus->read = xrx200_mdio_rd;
-   hw->mii_bus->write = xrx200_mdio_wr;
-   hw->mii_bus->name = "lantiq,xrx200-mdio";
-   snprintf(hw->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+   priv->mii_bus->read = xrx200_mdio_rd;
+   priv->mii_bus->write = xrx200_mdio_wr;
+   priv->mii_bus->name = "lantiq,xrx200-mdio";
+   snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
 
-   if (of_mdiobus_register(hw->mii_bus, np)) {
-       mdiobus_free(hw->mii_bus);
+   if (of_mdiobus_register(priv->mii_bus, np)) {
+       mdiobus_free(priv->mii_bus);
        return -ENXIO;
    }
 
@@ -1677,12 +1694,12 @@
        }
    /* is this port a wan port ? */
    if (priv->wan)
-       priv->hw->wan_map |= BIT(p->num);
+       priv->wan_map |= BIT(p->num);
 
-   priv->port_map |= BIT(p->num);
+   priv->d_port_map |= BIT(p->num);
 
-   /* store the port id in the hw struct so we can map ports -> devices */
-   priv->hw->port_map[p->num] = priv->hw->num_devs;
+   /* store the port id so we can map ports -> the device */
+   priv->port_map[p->num] = 0;
 }
 
 static const struct net_device_ops xrx200_netdev_ops = {
@@ -1696,29 +1713,21 @@
    .ndo_tx_timeout     = xrx200_tx_timeout,
 };
 
-static void xrx200_of_iface(struct xrx200_hw *hw, struct device_node *iface, struct device *dev)
+static void xrx200_of_iface(struct xrx200_priv *priv, struct device_node *iface, struct device *dev)
 {
-   struct xrx200_priv *priv;
    struct device_node *port;
    const __be32 *wan;
    const u8 *mac;
 
-   /* alloc the network device */
-   hw->devs[hw->num_devs] = alloc_etherdev(sizeof(struct xrx200_priv));
-   if (!hw->devs[hw->num_devs])
-       return;
-
    /* setup the network device */
-   strcpy(hw->devs[hw->num_devs]->name, "eth%d");
-   hw->devs[hw->num_devs]->netdev_ops = &xrx200_netdev_ops;
-   hw->devs[hw->num_devs]->watchdog_timeo = XRX200_TX_TIMEOUT;
-   hw->devs[hw->num_devs]->needed_headroom = XRX200_HEADROOM;
-   SET_NETDEV_DEV(hw->devs[hw->num_devs], dev);
+   strcpy(priv->net_dev->name, "eth%d");
+   priv->net_dev->netdev_ops = &xrx200_netdev_ops;
+   priv->net_dev->watchdog_timeo = XRX200_TX_TIMEOUT;
+   priv->net_dev->needed_headroom = XRX200_HEADROOM;
+   SET_NETDEV_DEV(priv->net_dev, dev);
 
    /* setup our private data */
-   priv = netdev_priv(hw->devs[hw->num_devs]);
-   priv->hw = hw;
-   priv->id = hw->num_devs;
+   priv->id = 0;
 
    mac = of_get_mac_address(iface);
    if (mac)
@@ -1738,20 +1747,33 @@
        if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
            xrx200_of_port(priv, port);
 
-   /* register the actual device */
-   if (!register_netdev(hw->devs[hw->num_devs]))
-       hw->num_devs++;
 }
 
-static struct xrx200_hw xrx200_hw;
-
 static int xrx200_probe(struct platform_device *pdev)
 {
+   struct device *dev = &pdev->dev;
    struct resource *res[4];
    struct device_node *mdio_np, *iface_np, *phy_np;
    struct of_phandle_iterator it;
    int err;
    int i;
+   struct xrx200_priv *priv;
+   struct net_device *net_dev;
+
+   /* alloc the network device */
+   net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
+   if (!net_dev)
+       return -ENOMEM;
+
+   priv = netdev_priv(net_dev);
+   priv->net_dev = net_dev;
+   priv->dev = dev;
+
+   net_dev->netdev_ops = &xrx200_netdev_ops;
+   SET_NETDEV_DEV(net_dev, dev);
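+   /* MTU range: Ethernet minimum up to one DMA descriptor payload */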
+   net_dev->min_mtu = ETH_ZLEN;
+   net_dev->max_mtu = XRX200_DMA_DATA_LEN;
 
    /* load the memory ranges */
    for (i = 0; i < 4; i++) {
@@ -1781,85 +1803,90 @@
                return -EPROBE_DEFER;
        }
    }
+
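+   /* the DMA channel IRQ numbers are consecutive, starting at XRX200_DMA_IRQ */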
+   priv->chan_rx.dma.irq = XRX200_DMA_IRQ + XRX200_DMA_RX;
+   priv->chan_tx.dma.irq = XRX200_DMA_IRQ + XRX200_DMA_TX;
+   priv->chan_rx.priv = priv;
+   priv->chan_tx.priv = priv;
 
    /* get the clock */
-   xrx200_hw.clk = clk_get(&pdev->dev, NULL);
-   if (IS_ERR(xrx200_hw.clk)) {
+   priv->clk = clk_get(&pdev->dev, NULL);
+   if (IS_ERR(priv->clk)) {
        dev_err(&pdev->dev, "failed to get clock\n");
-       return PTR_ERR(xrx200_hw.clk);
+       return PTR_ERR(priv->clk);
    }
 
    /* bring up the dma engine and IP core */
-   xrx200_dma_init(&xrx200_hw);
-   xrx200_hw_init(&xrx200_hw);
-   tasklet_init(&xrx200_hw.chan[XRX200_DMA_TX].tasklet, xrx200_tx_housekeeping, (u32) &xrx200_hw.chan[XRX200_DMA_TX]);
-   tasklet_init(&xrx200_hw.chan[XRX200_DMA_TX_2].tasklet, xrx200_tx_housekeeping, (u32) &xrx200_hw.chan[XRX200_DMA_TX_2]);
+   err = xrx200_dma_init(priv);
+   if (err)
+       return err;
+
+   /* enable clock gate */
+   err = clk_prepare_enable(priv->clk);
+   if (err)
+       goto err_uninit_dma;
+
+   xrx200_hw_init(priv);
 
    /* bring up the mdio bus */
    mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
                "lantiq,xrx200-mdio");
    if (mdio_np)
-       if (xrx200_of_mdio(&xrx200_hw, mdio_np))
+       if (xrx200_of_mdio(priv, mdio_np))
            dev_err(&pdev->dev, "mdio probe failed\n");
 
    /* load the interfaces */
    for_each_child_of_node(pdev->dev.of_node, iface_np)
-       if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
-           if (xrx200_hw.num_devs < XRX200_MAX_DEV)
-               xrx200_of_iface(&xrx200_hw, iface_np, &pdev->dev);
-           else
-               dev_err(&pdev->dev,
-                   "only %d interfaces allowed\n",
-                   XRX200_MAX_DEV);
-       }
-
-   if (!xrx200_hw.num_devs) {
-       xrx200_hw_cleanup(&xrx200_hw);
-       dev_err(&pdev->dev, "failed to load interfaces\n");
-       return -ENOENT;
-   }
-
-   xrx200sw_init(&xrx200_hw);
+       if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
+           xrx200_of_iface(priv, iface_np, &pdev->dev);
+           break;  /* hack: only the first interface is used */
+       }
+
+   xrx200sw_init(priv);
 
    /* set wan port mask */
-   ltq_pmac_w32(xrx200_hw.wan_map, PMAC_EWAN);
-
-   for (i = 0; i < xrx200_hw.num_devs; i++) {
-       xrx200_hw.chan[XRX200_DMA_RX].devs[i] = xrx200_hw.devs[i];
-       xrx200_hw.chan[XRX200_DMA_TX].devs[i] = xrx200_hw.devs[i];
-       xrx200_hw.chan[XRX200_DMA_TX_2].devs[i] = xrx200_hw.devs[i];
-   }
+   ltq_pmac_w32(priv->wan_map, PMAC_EWAN);
 
    /* setup NAPI */
-   init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev);
-   netif_napi_add(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev,
-           &xrx200_hw.chan[XRX200_DMA_RX].napi, xrx200_poll_rx, 32);
+   netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
+   netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+
+   platform_set_drvdata(pdev, priv);
 
-   platform_set_drvdata(pdev, &xrx200_hw);
+   err = register_netdev(net_dev);
+   if (err)
+       goto err_unprepare_clk;
 
    return 0;
+
+err_unprepare_clk:
+   clk_disable_unprepare(priv->clk);
+
+err_uninit_dma:
+   xrx200_hw_cleanup(priv);
+
+   return err;
 }
 
 static int xrx200_remove(struct platform_device *pdev)
 {
-   struct net_device *dev = platform_get_drvdata(pdev);
-   struct xrx200_priv *priv;
-
-   if (!dev)
-       return 0;
 
-   priv = netdev_priv(dev);
+   struct xrx200_priv *priv = platform_get_drvdata(pdev);
+   struct net_device *net_dev = priv->net_dev;
 
    /* free stack related instances */
-   netif_stop_queue(dev);
-   netif_napi_del(&xrx200_hw.chan[XRX200_DMA_RX].napi);
-
-   /* shut down hardware */
-   xrx200_hw_cleanup(&xrx200_hw);
+   netif_stop_queue(net_dev);
+   netif_napi_del(&priv->chan_tx.napi);
+   netif_napi_del(&priv->chan_rx.napi);
 
    /* remove the actual device */
-   unregister_netdev(dev);
-   free_netdev(dev);
+   unregister_netdev(net_dev);
+
+   /* release the clock */
+   clk_disable_unprepare(priv->clk);
+
+   /* shut down hardware */
+   xrx200_hw_cleanup(priv);
 
    return 0;
 }