lantiq_xrx200.c

  1. /*
  2. * This program is free software; you can redistribute it and/or modify it
  3. * under the terms of the GNU General Public License version 2 as published
  4. * by the Free Software Foundation.
  5. *
  6. * This program is distributed in the hope that it will be useful,
  7. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. * GNU General Public License for more details.
  10. *
  11. * You should have received a copy of the GNU General Public License
  12. * along with this program; if not, write to the Free Software
  13. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
  14. *
  15. * Copyright (C) 2010 Lantiq Deutschland
  16. * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
  17. */
  18.  
  19. #include <linux/switch.h>
  20. #include <linux/etherdevice.h>
  21. #include <linux/module.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/clk.h>
  25. #include <linux/if_vlan.h>
  26. #include <asm/delay.h>
  27.  
  28. #include <linux/of_net.h>
  29. #include <linux/of_mdio.h>
  30. #include <linux/of_gpio.h>
  31. #include <linux/of_platform.h>
  32.  
  33. #include <xway_dma.h>
  34. #include <lantiq_soc.h>
  35.  
  36. #include "lantiq_pce.h"
  37. #include "lantiq_xrx200_sw.h"
  38.  
  39. #include <linux/time.h> //pc2005 time tests
  40.  
  41. #define SW_POLLING
  42. #define SW_ROUTING
  43.  
  44. #define XRX200_MAX_VLAN 64
  45.  
  46. #define XRX200_PCE_ACTVLAN_IDX 0x01
  47. #define XRX200_PCE_VLANMAP_IDX 0x02
  48.  
  49. #define XRX200_MAX_PORT 7
  50. #define XRX200_MAX_DMA 8
  51.  
  52. #define XRX200_HEADROOM 4
  53.  
  54. #define XRX200_TX_TIMEOUT (30 * HZ)
  55.  
  56. /* port type */
  57. #define XRX200_PORT_TYPE_PHY 1
  58. #define XRX200_PORT_TYPE_MAC 2
  59.  
  60. /* DMA */
  61. #define XRX200_DMA_DATA_LEN 0x600
  62. #define XRX200_DMA_TX_ALIGN (32 - 1)
  63.  
  64. #define XRX200_DMA_IRQ INT_NUM_IM2_IRL0
  65. #define XRX200_DMA_RX 0
  66. #define XRX200_DMA_TX 1
  67. #define XRX200_DMA_TX_2 3
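/* channel numbering convention: even DMA channels are RX, odd channels are TX
 * (see XRX200_DMA_IS_TX/XRX200_DMA_IS_RX below)
 */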
#define XRX200_DMA_IS_TX(x) ((x) % 2)
  69. #define XRX200_DMA_IS_RX(x) (!XRX200_DMA_IS_TX(x))
  70.  
  71.  
  72. /* fetch / store dma */
  73. #define FDMA_PCTRL0 0x2A00
  74. #define FDMA_PCTRLx(x) (FDMA_PCTRL0 + (x * 0x18))
  75. #define SDMA_PCTRL0 0x2F00
  76. #define SDMA_PCTRLx(x) (SDMA_PCTRL0 + (x * 0x18))
  77.  
  78. /* buffer management */
  79. #define BM_PCFG0 0x200
  80. #define BM_PCFGx(x) (BM_PCFG0 + (x * 8))
  81.  
  82. /* MDIO */
  83. #define MDIO_GLOB 0x0000
  84. #define MDIO_CTRL 0x0020
  85. #define MDIO_READ 0x0024
  86. #define MDIO_WRITE 0x0028
  87. #define MDIO_PHY0 0x0054
  88. #define MDIO_PHY(x) (0x0054 - (x * sizeof(unsigned)))
  89. #define MDIO_CLK_CFG0 0x002C
  90. #define MDIO_CLK_CFG1 0x0030
  91.  
  92. #define MDIO_GLOB_ENABLE 0x8000
  93. #define MDIO_BUSY BIT(12)
  94. #define MDIO_RD BIT(11)
  95. #define MDIO_WR BIT(10)
  96. #define MDIO_MASK 0x1f
  97. #define MDIO_ADDRSHIFT 5
  98. #define MDIO1_25MHZ 9
  99.  
  100. #define MDIO_PHY_LINK_DOWN 0x4000
  101. #define MDIO_PHY_LINK_UP 0x2000
  102.  
  103. #define MDIO_PHY_SPEED_M10 0x0000
  104. #define MDIO_PHY_SPEED_M100 0x0800
  105. #define MDIO_PHY_SPEED_G1 0x1000
  106.  
  107. #define MDIO_PHY_FDUP_EN 0x0200
  108. #define MDIO_PHY_FDUP_DIS 0x0600
  109.  
  110. #define MDIO_PHY_LINK_MASK 0x6000
  111. #define MDIO_PHY_SPEED_MASK 0x1800
  112. #define MDIO_PHY_FDUP_MASK 0x0600
  113. #define MDIO_PHY_ADDR_MASK 0x001f
#define MDIO_UPDATE_MASK (MDIO_PHY_ADDR_MASK | MDIO_PHY_LINK_MASK | \
			  MDIO_PHY_SPEED_MASK | MDIO_PHY_FDUP_MASK)
  116.  
  117. /* MII */
  118. #define MII_CFG(p) (p * 8)
  119.  
  120. #define MII_CFG_EN BIT(14)
  121.  
  122. #define MII_CFG_MODE_MIIP 0x0
  123. #define MII_CFG_MODE_MIIM 0x1
  124. #define MII_CFG_MODE_RMIIP 0x2
  125. #define MII_CFG_MODE_RMIIM 0x3
  126. #define MII_CFG_MODE_RGMII 0x4
  127. #define MII_CFG_MODE_MASK 0xf
  128.  
  129. #define MII_CFG_RATE_M2P5 0x00
  130. #define MII_CFG_RATE_M25 0x10
  131. #define MII_CFG_RATE_M125 0x20
  132. #define MII_CFG_RATE_M50 0x30
  133. #define MII_CFG_RATE_AUTO 0x40
  134. #define MII_CFG_RATE_MASK 0x70
  135.  
  136. /* cpu port mac */
  137. #define PMAC_HD_CTL 0x0000
  138. #define PMAC_RX_IPG 0x0024
  139. #define PMAC_EWAN 0x002c
  140.  
  141. #define PMAC_IPG_MASK 0xf
  142. #define PMAC_HD_CTL_AS 0x0008
  143. #define PMAC_HD_CTL_AC 0x0004
  144. #define PMAC_HD_CTL_RC 0x0010
  145. #define PMAC_HD_CTL_RXSH 0x0040
  146. #define PMAC_HD_CTL_AST 0x0080
  147. #define PMAC_HD_CTL_RST 0x0100
  148.  
  149. /* PCE */
  150. #define PCE_TBL_KEY(x) (0x1100 + ((7 - x) * 4))
  151. #define PCE_TBL_MASK 0x1120
  152. #define PCE_TBL_VAL(x) (0x1124 + ((4 - x) * 4))
  153. #define PCE_TBL_ADDR 0x1138
  154. #define PCE_TBL_CTRL 0x113c
  155. #define PCE_PMAP1 0x114c
  156. #define PCE_PMAP2 0x1150
  157. #define PCE_PMAP3 0x1154
  158. #define PCE_GCTRL_REG(x) (0x1158 + (x * 4))
  159. #define PCE_PCTRL_REG(p, x) (0x1200 + (((p * 0xa) + x) * 4))
  160.  
  161. #define PCE_TBL_BUSY BIT(15)
  162. #define PCE_TBL_CFG_ADDR_MASK 0x1f
  163. #define PCE_TBL_CFG_ADWR 0x20
  164. #define PCE_TBL_CFG_ADWR_MASK 0x60
  165. #define PCE_INGRESS BIT(11)
  166.  
  167. /* MAC */
  168. #define MAC_FLEN_REG (0x2314)
  169. #define MAC_CTRL_REG(p, x) (0x240c + (((p * 0xc) + x) * 4))
  170.  
  171. /* buffer management */
  172. #define BM_PCFG(p) (0x200 + (p * 8))
  173.  
  174. /* special tag in TX path header */
  175. #define SPID_SHIFT 24
  176. #define DPID_SHIFT 16
  177. #define DPID_ENABLE 1
  178. #define SPID_CPU_PORT 2
  179. #define PORT_MAP_SEL BIT(15)
  180. #define PORT_MAP_EN BIT(14)
  181. #define PORT_MAP_SHIFT 1
  182. #define PORT_MAP_MASK 0x3f
  183.  
  184. #define SPPID_MASK 0x7
  185. #define SPPID_SHIFT 4
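/* With SW_ROUTING a 4-byte special tag is pushed in front of every frame sent
 * to the CPU port: SPID_CPU_PORT in bits 31..24 marks the CPU port as source,
 * DPID_ENABLE (bit 0) plus a destination id at DPID_SHIFT steers WAN traffic,
 * and for multicast PORT_MAP_SEL/PORT_MAP_EN together with a port map in bits
 * 6..1 select the egress ports. On receive the PMAC prepends an 8-byte status
 * header; the ingress port is read from byte 7, bits 6..4 (SPPID_*).
 */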
  186.  
  187. /* MII regs not yet in linux */
  188. #define MDIO_DEVAD_NONE (-1)
  189. #define ADVERTIZE_MPD (1 << 10)
  190.  
  191. struct xrx200_port {
  192. u8 num;
  193. u8 phy_addr;
  194. u16 flags;
  195. phy_interface_t phy_if;
  196.  
  197. int link;
  198. int gpio;
  199. enum of_gpio_flags gpio_flags;
  200.  
  201. struct phy_device *phydev;
  202. struct device_node *phy_node;
  203. };
  204.  
  205. struct xrx200_chan {
  206. /* ring buffer tail pointer */
  207. unsigned tx_free ____cacheline_aligned_in_smp;
  208.  
  209.  
  210. /* skb in use reference */
  211. struct sk_buff *skb[LTQ_DESC_NUM];
  212.  
  213. /* saved dma address for unmap */
  214. dma_addr_t desc_addr[LTQ_DESC_NUM];
  215.  
  216. /* saved length for unmap */
  217. size_t desc_size[LTQ_DESC_NUM];
  218.  
  219. struct napi_struct napi;
  220. struct ltq_dma_channel dma;
  221. struct xrx200_priv *priv;
  222. spinlock_t lock;
  223.  
  224. };
  225.  
  226. struct xrx200_priv {
  227. struct net_device_stats stats;
  228. struct xrx200_chan chan_tx;
  229. struct xrx200_chan chan_rx;
  230.  
  231. struct clk *clk;
  232.  
  233. struct net_device *net_dev;
  234. struct device *dev;
  235.  
  236. struct xrx200_port port[XRX200_MAX_PORT];
  237. int num_port;
  238. bool wan;
  239. bool sw;
  240. unsigned short d_port_map;
  241. unsigned char mac[6];
  242.  
  243. struct mii_bus *mii_bus;
  244.  
  245. u16 vlan_vid[XRX200_MAX_VLAN];
  246. u16 vlan_port_map[XRX200_MAX_VLAN];
  247.  
  248. int port_map[XRX200_MAX_PORT];
  249. unsigned short wan_map;
  250.  
  251. struct switch_dev swdev;
  252. };
  253.  
  254. static __iomem void *xrx200_switch_membase;
  255. static __iomem void *xrx200_mii_membase;
  256. static __iomem void *xrx200_mdio_membase;
  257. static __iomem void *xrx200_pmac_membase;
  258.  
  259. #define ltq_switch_r32(x) ltq_r32(xrx200_switch_membase + (x))
  260. #define ltq_switch_w32(x, y) ltq_w32(x, xrx200_switch_membase + (y))
  261. #define ltq_switch_w32_mask(x, y, z) \
  262. ltq_w32_mask(x, y, xrx200_switch_membase + (z))
  263.  
  264. #define ltq_mdio_r32(x) ltq_r32(xrx200_mdio_membase + (x))
  265. #define ltq_mdio_w32(x, y) ltq_w32(x, xrx200_mdio_membase + (y))
  266. #define ltq_mdio_w32_mask(x, y, z) \
  267. ltq_w32_mask(x, y, xrx200_mdio_membase + (z))
  268.  
  269. #define ltq_mii_r32(x) ltq_r32(xrx200_mii_membase + (x))
  270. #define ltq_mii_w32(x, y) ltq_w32(x, xrx200_mii_membase + (y))
  271. #define ltq_mii_w32_mask(x, y, z) \
  272. ltq_w32_mask(x, y, xrx200_mii_membase + (z))
  273.  
  274. #define ltq_pmac_r32(x) ltq_r32(xrx200_pmac_membase + (x))
  275. #define ltq_pmac_w32(x, y) ltq_w32(x, xrx200_pmac_membase + (y))
  276. #define ltq_pmac_w32_mask(x, y, z) \
  277. ltq_w32_mask(x, y, xrx200_pmac_membase + (z))
  278.  
  279. #define XRX200_GLOBAL_REGATTR(reg) \
  280. .id = reg, \
  281. .type = SWITCH_TYPE_INT, \
  282. .set = xrx200_set_global_attr, \
  283. .get = xrx200_get_global_attr
  284.  
  285. #define XRX200_PORT_REGATTR(reg) \
  286. .id = reg, \
  287. .type = SWITCH_TYPE_INT, \
  288. .set = xrx200_set_port_attr, \
  289. .get = xrx200_get_port_attr
  290.  
  291. static int xrx200sw_read_x(int reg, int x)
  292. {
  293. int value, mask, addr;
  294.  
  295. addr = xrx200sw_reg[reg].offset + (xrx200sw_reg[reg].mult * x);
  296. value = ltq_switch_r32(addr);
  297. mask = (1 << xrx200sw_reg[reg].size) - 1;
  298. value = (value >> xrx200sw_reg[reg].shift);
  299.  
  300. return (value & mask);
  301. }
  302.  
  303. static int xrx200sw_read(int reg)
  304. {
  305. return xrx200sw_read_x(reg, 0);
  306. }
  307.  
  308. static void xrx200sw_write_x(int value, int reg, int x)
  309. {
  310. int mask, addr;
  311.  
  312. addr = xrx200sw_reg[reg].offset + (xrx200sw_reg[reg].mult * x);
  313. mask = (1 << xrx200sw_reg[reg].size) - 1;
  314. mask = (mask << xrx200sw_reg[reg].shift);
  315. value = (value << xrx200sw_reg[reg].shift) & mask;
  316.  
  317. ltq_switch_w32_mask(mask, value, addr);
  318. }
  319.  
  320. static void xrx200sw_write(int value, int reg)
  321. {
  322. xrx200sw_write_x(value, reg, 0);
  323. }
  324.  
  325. struct xrx200_pce_table_entry {
  326. int index; // PCE_TBL_ADDR.ADDR = pData->table_index
  327. int table; // PCE_TBL_CTRL.ADDR = pData->table
  328. unsigned short key[8];
  329. unsigned short val[5];
  330. unsigned short mask;
  331. unsigned short type;
  332. unsigned short valid;
  333. unsigned short gmap;
  334. };
  335.  
  336. static int xrx200_pce_table_entry_read(struct xrx200_pce_table_entry *tbl)
  337. {
  338. // wait until hardware is ready
  339. while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
  340.  
  341. // prepare the table access:
  342. // PCE_TBL_ADDR.ADDR = pData->table_index
  343. xrx200sw_write(tbl->index, XRX200_PCE_TBL_ADDR_ADDR);
  344. // PCE_TBL_CTRL.ADDR = pData->table
  345. xrx200sw_write(tbl->table, XRX200_PCE_TBL_CTRL_ADDR);
  346.  
  347. //(address-based read)
  348. xrx200sw_write(0, XRX200_PCE_TBL_CTRL_OPMOD); // OPMOD_ADRD
  349.  
  350. xrx200sw_write(1, XRX200_PCE_TBL_CTRL_BAS); // start access
  351.  
  352. // wait until hardware is ready
  353. while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
  354.  
  355. // read the keys
  356. tbl->key[7] = xrx200sw_read(XRX200_PCE_TBL_KEY_7);
  357. tbl->key[6] = xrx200sw_read(XRX200_PCE_TBL_KEY_6);
  358. tbl->key[5] = xrx200sw_read(XRX200_PCE_TBL_KEY_5);
  359. tbl->key[4] = xrx200sw_read(XRX200_PCE_TBL_KEY_4);
  360. tbl->key[3] = xrx200sw_read(XRX200_PCE_TBL_KEY_3);
  361. tbl->key[2] = xrx200sw_read(XRX200_PCE_TBL_KEY_2);
  362. tbl->key[1] = xrx200sw_read(XRX200_PCE_TBL_KEY_1);
  363. tbl->key[0] = xrx200sw_read(XRX200_PCE_TBL_KEY_0);
  364.  
  365. // read the values
  366. tbl->val[4] = xrx200sw_read(XRX200_PCE_TBL_VAL_4);
  367. tbl->val[3] = xrx200sw_read(XRX200_PCE_TBL_VAL_3);
  368. tbl->val[2] = xrx200sw_read(XRX200_PCE_TBL_VAL_2);
  369. tbl->val[1] = xrx200sw_read(XRX200_PCE_TBL_VAL_1);
  370. tbl->val[0] = xrx200sw_read(XRX200_PCE_TBL_VAL_0);
  371.  
  372. // read the mask
  373. tbl->mask = xrx200sw_read(XRX200_PCE_TBL_MASK_0);
  374. // read the type
  375. tbl->type = xrx200sw_read(XRX200_PCE_TBL_CTRL_TYPE);
  376. // read the valid flag
  377. tbl->valid = xrx200sw_read(XRX200_PCE_TBL_CTRL_VLD);
  378. // read the group map
  379. tbl->gmap = xrx200sw_read(XRX200_PCE_TBL_CTRL_GMAP);
  380.  
  381. return 0;
  382. }
  383.  
  384. static int xrx200_pce_table_entry_write(struct xrx200_pce_table_entry *tbl)
  385. {
  386. // wait until hardware is ready
  387. while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
  388.  
  389. // prepare the table access:
  390. // PCE_TBL_ADDR.ADDR = pData->table_index
  391. xrx200sw_write(tbl->index, XRX200_PCE_TBL_ADDR_ADDR);
  392. // PCE_TBL_CTRL.ADDR = pData->table
  393. xrx200sw_write(tbl->table, XRX200_PCE_TBL_CTRL_ADDR);
  394.  
  395. //(address-based write)
	xrx200sw_write(1, XRX200_PCE_TBL_CTRL_OPMOD); // OPMOD_ADWR
  397.  
	// write the keys
  399. xrx200sw_write(tbl->key[7], XRX200_PCE_TBL_KEY_7);
  400. xrx200sw_write(tbl->key[6], XRX200_PCE_TBL_KEY_6);
  401. xrx200sw_write(tbl->key[5], XRX200_PCE_TBL_KEY_5);
  402. xrx200sw_write(tbl->key[4], XRX200_PCE_TBL_KEY_4);
  403. xrx200sw_write(tbl->key[3], XRX200_PCE_TBL_KEY_3);
  404. xrx200sw_write(tbl->key[2], XRX200_PCE_TBL_KEY_2);
  405. xrx200sw_write(tbl->key[1], XRX200_PCE_TBL_KEY_1);
  406. xrx200sw_write(tbl->key[0], XRX200_PCE_TBL_KEY_0);
  407.  
	// write the values
  409. xrx200sw_write(tbl->val[4], XRX200_PCE_TBL_VAL_4);
  410. xrx200sw_write(tbl->val[3], XRX200_PCE_TBL_VAL_3);
  411. xrx200sw_write(tbl->val[2], XRX200_PCE_TBL_VAL_2);
  412. xrx200sw_write(tbl->val[1], XRX200_PCE_TBL_VAL_1);
  413. xrx200sw_write(tbl->val[0], XRX200_PCE_TBL_VAL_0);
  414.  
	// write the mask
	xrx200sw_write(tbl->mask, XRX200_PCE_TBL_MASK_0);
	// write the type
	xrx200sw_write(tbl->type, XRX200_PCE_TBL_CTRL_TYPE);
	// write the valid flag
	xrx200sw_write(tbl->valid, XRX200_PCE_TBL_CTRL_VLD);
	// write the group map
	xrx200sw_write(tbl->gmap, XRX200_PCE_TBL_CTRL_GMAP);
  423.  
  424. xrx200sw_write(1, XRX200_PCE_TBL_CTRL_BAS); // start access
  425.  
  426. // wait until hardware is ready
  427. while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
  428.  
  429. return 0;
  430. }
  431.  
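/* Recompute the default PVID of every switch port: a port that is an untagged
 * member of an active VLAN gets that VLAN's table index as PVID and is then
 * left alone; ports with no untagged membership are re-pointed on each
 * remaining iteration and so fall back to the lowest-indexed active VLAN.
 */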
  432. static void xrx200sw_fixup_pvids(void)
  433. {
  434. int index, p, portmap, untagged;
  435. struct xrx200_pce_table_entry tem;
  436. struct xrx200_pce_table_entry tev;
  437.  
  438. portmap = 0;
  439. for (p = 0; p < XRX200_MAX_PORT; p++)
  440. portmap |= BIT(p);
  441.  
  442. tem.table = XRX200_PCE_VLANMAP_IDX;
  443. tev.table = XRX200_PCE_ACTVLAN_IDX;
  444.  
  445. for (index = XRX200_MAX_VLAN; index-- > 0;)
  446. {
  447. tev.index = index;
  448. xrx200_pce_table_entry_read(&tev);
  449.  
  450. if (tev.valid == 0)
  451. continue;
  452.  
  453. tem.index = index;
  454. xrx200_pce_table_entry_read(&tem);
  455.  
  456. if (tem.val[0] == 0)
  457. continue;
  458.  
  459. untagged = portmap & (tem.val[1] ^ tem.val[2]);
  460.  
  461. for (p = 0; p < XRX200_MAX_PORT; p++)
  462. if (untagged & BIT(p))
  463. {
  464. portmap &= ~BIT(p);
  465. xrx200sw_write_x(index, XRX200_PCE_DEFPVID_PVID, p);
  466. }
  467.  
  468. for (p = 0; p < XRX200_MAX_PORT; p++)
  469. if (portmap & BIT(p))
  470. xrx200sw_write_x(index, XRX200_PCE_DEFPVID_PVID, p);
  471. }
  472. }
  473.  
  474. // swconfig interface
  475. static void xrx200_hw_init(struct xrx200_priv *priv);
  476.  
  477. // global
  478. static int xrx200sw_reset_switch(struct switch_dev *dev)
  479. {
  480. struct xrx200_priv *priv = container_of(dev, struct xrx200_priv, swdev);
  481.  
  482. xrx200_hw_init(priv);
  483.  
  484. return 0;
  485. }
  486.  
  487. static int xrx200_set_vlan_mode_enable(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
  488. {
  489. int p;
  490.  
  491. if ((attr->max > 0) && (val->value.i > attr->max))
  492. return -EINVAL;
  493.  
  494. for (p = 0; p < XRX200_MAX_PORT; p++) {
  495. xrx200sw_write_x(val->value.i, XRX200_PCE_VCTRL_VEMR, p);
  496. xrx200sw_write_x(val->value.i, XRX200_PCE_VCTRL_VIMR, p);
  497. }
  498.  
  499. xrx200sw_write(val->value.i, XRX200_PCE_GCTRL_0_VLAN);
  500. return 0;
  501. }
  502.  
  503. static int xrx200_get_vlan_mode_enable(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
  504. {
  505. val->value.i = xrx200sw_read(attr->id);
  506. return 0;
  507. }
  508.  
  509. static int xrx200_set_global_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
  510. {
  511. if ((attr->max > 0) && (val->value.i > attr->max))
  512. return -EINVAL;
  513.  
  514. xrx200sw_write(val->value.i, attr->id);
  515. return 0;
  516. }
  517.  
  518. static int xrx200_get_global_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
  519. {
  520. val->value.i = xrx200sw_read(attr->id);
  521. return 0;
  522. }
  523.  
  524. // vlan
  525. static int xrx200sw_set_vlan_vid(struct switch_dev *dev, const struct switch_attr *attr,
  526. struct switch_val *val)
  527. {
  528. struct xrx200_priv *priv = container_of(dev, struct xrx200_priv, swdev);
  529. int i;
  530. struct xrx200_pce_table_entry tev;
  531. struct xrx200_pce_table_entry tem;
  532.  
  533. tev.table = XRX200_PCE_ACTVLAN_IDX;
  534.  
  535. for (i = 0; i < XRX200_MAX_VLAN; i++)
  536. {
  537. tev.index = i;
  538. xrx200_pce_table_entry_read(&tev);
  539. if (tev.key[0] == val->value.i && i != val->port_vlan)
  540. return -EINVAL;
  541. }
  542.  
  543. priv->vlan_vid[val->port_vlan] = val->value.i;
  544.  
  545. tev.index = val->port_vlan;
  546. xrx200_pce_table_entry_read(&tev);
  547. tev.key[0] = val->value.i;
  548. tev.valid = val->value.i > 0;
  549. xrx200_pce_table_entry_write(&tev);
  550.  
  551. tem.table = XRX200_PCE_VLANMAP_IDX;
  552. tem.index = val->port_vlan;
  553. xrx200_pce_table_entry_read(&tem);
  554. tem.val[0] = val->value.i;
  555. xrx200_pce_table_entry_write(&tem);
  556.  
  557. xrx200sw_fixup_pvids();
  558. return 0;
  559. }
  560.  
  561. static int xrx200sw_get_vlan_vid(struct switch_dev *dev, const struct switch_attr *attr,
  562. struct switch_val *val)
  563. {
  564. struct xrx200_pce_table_entry te;
  565.  
  566. te.table = XRX200_PCE_ACTVLAN_IDX;
  567. te.index = val->port_vlan;
  568. xrx200_pce_table_entry_read(&te);
  569. val->value.i = te.key[0];
  570.  
  571. return 0;
  572. }
  573.  
  574. static int xrx200sw_set_vlan_fid(struct switch_dev *dev, const struct switch_attr *attr,
  575. struct switch_val *val)
  576. {
  577. struct xrx200_pce_table_entry tev;
  578.  
  579. tev.table = XRX200_PCE_ACTVLAN_IDX;
  580.  
  581. tev.index = val->port_vlan;
  582. xrx200_pce_table_entry_read(&tev);
  583. tev.val[0] = val->value.i;
  584. xrx200_pce_table_entry_write(&tev);
  585.  
  586. return 0;
  587. }
  588.  
  589. static int xrx200sw_get_vlan_fid(struct switch_dev *dev, const struct switch_attr *attr,
  590. struct switch_val *val)
  591. {
  592. struct xrx200_pce_table_entry te;
  593.  
  594. te.table = XRX200_PCE_ACTVLAN_IDX;
  595. te.index = val->port_vlan;
  596. xrx200_pce_table_entry_read(&te);
  597. val->value.i = te.val[0];
  598.  
  599. return 0;
  600. }
  601.  
  602.  
  603. static int xrx200sw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val)
  604. {
  605. struct xrx200_priv *priv = container_of(dev, struct xrx200_priv, swdev);
  606. int i, portmap, tagmap, untagged;
  607. struct xrx200_pce_table_entry tem;
  608.  
  609. portmap = 0;
  610. tagmap = 0;
  611. for (i = 0; i < val->len; i++)
  612. {
  613. struct switch_port *p = &val->value.ports[i];
  614.  
  615. portmap |= (1 << p->id);
  616. if (p->flags & (1 << SWITCH_PORT_FLAG_TAGGED))
  617. tagmap |= (1 << p->id);
  618. }
  619.  
  620. tem.table = XRX200_PCE_VLANMAP_IDX;
  621.  
  622. untagged = portmap ^ tagmap;
  623. for (i = 0; i < XRX200_MAX_VLAN; i++)
  624. {
  625. tem.index = i;
  626. xrx200_pce_table_entry_read(&tem);
  627.  
  628. if (tem.val[0] == 0)
  629. continue;
  630.  
  631. if ((untagged & (tem.val[1] ^ tem.val[2])) && (val->port_vlan != i))
  632. return -EINVAL;
  633. }
  634.  
  635. tem.index = val->port_vlan;
  636. xrx200_pce_table_entry_read(&tem);
  637.  
  638. // auto-enable this vlan if not enabled already
  639. if (tem.val[0] == 0)
  640. {
  641. struct switch_val v;
  642. v.port_vlan = val->port_vlan;
  643. v.value.i = val->port_vlan;
  644. if(xrx200sw_set_vlan_vid(dev, NULL, &v))
  645. return -EINVAL;
  646.  
  647. //read updated tem
  648. tem.index = val->port_vlan;
  649. xrx200_pce_table_entry_read(&tem);
  650. }
  651.  
  652. tem.val[1] = portmap;
  653. tem.val[2] = tagmap;
  654. xrx200_pce_table_entry_write(&tem);
  655.  
  656. ltq_switch_w32_mask(0, portmap, PCE_PMAP2);
  657. ltq_switch_w32_mask(0, portmap, PCE_PMAP3);
  658. priv->vlan_port_map[val->port_vlan] = portmap;
  659.  
  660. xrx200sw_fixup_pvids();
  661.  
  662. return 0;
  663. }
  664.  
  665. static int xrx200sw_set_port_pvid(struct switch_dev *dev, int port, int val)
  666. {
  667. int i;
  668. struct xrx200_pce_table_entry tev;
  669.  
  670. if (port >= XRX200_MAX_PORT)
  671. return -EINVAL;
  672.  
  673. tev.table = XRX200_PCE_ACTVLAN_IDX;
  674.  
  675. for (i = 0; i < XRX200_MAX_VLAN; i++)
  676. {
  677. tev.index = i;
  678. xrx200_pce_table_entry_read(&tev);
  679. if (tev.key[0] == val)
  680. {
  681. xrx200sw_write_x(i, XRX200_PCE_DEFPVID_PVID, port);
  682. return 0;
  683. }
  684. }
  685.  
  686. return -EINVAL;
  687. }
  688.  
  689. static int xrx200sw_get_vlan_ports(struct switch_dev *dev, struct switch_val *val)
  690. {
  691. int i;
  692. unsigned short ports, tags;
  693. struct xrx200_pce_table_entry tem;
  694.  
  695. tem.table = XRX200_PCE_VLANMAP_IDX;
  696. tem.index = val->port_vlan;
  697. xrx200_pce_table_entry_read(&tem);
  698.  
  699. ports = tem.val[1];
  700. tags = tem.val[2];
  701.  
  702. for (i = 0; i < XRX200_MAX_PORT; i++) {
  703. struct switch_port *p;
  704.  
  705. if (!(ports & (1 << i)))
  706. continue;
  707.  
  708. p = &val->value.ports[val->len++];
  709. p->id = i;
  710. if (tags & (1 << i))
  711. p->flags = (1 << SWITCH_PORT_FLAG_TAGGED);
  712. else
  713. p->flags = 0;
  714. }
  715.  
  716. return 0;
  717. }
  718.  
  719. static int xrx200sw_set_vlan_enable(struct switch_dev *dev, const struct switch_attr *attr,
  720. struct switch_val *val)
  721. {
  722. struct xrx200_pce_table_entry tev;
  723.  
  724. tev.table = XRX200_PCE_ACTVLAN_IDX;
  725. tev.index = val->port_vlan;
  726. xrx200_pce_table_entry_read(&tev);
  727.  
  728. if (tev.key[0] == 0)
  729. return -EINVAL;
  730.  
  731. tev.valid = val->value.i;
  732. xrx200_pce_table_entry_write(&tev);
  733.  
  734. xrx200sw_fixup_pvids();
  735. return 0;
  736. }
  737.  
  738. static int xrx200sw_get_vlan_enable(struct switch_dev *dev, const struct switch_attr *attr,
  739. struct switch_val *val)
  740. {
  741. struct xrx200_pce_table_entry tev;
  742.  
  743. tev.table = XRX200_PCE_ACTVLAN_IDX;
  744. tev.index = val->port_vlan;
  745. xrx200_pce_table_entry_read(&tev);
  746. val->value.i = tev.valid;
  747.  
  748. return 0;
  749. }
  750.  
  751. // port
  752. static int xrx200sw_get_port_pvid(struct switch_dev *dev, int port, int *val)
  753. {
  754. struct xrx200_pce_table_entry tev;
  755.  
  756. if (port >= XRX200_MAX_PORT)
  757. return -EINVAL;
  758.  
  759. tev.table = XRX200_PCE_ACTVLAN_IDX;
  760. tev.index = xrx200sw_read_x(XRX200_PCE_DEFPVID_PVID, port);
  761. xrx200_pce_table_entry_read(&tev);
  762.  
  763. *val = tev.key[0];
  764. return 0;
  765. }
  766.  
  767. static int xrx200sw_get_port_link(struct switch_dev *dev,
  768. int port,
  769. struct switch_port_link *link)
  770. {
  771. if (port >= XRX200_MAX_PORT)
  772. return -EINVAL;
  773.  
  774. link->link = xrx200sw_read_x(XRX200_MAC_PSTAT_LSTAT, port);
  775. if (!link->link)
  776. return 0;
  777.  
  778. link->duplex = xrx200sw_read_x(XRX200_MAC_PSTAT_FDUP, port);
  779.  
  780.  
  781. // TODO "&&" is bug
  782. // link->rx_flow = !!(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port) && 0x0010);
  783. // link->tx_flow = !!(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port) && 0x0020);
  784. link->rx_flow = !!(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port) & 0x0010);
  785. link->tx_flow = !!(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port) & 0x0020);
  786.  
  787.  
  788. link->aneg = !(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port));
  789.  
  790. link->speed = SWITCH_PORT_SPEED_10;
  791. if (xrx200sw_read_x(XRX200_MAC_PSTAT_MBIT, port))
  792. link->speed = SWITCH_PORT_SPEED_100;
  793. if (xrx200sw_read_x(XRX200_MAC_PSTAT_GBIT, port))
  794. link->speed = SWITCH_PORT_SPEED_1000;
  795.  
  796. return 0;
  797. }
  798.  
  799. static int xrx200_set_port_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
  800. {
  801. if (val->port_vlan >= XRX200_MAX_PORT)
  802. return -EINVAL;
  803.  
  804. if ((attr->max > 0) && (val->value.i > attr->max))
  805. return -EINVAL;
  806.  
  807. xrx200sw_write_x(val->value.i, attr->id, val->port_vlan);
  808. return 0;
  809. }
  810.  
  811. static int xrx200_get_port_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
  812. {
  813. if (val->port_vlan >= XRX200_MAX_PORT)
  814. return -EINVAL;
  815.  
  816. val->value.i = xrx200sw_read_x(attr->id, val->port_vlan);
  817. return 0;
  818. }
  819.  
  820. // attributes
  821. static struct switch_attr xrx200sw_globals[] = {
  822. {
  823. .type = SWITCH_TYPE_INT,
  824. .set = xrx200_set_vlan_mode_enable,
  825. .get = xrx200_get_vlan_mode_enable,
  826. .name = "enable_vlan",
  827. .description = "Enable VLAN mode",
  828. .max = 1},
  829. };
  830.  
  831. static struct switch_attr xrx200sw_port[] = {
  832. {
  833. XRX200_PORT_REGATTR(XRX200_PCE_VCTRL_UVR),
  834. .name = "uvr",
  835. .description = "Unknown VLAN Rule",
  836. .max = 1,
  837. },
  838. {
  839. XRX200_PORT_REGATTR(XRX200_PCE_VCTRL_VSR),
  840. .name = "vsr",
  841. .description = "VLAN Security Rule",
  842. .max = 1,
  843. },
  844. {
  845. XRX200_PORT_REGATTR(XRX200_PCE_VCTRL_VINR),
  846. .name = "vinr",
  847. .description = "VLAN Ingress Tag Rule",
  848. .max = 2,
  849. },
  850. {
  851. XRX200_PORT_REGATTR(XRX200_PCE_PCTRL_0_TVM),
  852. .name = "tvm",
  853. .description = "Transparent VLAN Mode",
  854. .max = 1,
  855. },
  856. };
  857.  
  858. static struct switch_attr xrx200sw_vlan[] = {
  859. {
  860. .type = SWITCH_TYPE_INT,
  861. .name = "fid",
  862. .description = "Filtering Identifier (0-63)",
  863. .set = xrx200sw_set_vlan_fid,
  864. .get = xrx200sw_get_vlan_fid,
  865. .max = 63,
  866. },
  867. {
  868. .type = SWITCH_TYPE_INT,
  869. .name = "vid",
  870. .description = "VLAN ID (0-4094)",
  871. .set = xrx200sw_set_vlan_vid,
  872. .get = xrx200sw_get_vlan_vid,
  873. .max = 4094,
  874. },
  875. {
  876. .type = SWITCH_TYPE_INT,
  877. .name = "enable",
  878. .description = "Enable VLAN",
  879. .set = xrx200sw_set_vlan_enable,
  880. .get = xrx200sw_get_vlan_enable,
  881. .max = 1,
  882. },
  883. };
  884.  
  885. static const struct switch_dev_ops xrx200sw_ops = {
  886. .attr_global = {
  887. .attr = xrx200sw_globals,
  888. .n_attr = ARRAY_SIZE(xrx200sw_globals),
  889. },
  890. .attr_port = {
  891. .attr = xrx200sw_port,
  892. .n_attr = ARRAY_SIZE(xrx200sw_port),
  893. },
  894. .attr_vlan = {
  895. .attr = xrx200sw_vlan,
  896. .n_attr = ARRAY_SIZE(xrx200sw_vlan),
  897. },
  898. .get_vlan_ports = xrx200sw_get_vlan_ports,
  899. .set_vlan_ports = xrx200sw_set_vlan_ports,
  900. .get_port_pvid = xrx200sw_get_port_pvid,
  901. .reset_switch = xrx200sw_reset_switch,
  902. .get_port_link = xrx200sw_get_port_link,
  903. // .get_port_stats = xrx200sw_get_port_stats, //TODO
  904. };
  905.  
  906. static int xrx200sw_init(struct xrx200_priv *priv)
  907. {
  908.  
  909. struct switch_dev *swdev;
  910. if (!priv->sw) {
  911. pr_info("!!!! no switch\n");
  912. return -ENODEV;
  913. }
  914.  
  915. swdev = &priv->swdev;
  916.  
  917. swdev->name = "Lantiq XRX200 Switch";
  918. swdev->vlans = XRX200_MAX_VLAN;
  919. swdev->ports = XRX200_MAX_PORT;
  920. swdev->cpu_port = 6;
  921. swdev->ops = &xrx200sw_ops;
  922.  
  923. register_switch(swdev, priv->net_dev);
  924. return 0; // enough switches
  925. }
  926.  
  927. /* drop all the packets from the DMA ring */
  928. static void xrx200_flush_dma(struct xrx200_chan *ch)
  929. {
  930. int i;
  931.  
  932. for (i = 0; i < LTQ_DESC_NUM; i++) {
  933. struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  934.  
  935. if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
  936. break;
  937.  
  938. desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
  939. XRX200_DMA_DATA_LEN;
  940. ch->dma.desc++;
  941. ch->dma.desc %= LTQ_DESC_NUM;
  942. }
  943. }
  944.  
  945. static int xrx200_open(struct net_device *dev)
  946. {
  947. struct xrx200_priv *priv = netdev_priv(dev);
  948. int i;
  949.  
  950. // TODO DMA chan allocation seems to be more complex in openwrt version
  951.  
  952. napi_enable(&priv->chan_tx.napi);
  953. ltq_dma_open(&priv->chan_tx.dma);
  954. ltq_dma_enable_irq(&priv->chan_tx.dma);
  955.  
  956. napi_enable(&priv->chan_rx.napi);
  957. ltq_dma_open(&priv->chan_rx.dma);
  958.  
  959. /* The boot loader does not always deactivate the receiving of frames
  960. * on the ports and then some packets queue up in the PPE buffers.
  961. * They already passed the PMAC so they do not have the tags
* configured here. Read these packets here and drop them.
  963. * The HW should have written them into memory after 10us
  964. */
  965. usleep_range(20, 40);
  966. xrx200_flush_dma(&priv->chan_rx);
  967.  
  968. ltq_dma_enable_irq(&priv->chan_rx.dma);
  969.  
  970. for (i = 0; i < priv->num_port; i++)
  971. if (priv->port[i].phydev)
  972. phy_start(priv->port[i].phydev);
  973.  
  974. netif_wake_queue(dev);
  975.  
  976. return 0;
  977. }
  978.  
  979. static int xrx200_close(struct net_device *dev)
  980. {
  981. struct xrx200_priv *priv = netdev_priv(dev);
  982. int i;
  983.  
  984. netif_stop_queue(dev);
  985.  
  986. for (i = 0; i < priv->num_port; i++)
  987. if (priv->port[i].phydev)
  988. phy_stop(priv->port[i].phydev);
  989.  
  990. napi_disable(&priv->chan_rx.napi);
  991. ltq_dma_close(&priv->chan_rx.dma);
  992.  
  993. napi_disable(&priv->chan_tx.napi);
  994. ltq_dma_close(&priv->chan_tx.dma);
  995.  
  996. return 0;
  997. }
  998.  
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
#define DMA_PAD (NET_IP_ALIGN + NET_SKB_PAD)
	struct sk_buff *skb;
	int ret = 0;

	skb = netdev_alloc_skb(ch->priv->net_dev, XRX200_DMA_DATA_LEN + DMA_PAD);
	if (!skb) {
		/* keep the old skb and buffer in place so the descriptor stays
		 * usable; the caller simply drops this packet
		 */
		ret = -ENOMEM;
		goto skip;
	}

	skb_reserve(skb, NET_SKB_PAD);

	dma_unmap_single(ch->priv->dev, ch->dma.desc_base[ch->dma.desc].addr,
			 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);

	ch->skb[ch->dma.desc] = skb;
	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
			skb->data, XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
	/* on this MIPS SoC the physical address is expected to equal the
	 * mapped DMA address, so dma_map_single() above mainly performs the
	 * cache maintenance
	 */
	ch->dma.desc_base[ch->dma.desc].addr =
		CPHYSADDR(skb->data);
	skb_reserve(skb, NET_IP_ALIGN);

skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		XRX200_DMA_DATA_LEN;

	return ret;
}
  1024.  
  1025. static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
  1026. {
  1027. struct xrx200_priv *priv = ch->priv;
  1028. struct net_device *dev = priv->net_dev;
  1029. struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  1030. struct sk_buff *skb = ch->skb[ch->dma.desc];
  1031. int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
  1032. int ret;
  1033.  
  1034. ret = xrx200_alloc_skb(ch);
  1035.  
  1036. ch->dma.desc++;
  1037. ch->dma.desc %= LTQ_DESC_NUM;
  1038.  
	if (ret) {
		dev->stats.rx_dropped++;
		netdev_err(dev,
			   "failed to allocate new rx buffer\n");
		return;
	}
  1044.  
  1045. skb_put(skb, len);
  1046. #ifdef SW_ROUTING
  1047. skb_pull(skb, 8);
  1048. #endif
  1049.  
  1050. skb->dev = dev;
  1051. skb->protocol = eth_type_trans(skb, dev);
  1052. netif_receive_skb(skb);
  1053. dev->stats.rx_packets++;
  1054. dev->stats.rx_bytes+=len;
  1055. }
  1056.  
  1057. static int xrx200_poll_rx(struct napi_struct *napi, int budget)
  1058. {
  1059. struct xrx200_chan *ch = container_of(napi,
  1060. struct xrx200_chan, napi);
  1061. struct xrx200_priv *priv = ch->priv;
  1062. int rx = 0;
  1063.  
  1064. while (rx < budget) {
  1065. struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  1066. if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
  1067. #ifdef SW_ROUTING
  1068. struct sk_buff *skb = ch->skb[ch->dma.desc];
  1069. u8 *special_tag = (u8*)skb->data;
  1070. int port = (special_tag[7] >> SPPID_SHIFT) & SPPID_MASK;
  1071. xrx200_hw_receive(ch, priv->port_map[port]);
  1072. #else
  1073. xrx200_hw_receive(ch, 0);
  1074. #endif
  1075. rx++;
  1076. } else {
  1077. break;
  1078. }
  1079. }
  1080.  
  1081. if (rx < budget) {
  1082. if (napi_complete_done(&ch->napi, rx)) {
  1083. //can an unacked irq event wait here now?
  1084. ltq_dma_enable_irq(&ch->dma);
  1085. }
  1086. }
  1087.  
  1088. return rx;
  1089. }
  1090.  
  1091.  
  1092. static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
  1093. {
  1094. struct xrx200_priv *priv = netdev_priv(dev);
  1095.  
  1096. return &priv->stats;
  1097. }
  1098.  
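/* Number of free TX descriptors between the housekeeping tail and the xmit
 * head of the ring; one slot is always kept unused so a completely full ring
 * can be told apart from an empty one.
 */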
  1099. #define TX_BUFFS_AVAIL(tail, head) \
  1100. ((tail <= head) ? \
  1101. tail + (LTQ_DESC_NUM - 1) - head : \
  1102. tail - head - 1)
  1103.  
  1104. static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
  1105. {
  1106. struct xrx200_chan *ch = container_of(napi,
  1107. struct xrx200_chan, napi);
  1108. struct net_device *net_dev = ch->priv->net_dev;
  1109. int pkts = 0;
  1110. unsigned long bytes = 0;
  1111. unsigned long flags;
  1112.  
  1113. spin_lock_irqsave(&ch->lock, flags);
  1114.  
  1115. while (1) {
  1116. struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];
  1117.  
  1118. if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
  1119. struct sk_buff *skb = ch->skb[ch->tx_free];
  1120.  
  1121. bytes += ch->desc_size[ch->tx_free];
  1122. ch->skb[ch->tx_free] = NULL;
  1123.  
  1124. dma_unmap_single(ch->priv->dev, ch->desc_addr[ch->tx_free],
  1125. ch->desc_size[ch->tx_free], DMA_TO_DEVICE);
  1126.  
  1127. /* Consume skb only at last fragment */
  1128. if (desc->ctl & LTQ_DMA_EOP) {
  1129. dev_consume_skb_irq(skb);
  1130. pkts++;
  1131. }
  1132.  
  1133. memset(desc, 0, sizeof(struct ltq_dma_desc));
  1134. ch->tx_free = (ch->tx_free + 1) % LTQ_DESC_NUM;
  1135. } else {
  1136. break;
  1137. }
  1138. }
  1139.  
  1140. spin_unlock_irqrestore(&ch->lock, flags);
  1141.  
  1142. net_dev->stats.tx_packets += pkts;
  1143. net_dev->stats.tx_bytes += bytes;
  1144.  
  1145. // HACK, free all descriptors, even over budget (else there will be queue stalls, slow CPU)
  1146. pkts = pkts ? (budget - 1) : 0;
  1147.  
  1148. if (pkts < budget) {
  1149. if (napi_complete_done(&ch->napi, pkts)) {
  1150. ltq_dma_enable_irq(&ch->dma);
  1151. }
  1152. }
  1153.  
  1154. if (netif_queue_stopped(net_dev)) {
  1155. if (unlikely(TX_BUFFS_AVAIL(ch->tx_free, ch->dma.desc) > (MAX_SKB_FRAGS + 1))) {
  1156. netif_wake_queue(net_dev);
  1157. }
  1158. }
  1159.  
  1160. return pkts;
  1161. }
  1162.  
  1163. static void xrx200_tx_timeout(struct net_device *dev)
  1164. {
  1165. struct xrx200_priv *priv = netdev_priv(dev);
  1166.  
  1167. pr_err("%s: transmit timed out!\n", dev->name);
  1168.  
  1169. priv->stats.tx_errors++;
  1170.  
  1171. ltq_dma_enable_irq(&priv->chan_tx.dma); //TODO necessary?
  1172.  
  1173. if (netif_queue_stopped(dev)) {
  1174. netif_wake_queue(dev);
  1175. } else {
  1176. pr_warn("%s: high transmit load\n", dev->name);
  1177. }
  1178. }
  1179.  
  1180. static void xrx200_unwind_mapped_tx_skb(struct xrx200_chan *ch, int tail, int head) {
  1181.  
  1182. for (; tail != head; tail = (tail + 1) % LTQ_DESC_NUM) {
  1183. dma_unmap_single(ch->priv->dev, ch->desc_addr[tail],
  1184. ch->desc_size[tail], DMA_TO_DEVICE);
  1185. }
  1186. }
  1187.  
  1188. static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1189. {
  1190. struct xrx200_priv *priv = netdev_priv(dev);
  1191. struct xrx200_chan *ch = &priv->chan_tx;
  1192.  
  1193. struct ltq_dma_desc *desc;
  1194. int ret = NETDEV_TX_OK;
  1195. int len;
  1196. int i;
  1197. unsigned long flags;
  1198. dma_addr_t mapping;
  1199. #ifdef SW_ROUTING
  1200. u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
  1201. #endif
  1202.  
  1203. if (skb_put_padto(skb, ETH_ZLEN)) {
  1204. dev->stats.tx_dropped++;
  1205. return NETDEV_TX_OK;
  1206. }
  1207.  
  1208. #ifdef SW_ROUTING
  1209. if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
  1210. u16 port_map = priv->d_port_map;
  1211.  
  1212. if (priv->sw && skb->protocol == htons(ETH_P_8021Q)) {
  1213. u16 vid;
  1214. int i;
  1215.  
  1216. port_map = 0;
  1217. if (!__vlan_get_tag(skb, &vid)) {
  1218. for (i = 0; i < XRX200_MAX_VLAN; i++) {
  1219. if (priv->vlan_vid[i] == vid) {
  1220. port_map = priv->vlan_port_map[i];
  1221. break;
  1222. }
  1223. }
  1224. }
  1225. }
  1226.  
  1227. special_tag |= (port_map << PORT_MAP_SHIFT) |
  1228. PORT_MAP_SEL | PORT_MAP_EN;
  1229. }
  1230.  
  1231. if(priv->wan)
  1232. special_tag |= (1 << DPID_SHIFT);
  1233.  
	if (skb_headroom(skb) < 4) {
		struct sk_buff *tmp = skb_realloc_headroom(skb, 4);

		dev_kfree_skb_any(skb);
		if (!tmp) {
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		skb = tmp;
	}
  1239.  
  1240. skb_push(skb, 4);
  1241. memcpy(skb->data, &special_tag, sizeof(u32));
  1242. #endif
  1243.  
  1244. // if (TX_BUFFS_AVAIL(ch->tx_free, ch->dma.desc) <= (skb_shinfo(skb)->nr_frags + 1)) {
  1245. if (TX_BUFFS_AVAIL(ch->tx_free, ch->dma.desc) <= (MAX_SKB_FRAGS + 1)) {
  1246. netif_stop_queue(dev);
  1247. netdev_err(dev, "not enough TX ring space\n");
  1248. return NETDEV_TX_BUSY;
  1249. }
  1250. #if 0
  1251. desc = &ch->dma.desc_base[ch->dma.desc];
  1252. if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
  1253. netif_stop_queue(dev);
  1254. netdev_err(dev, "tx ring full before send\n");
  1255. return NETDEV_TX_BUSY;
  1256. }
  1257. #endif
  1258. spin_lock_irqsave(&ch->lock, flags);
  1259.  
  1260. /* Send first fragment */
  1261. ch->skb[ch->dma.desc] = skb;
  1262. desc = &ch->dma.desc_base[ch->dma.desc];
  1263.  
  1264. if (skb_shinfo(skb)->nr_frags == 0) {
  1265. len = skb->len;
  1266. } else {
  1267. len = skb_headlen(skb);
  1268. }
  1269.  
  1270. mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
  1271. if (unlikely(dma_mapping_error(priv->dev, mapping))) {
  1272. dev_kfree_skb(skb);
  1273. netdev_err(dev, "DMA mapping failed\n");
  1274. dev->stats.tx_dropped++;
  1275. dev->stats.tx_errors++;
  1276. ret = NETDEV_TX_OK;
  1277. goto out;
  1278. }
  1279.  
  1280. ch->desc_addr[ch->dma.desc] = mapping;
  1281. ch->desc_size[ch->dma.desc] = len;
  1282.  
  1283. desc->addr = (mapping & 0x1fffffe0) | (1<<31);
  1284.  
  1285. /* Don't set LTQ_DMA_OWN before filling all fragments descriptors */
  1286. desc->ctl = LTQ_DMA_SOP | LTQ_DMA_TX_OFFSET(mapping & XRX200_DMA_TX_ALIGN)
  1287. | (len & LTQ_DMA_SIZE_MASK);
  1288.  
  1289. if (!skb_shinfo(skb)->nr_frags)
  1290. desc->ctl |= LTQ_DMA_EOP;
  1291.  
  1292. /* Send rest of fragments */
  1293. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  1294.  
  1295. //TODO saving info for fragments
  1296. ch->skb[(ch->dma.desc + i + 1) % LTQ_DESC_NUM ] = skb;
  1297. desc = &ch->dma.desc_base[(ch->dma.desc + i + 1) % LTQ_DESC_NUM];
  1298.  
  1299. len = skb_frag_size(&skb_shinfo(skb)->frags[i]);
  1300.  
  1301. mapping = dma_map_single(priv->dev, skb_frag_address(& skb_shinfo(skb)->frags[i]), len, DMA_TO_DEVICE);
  1302. if (unlikely(dma_mapping_error(priv->dev, mapping))) {
  1303.  
  1304. xrx200_unwind_mapped_tx_skb(ch, ch->dma.desc, ch->dma.desc + i + 1);
  1305.  
  1306. netdev_err(dev, "DMA mapping for fragment failed\n");
  1307. dev_kfree_skb(skb);
  1308. dev->stats.tx_dropped++;
  1309. dev->stats.tx_errors++;
  1310. ret = NETDEV_TX_OK;
  1311. goto out;
  1312. }
  1313.  
  1314. ch->desc_addr[ch->dma.desc + i + 1] = mapping;
  1315. ch->desc_size[ch->dma.desc + i + 1] = len;
  1316.  
  1317. desc->addr = (mapping & 0x1fffffe0) | (1<<31);
  1318.  
  1319. desc->ctl = LTQ_DMA_OWN |
  1320. LTQ_DMA_TX_OFFSET(mapping & XRX200_DMA_TX_ALIGN) | (len & LTQ_DMA_SIZE_MASK);
  1321.  
  1322. if (i == (skb_shinfo(skb)->nr_frags - 1))
  1323. desc->ctl |= LTQ_DMA_EOP;
  1324. }
  1325.  
  1326. desc = &ch->dma.desc_base[ch->dma.desc];
  1327.  
  1328. /* Increment TX ring index */
  1329. ch->dma.desc = (ch->dma.desc + skb_shinfo(skb)->nr_frags + 1) % LTQ_DESC_NUM;
  1330.  
  1331. wmb();
  1332.  
  1333. /* Start TX DMA */
  1334. desc->ctl |= LTQ_DMA_OWN;
  1335.  
  1336. if (unlikely(TX_BUFFS_AVAIL(ch->tx_free, ch->dma.desc) <= (MAX_SKB_FRAGS + 1))) {
  1337. netif_stop_queue(dev);
  1338. }
  1339.  
  1340. skb_tx_timestamp(skb);
  1341.  
  1342. out:
  1343. spin_unlock_irqrestore(&ch->lock, flags);
  1344.  
  1345. return ret;
  1346. }
  1347.  
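/* DMA interrupt: mask and acknowledge the channel interrupt, then defer the
 * work to NAPI; the poll handlers re-enable the interrupt once the ring has
 * been drained below their budget.
 */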
  1348. static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
  1349. {
  1350. struct xrx200_chan *ch = ptr;
  1351.  
  1352. ltq_dma_disable_irq(&ch->dma);
  1353. ltq_dma_ack_irq(&ch->dma);
  1354. napi_schedule(&ch->napi);
  1355.  
  1356. return IRQ_HANDLED;
  1357. }
  1358.  
  1359. static int xrx200_dma_init(struct xrx200_priv *priv)
  1360. {
  1361. int i;
  1362. struct xrx200_chan *ch_rx = &priv->chan_rx;
  1363. struct xrx200_chan *ch_tx = &priv->chan_tx;
  1364. int ret;
  1365.  
  1366. ltq_dma_init_port(DMA_PORT_ETOP);
  1367.  
  1368. ch_rx->dma.nr = XRX200_DMA_RX;
  1369. ch_rx->priv = priv;
  1370.  
  1371. ltq_dma_alloc_rx(&ch_rx->dma);
  1372. for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
  1373. ch_rx->dma.desc++) {
  1374. ret = xrx200_alloc_skb(ch_rx);
  1375. if (ret)
  1376. goto rx_free;
  1377. }
  1378. ch_rx->dma.desc = 0;
  1379.  
  1380. spin_lock_init(&ch_rx->lock);
  1381. spin_lock_init(&ch_tx->lock);
  1382.  
  1383.  
  1384. ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
  1385. "vrx200_rx", &priv->chan_rx);
  1386. if (ret) {
  1387. dev_err(priv->dev, "failed to request RX irq %d\n",
  1388. ch_rx->dma.irq);
  1389. goto rx_ring_free;
  1390. }
  1391.  
  1392. ch_tx->dma.nr = XRX200_DMA_TX;
  1393. ch_tx->priv = priv;
  1394.  
  1395. ltq_dma_alloc_tx(&ch_tx->dma);
  1396. ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
  1397. "vrx200_tx", &priv->chan_tx);
  1398. if (ret) {
  1399. dev_err(priv->dev, "failed to request TX irq %d\n",
  1400. ch_tx->dma.irq);
  1401. goto tx_free;
  1402. }
  1403.  
  1404. return ret;
  1405.  
  1406. tx_free:
  1407. ltq_dma_free(&ch_tx->dma);
  1408.  
  1409. rx_ring_free:
  1410. /* free the allocated RX ring */
  1411. for (i = 0; i < LTQ_DESC_NUM; i++) {
  1412. if (priv->chan_rx.skb[i])
  1413. dev_kfree_skb_any(priv->chan_rx.skb[i]);
  1414. }
  1415.  
  1416. rx_free:
  1417. ltq_dma_free(&ch_rx->dma);
  1418. return ret;
  1419. }
  1420.  
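/* With SW_POLLING the PHY state reported by phylib is mirrored into the
 * switch by hand: link, speed and duplex are written into the port's MDIO_PHY
 * register and the MII_CFG clock rate is set to match the negotiated speed.
 */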
  1421. #ifdef SW_POLLING
  1422. static void xrx200_gmac_update(struct xrx200_port *port)
  1423. {
  1424. u16 phyaddr = port->phydev->mdio.addr & MDIO_PHY_ADDR_MASK;
  1425. u16 miimode = ltq_mii_r32(MII_CFG(port->num)) & MII_CFG_MODE_MASK;
  1426. u16 miirate = 0;
  1427.  
  1428. switch (port->phydev->speed) {
  1429. case SPEED_1000:
  1430. phyaddr |= MDIO_PHY_SPEED_G1;
  1431. miirate = MII_CFG_RATE_M125;
  1432. break;
  1433.  
  1434. case SPEED_100:
  1435. phyaddr |= MDIO_PHY_SPEED_M100;
  1436. switch (miimode) {
  1437. case MII_CFG_MODE_RMIIM:
  1438. case MII_CFG_MODE_RMIIP:
  1439. miirate = MII_CFG_RATE_M50;
  1440. break;
  1441. default:
  1442. miirate = MII_CFG_RATE_M25;
  1443. break;
  1444. }
  1445. break;
  1446.  
  1447. default:
  1448. phyaddr |= MDIO_PHY_SPEED_M10;
  1449. miirate = MII_CFG_RATE_M2P5;
  1450. break;
  1451. }
  1452.  
  1453. if (port->phydev->link)
  1454. phyaddr |= MDIO_PHY_LINK_UP;
  1455. else
  1456. phyaddr |= MDIO_PHY_LINK_DOWN;
  1457.  
  1458. if (port->phydev->duplex == DUPLEX_FULL)
  1459. phyaddr |= MDIO_PHY_FDUP_EN;
  1460. else
  1461. phyaddr |= MDIO_PHY_FDUP_DIS;
  1462.  
  1463. ltq_mdio_w32_mask(MDIO_UPDATE_MASK, phyaddr, MDIO_PHY(port->num));
  1464. ltq_mii_w32_mask(MII_CFG_RATE_MASK, miirate, MII_CFG(port->num));
  1465. udelay(1);
  1466. }
  1467. #else
  1468. static void xrx200_gmac_update(struct xrx200_port *port)
  1469. {
  1470.  
  1471. }
  1472. #endif
  1473.  
  1474. static void xrx200_mdio_link(struct net_device *dev)
  1475. {
  1476. struct xrx200_priv *priv = netdev_priv(dev);
  1477. int i;
  1478.  
  1479. for (i = 0; i < priv->num_port; i++) {
  1480. if (!priv->port[i].phydev)
  1481. continue;
  1482.  
  1483. if (priv->port[i].link != priv->port[i].phydev->link) {
  1484. xrx200_gmac_update(&priv->port[i]);
  1485. priv->port[i].link = priv->port[i].phydev->link;
  1486. netdev_info(dev, "port %d %s link\n",
  1487. priv->port[i].num,
  1488. (priv->port[i].link)?("got"):("lost"));
  1489. }
  1490. }
  1491. }
  1492.  
  1493. static inline int xrx200_mdio_poll(struct mii_bus *bus)
  1494. {
  1495. unsigned cnt = 10000;
  1496.  
  1497. while (likely(cnt--)) {
  1498. unsigned ctrl = ltq_mdio_r32(MDIO_CTRL);
  1499. if ((ctrl & MDIO_BUSY) == 0)
  1500. return 0;
  1501. }
  1502.  
  1503. return 1;
  1504. }
  1505.  
  1506. static int xrx200_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
  1507. {
  1508. if (xrx200_mdio_poll(bus))
  1509. return 1;
  1510.  
  1511. ltq_mdio_w32(val, MDIO_WRITE);
  1512. ltq_mdio_w32(MDIO_BUSY | MDIO_WR |
  1513. ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
  1514. (reg & MDIO_MASK),
  1515. MDIO_CTRL);
  1516.  
  1517. return 0;
  1518. }
  1519.  
  1520. static int xrx200_mdio_rd(struct mii_bus *bus, int addr, int reg)
  1521. {
  1522. if (xrx200_mdio_poll(bus))
  1523. return -1;
  1524.  
  1525. ltq_mdio_w32(MDIO_BUSY | MDIO_RD |
  1526. ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
  1527. (reg & MDIO_MASK),
  1528. MDIO_CTRL);
  1529.  
  1530. if (xrx200_mdio_poll(bus))
  1531. return -1;
  1532.  
  1533. return ltq_mdio_r32(MDIO_READ);
  1534. }
  1535.  
  1536. static int xrx200_phy_has_link(struct net_device *dev)
  1537. {
  1538. struct xrx200_priv *priv = netdev_priv(dev);
  1539. int i;
  1540.  
  1541. for (i = 0; i < priv->num_port; i++) {
  1542. if (!priv->port[i].phydev)
  1543. continue;
  1544.  
  1545. if (priv->port[i].phydev->link)
  1546. return 1;
  1547. }
  1548.  
  1549. return 0;
  1550. }
  1551.  
  1552. static void xrx200_phy_link_change(struct phy_device *phydev, bool up, bool do_carrier)
  1553. {
  1554. struct net_device *netdev = phydev->attached_dev;
  1555.  
  1556. if (do_carrier) {
  1557. if (up)
  1558. netif_carrier_on(netdev);
  1559. else if (!xrx200_phy_has_link(netdev))
  1560. netif_carrier_off(netdev);
  1561. }
  1562.  
  1563. phydev->adjust_link(netdev);
  1564. }
  1565.  
  1566. static int xrx200_mdio_probe(struct net_device *dev, struct xrx200_port *port)
  1567. {
  1568. struct xrx200_priv *priv = netdev_priv(dev);
  1569. struct phy_device *phydev = NULL;
  1570. unsigned val;
  1571.  
  1572. if (of_phy_is_fixed_link(port->phy_node)) {
  1573. netdev_info(dev, "Connect as fixed link.\n");
  1574. phydev = of_phy_connect(dev, port->phy_node, &xrx200_mdio_link, 0,
  1575. port->phy_if);
  1576.  
  1577. if (IS_ERR(phydev)) {
  1578. netdev_err(dev, "Could not attach to PHY\n");
  1579. return PTR_ERR(phydev);
  1580. }
  1581. }else{
  1582. netdev_info(dev, "Connect as common phy link.\n");
  1583. phydev = mdiobus_get_phy(priv->mii_bus, port->phy_addr);
  1584. if (!phydev) {
  1585. netdev_err(dev, "no PHY found\n");
  1586. return -ENODEV;
  1587. }
  1588.  
  1589. phydev = phy_connect(dev, phydev_name(phydev), &xrx200_mdio_link,
  1590. port->phy_if);
  1591.  
  1592. if (IS_ERR(phydev)) {
  1593. netdev_err(dev, "Could not attach to PHY\n");
  1594. return PTR_ERR(phydev);
  1595. }
  1596.  
  1597. phydev->supported &= (SUPPORTED_10baseT_Half
  1598. | SUPPORTED_10baseT_Full
  1599. | SUPPORTED_100baseT_Half
  1600. | SUPPORTED_100baseT_Full
  1601. | SUPPORTED_1000baseT_Half
  1602. | SUPPORTED_1000baseT_Full
  1603. | SUPPORTED_Autoneg
  1604. | SUPPORTED_MII
  1605. | SUPPORTED_TP);
  1606. phydev->advertising = phydev->supported;
  1607. }
  1608. port->phydev = phydev;
  1609. phydev->phy_link_change = xrx200_phy_link_change;
  1610.  
  1611. phy_attached_info(phydev);
  1612.  
  1613. #ifdef SW_POLLING
  1614. phy_read_status(phydev);
  1615.  
  1616. val = xrx200_mdio_rd(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
  1617. val |= ADVERTIZE_MPD;
  1618. xrx200_mdio_wr(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
  1619. xrx200_mdio_wr(priv->mii_bus, 0, 0, 0x1040);
  1620.  
  1621. phy_start_aneg(phydev);
  1622. #endif
  1623. return 0;
  1624. }
  1625.  
  1626. static void xrx200_port_config(struct xrx200_priv *priv,
  1627. const struct xrx200_port *port)
  1628. {
  1629. u16 miimode = 0;
  1630.  
  1631. switch (port->num) {
  1632. case 0: /* xMII0 */
  1633. case 1: /* xMII1 */
  1634. switch (port->phy_if) {
  1635. case PHY_INTERFACE_MODE_MII:
  1636. if (port->flags & XRX200_PORT_TYPE_PHY)
  1637. /* MII MAC mode, connected to external PHY */
  1638. miimode = MII_CFG_MODE_MIIM;
  1639. else
  1640. /* MII PHY mode, connected to external MAC */
  1641. miimode = MII_CFG_MODE_MIIP;
  1642. break;
  1643. case PHY_INTERFACE_MODE_RMII:
  1644. if (port->flags & XRX200_PORT_TYPE_PHY)
  1645. /* RMII MAC mode, connected to external PHY */
  1646. miimode = MII_CFG_MODE_RMIIM;
  1647. else
  1648. /* RMII PHY mode, connected to external MAC */
  1649. miimode = MII_CFG_MODE_RMIIP;
  1650. break;
  1651. case PHY_INTERFACE_MODE_RGMII:
  1652. /* RGMII MAC mode, connected to external PHY */
  1653. miimode = MII_CFG_MODE_RGMII;
  1654. break;
  1655. default:
  1656. break;
  1657. }
  1658. break;
  1659. case 2: /* internal GPHY0 */
  1660. case 3: /* internal GPHY0 */
  1661. case 4: /* internal GPHY1 */
  1662. switch (port->phy_if) {
  1663. case PHY_INTERFACE_MODE_MII:
  1664. case PHY_INTERFACE_MODE_GMII:
  1665. /* MII MAC mode, connected to internal GPHY */
  1666. miimode = MII_CFG_MODE_MIIM;
  1667. break;
  1668. default:
  1669. break;
  1670. }
  1671. break;
  1672. case 5: /* internal GPHY1 or xMII2 */
  1673. switch (port->phy_if) {
  1674. case PHY_INTERFACE_MODE_MII:
  1675. /* MII MAC mode, connected to internal GPHY */
  1676. miimode = MII_CFG_MODE_MIIM;
  1677. break;
  1678. case PHY_INTERFACE_MODE_RGMII:
  1679. /* RGMII MAC mode, connected to external PHY */
  1680. miimode = MII_CFG_MODE_RGMII;
  1681. break;
  1682. default:
  1683. break;
  1684. }
  1685. break;
  1686. default:
  1687. break;
  1688. }
  1689.  
  1690. ltq_mii_w32_mask(MII_CFG_MODE_MASK, miimode | MII_CFG_EN,
  1691. MII_CFG(port->num));
  1692. }
  1693.  
  1694. static int xrx200_init(struct net_device *dev)
  1695. {
  1696. struct xrx200_priv *priv = netdev_priv(dev);
  1697. struct sockaddr mac;
  1698. int err, i;
  1699.  
  1700. #ifndef SW_POLLING
  1701. unsigned int reg = 0;
  1702.  
  1703. /* enable auto polling */
  1704. for (i = 0; i < priv->num_port; i++)
  1705. reg |= BIT(priv->port[i].num);
  1706. ltq_mdio_w32(reg, MDIO_CLK_CFG0);
  1707. ltq_mdio_w32(MDIO1_25MHZ, MDIO_CLK_CFG1);
  1708. #endif
  1709.  
  1710. /* setup each port */
  1711. for (i = 0; i < priv->num_port; i++)
  1712. xrx200_port_config(priv, &priv->port[i]);
  1713.  
  1714. memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
  1715. if (!is_valid_ether_addr(mac.sa_data)) {
  1716. pr_warn("net-xrx200: invalid MAC, using random\n");
  1717. eth_random_addr(mac.sa_data);
  1718. dev->addr_assign_type |= NET_ADDR_RANDOM;
  1719. }
  1720.  
  1721. err = eth_mac_addr(dev, &mac);
  1722. if (err)
  1723. goto err_netdev;
  1724.  
  1725. for (i = 0; i < priv->num_port; i++)
  1726. if (xrx200_mdio_probe(dev, &priv->port[i]))
  1727. pr_warn("xrx200-mdio: probing phy of port %d failed\n",
  1728. priv->port[i].num);
  1729.  
  1730. return 0;
  1731.  
  1732. err_netdev:
  1733. unregister_netdev(dev);
  1734. free_netdev(dev);
  1735. return err;
  1736. }
  1737.  
  1738. static void xrx200_pci_microcode(void)
  1739. {
  1740. int i;
  1741.  
  1742. ltq_switch_w32_mask(PCE_TBL_CFG_ADDR_MASK | PCE_TBL_CFG_ADWR_MASK,
  1743. PCE_TBL_CFG_ADWR, PCE_TBL_CTRL);
  1744. ltq_switch_w32(0, PCE_TBL_MASK);
  1745.  
  1746. for (i = 0; i < ARRAY_SIZE(pce_microcode); i++) {
  1747. ltq_switch_w32(i, PCE_TBL_ADDR);
  1748. ltq_switch_w32(pce_microcode[i].val[3], PCE_TBL_VAL(0));
  1749. ltq_switch_w32(pce_microcode[i].val[2], PCE_TBL_VAL(1));
  1750. ltq_switch_w32(pce_microcode[i].val[1], PCE_TBL_VAL(2));
  1751. ltq_switch_w32(pce_microcode[i].val[0], PCE_TBL_VAL(3));
  1752.  
  1753. // start the table access:
  1754. ltq_switch_w32_mask(0, PCE_TBL_BUSY, PCE_TBL_CTRL);
  1755. while (ltq_switch_r32(PCE_TBL_CTRL) & PCE_TBL_BUSY);
  1756. }
  1757.  
  1758. /* tell the switch that the microcode is loaded */
  1759. ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
  1760. }
  1761.  
  1762. static void xrx200_hw_init(struct xrx200_priv *priv)
  1763. {
  1764. int i;
  1765.  
  1766. /* enable clock gate */
  1767. clk_enable(priv->clk);
  1768.  
  1769. ltq_switch_w32(1, 0);
  1770. mdelay(100);
  1771. ltq_switch_w32(0, 0);
  1772. /*
* TODO: we should really disable all phys/miis here and explicitly
* enable them in the device specific init function
  1775. */
  1776.  
  1777. /* disable port fetch/store dma */
  1778. for (i = 0; i < 7; i++ ) {
  1779. ltq_switch_w32(0, FDMA_PCTRLx(i));
  1780. ltq_switch_w32(0, SDMA_PCTRLx(i));
  1781. }
  1782.  
  1783. /* enable Switch */
  1784. ltq_mdio_w32_mask(0, MDIO_GLOB_ENABLE, MDIO_GLOB);
  1785.  
  1786. /* load the pce microcode */
  1787. xrx200_pci_microcode();
  1788.  
/* Default unknown Broadcast/Multicast/Unicast port maps */
  1790. ltq_switch_w32(0x40, PCE_PMAP1);
  1791. ltq_switch_w32(0x40, PCE_PMAP2);
  1792. ltq_switch_w32(0x40, PCE_PMAP3);
  1793.  
  1794. /* RMON Counter Enable for all physical ports */
  1795. for (i = 0; i < 7; i++)
  1796. ltq_switch_w32(0x1, BM_PCFG(i));
  1797.  
  1798. /* disable auto polling */
  1799. ltq_mdio_w32(0x0, MDIO_CLK_CFG0);
  1800.  
  1801. /* enable port statistic counters */
  1802. for (i = 0; i < 7; i++)
  1803. ltq_switch_w32(0x1, BM_PCFGx(i));
  1804.  
  1805. /* set IPG to 12 */
  1806. ltq_pmac_w32_mask(PMAC_IPG_MASK, 0xb, PMAC_RX_IPG);
  1807.  
  1808. #ifdef SW_ROUTING
  1809. /* enable status header, enable CRC */
  1810. ltq_pmac_w32_mask(0,
  1811. PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
  1812. PMAC_HD_CTL);
  1813. #else
  1814. /* disable status header, enable CRC */
  1815. ltq_pmac_w32_mask(PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS,
  1816. PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
  1817. PMAC_HD_CTL);
  1818. #endif
  1819.  
  1820. /* enable port fetch/store dma & VLAN Modification */
  1821. for (i = 0; i < 7; i++ ) {
  1822. ltq_switch_w32_mask(0, 0x19, FDMA_PCTRLx(i));
  1823. ltq_switch_w32_mask(0, 0x01, SDMA_PCTRLx(i));
  1824. ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(i, 0));
  1825. }
  1826.  
  1827. /* enable special tag insertion on cpu port */
  1828. ltq_switch_w32_mask(0, 0x02, FDMA_PCTRLx(6));
  1829. ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(6, 0));
  1830. ltq_switch_w32_mask(0, BIT(3), MAC_CTRL_REG(6, 2));
  1831. ltq_switch_w32(1518 + 8 + 4 * 2, MAC_FLEN_REG);
  1832. xrx200sw_write_x(1, XRX200_BM_QUEUE_GCTRL_GL_MOD, 0);
  1833.  
  1834. for (i = 0; i < XRX200_MAX_VLAN; i++)
  1835. priv->vlan_vid[i] = i;
  1836. }
  1837.  
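/*
* Tear down what xrx200_hw_init(), the DMA setup and xrx200_of_mdio()
* established: disable the switch core, free both DMA channels and the
* RX ring skbs, unregister the MDIO bus and release the gated clock.
*/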
  1838. static void xrx200_hw_cleanup(struct xrx200_priv *priv)
  1839. {
  1840. int i;
  1841.  
  1842. /* disable the switch */
  1843. ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
  1844.  
  1845. ltq_dma_free(&priv->chan_tx.dma);
  1846. ltq_dma_free(&priv->chan_rx.dma);
  1847.  
  1848. /* free the allocated RX ring */
  1849. for (i = 0; i < LTQ_DESC_NUM; i++)
  1850. dev_kfree_skb_any(priv->chan_rx.skb[i]);
  1851.  
  1852. /* clear the mdio bus */
  1853. mdiobus_unregister(priv->mii_bus);
  1854. mdiobus_free(priv->mii_bus);
  1855.  
  1856. /* release the clock */
  1857. clk_disable(priv->clk);
  1858. clk_put(priv->clk);
  1859. }
  1860.  
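/*
* Register the MDIO bus described by the "lantiq,xrx200-mdio" node; the
* PHYs listed as its children are probed through the xrx200_mdio_rd()
* and xrx200_mdio_wr() accessors.
*/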
  1861. static int xrx200_of_mdio(struct xrx200_priv *priv, struct device_node *np)
  1862. {
  1863. priv->mii_bus = mdiobus_alloc();
  1864. if (!priv->mii_bus)
  1865. return -ENOMEM;
  1866.  
  1867. priv->mii_bus->read = xrx200_mdio_rd;
  1868. priv->mii_bus->write = xrx200_mdio_wr;
  1869. priv->mii_bus->name = "lantiq,xrx200-mdio";
  1870. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
  1871.  
  1872. if (of_mdiobus_register(priv->mii_bus, np)) {
  1873. mdiobus_free(priv->mii_bus);
  1874. return -ENXIO;
  1875. }
  1876.  
  1877. return 0;
  1878. }
  1879.  
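/*
* Parse one "lantiq,xrx200-pdi-port" node: resolve the attached PHY (or
* register a fixed link), record the port number, PHY address and PHY
* mode, optionally toggle a phy-reset gpio, and add the port to the
* WAN/default port maps.
*/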
  1880. static void xrx200_of_port(struct xrx200_priv *priv, struct device_node *port)
  1881. {
  1882. const __be32 *addr, *id = of_get_property(port, "reg", NULL);
  1883. struct xrx200_port *p = &priv->port[priv->num_port];
  1884.  
  1885. if (!id)
  1886. return;
  1887.  
  1888. memset(p, 0, sizeof(struct xrx200_port));
  1889. p->phy_node = of_parse_phandle(port, "phy-handle", 0);
  1890.  
if (!p->phy_node && of_phy_is_fixed_link(port)) {
pr_info("static link on port <%d>\n", be32_to_cpup(id));
if (of_phy_register_fixed_link(port) < 0)
pr_warn("invalid fixed-link on port <%d>\n", be32_to_cpup(id));
else
pr_info("registered fixed-link on port <%d>\n", be32_to_cpup(id));
p->phy_node = of_node_get(port);
}
  1900.  
  1901. addr = of_get_property(p->phy_node, "reg", NULL);
  1902.  
  1903. if (!addr)
  1904. return;
  1905.  
  1906. p->num = *id;
  1907. p->phy_addr = *addr;
  1908. p->phy_if = of_get_phy_mode(port);
  1909. if (p->phy_addr > 0x10)
  1910. p->flags = XRX200_PORT_TYPE_MAC;
  1911. else
  1912. p->flags = XRX200_PORT_TYPE_PHY;
  1913.  
  1914. priv->num_port++;
  1915.  
  1916. p->gpio = of_get_gpio_flags(port, 0, &p->gpio_flags);
/* toggle the optional phy-reset gpio described in the device tree */
if (gpio_is_valid(p->gpio) && !gpio_request(p->gpio, "phy-reset")) {
gpio_direction_output(p->gpio,
(p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (1) : (0));
udelay(100);
gpio_set_value(p->gpio, (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
}
  1924. /* is this port a wan port ? */
  1925. if (priv->wan)
  1926. priv->wan_map |= BIT(p->num);
  1927.  
  1928. priv->d_port_map |= BIT(p->num);
  1929.  
  1930. /* store the port id in the hw struct so we can map ports -> devices */
  1931. priv->port_map[p->num] = 0;
  1932. }
  1933.  
  1934. static const struct net_device_ops xrx200_netdev_ops = {
  1935. .ndo_init = xrx200_init,
  1936. .ndo_open = xrx200_open,
  1937. .ndo_stop = xrx200_close,
  1938. .ndo_start_xmit = xrx200_start_xmit,
  1939. .ndo_set_mac_address = eth_mac_addr,
  1940. .ndo_validate_addr = eth_validate_addr,
  1941. .ndo_get_stats = xrx200_get_stats,
  1942. .ndo_tx_timeout = xrx200_tx_timeout,
  1943. };
  1944.  
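/*
* Parse one "lantiq,xrx200-pdi" interface node: set the net_device
* defaults, take the MAC address from the device tree, note whether
* this is the WAN interface and whether the switch should be enabled,
* then walk its child port nodes.
*/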
  1945. static void xrx200_of_iface(struct xrx200_priv *priv, struct device_node *iface, struct device *dev)
  1946. {
  1947. struct device_node *port;
  1948. const __be32 *wan;
  1949. const u8 *mac;
  1950.  
  1951. /* setup the network device */
  1952. strcpy(priv->net_dev->name, "eth%d");
  1953. priv->net_dev->netdev_ops = &xrx200_netdev_ops;
  1954. priv->net_dev->watchdog_timeo = XRX200_TX_TIMEOUT;
  1955. priv->net_dev->needed_headroom = XRX200_HEADROOM;
  1956. SET_NETDEV_DEV(priv->net_dev, dev);
  1957.  
  1958. mac = of_get_mac_address(iface);
  1959. if (mac)
  1960. memcpy(priv->mac, mac, ETH_ALEN);
  1961.  
  1962. /* is this the wan interface ? */
  1963. wan = of_get_property(iface, "lantiq,wan", NULL);
  1964. if (wan && (*wan == 1))
  1965. priv->wan = 1;
  1966.  
  1967. /* should the switch be enabled on this interface ? */
  1968. if (of_find_property(iface, "lantiq,switch", NULL))
  1969. priv->sw = 1;
  1970.  
  1971. /* load the ports that are part of the interface */
  1972. for_each_child_of_node(iface, port)
  1973. if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
  1974. xrx200_of_port(priv, port);
  1975.  
  1976. }
  1977.  
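/*
* Probe order: allocate the net_device, map the four register ranges
* (switch, MDIO, MII, PMAC), defer if a device referenced by
* "lantiq,phys" has not probed yet, set up the DMA channels and the
* switch hardware, register the MDIO bus, parse the interface and port
* nodes, initialise the switch layer and NAPI, and finally register the
* net_device.
*/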
  1978. static int xrx200_probe(struct platform_device *pdev)
  1979. {
  1980. struct device *dev = &pdev->dev;
  1981. struct resource *res[4];
  1982. struct device_node *mdio_np, *iface_np, *phy_np;
  1983. struct of_phandle_iterator it;
  1984. int err;
  1985. int i;
  1986. struct xrx200_priv *priv;
  1987. struct net_device *net_dev;
  1988.  
  1989.  
  1990. /* alloc the network device */
  1991. // TODO add multiqueue? devm_alloc_etherdev_mqs
  1992. net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
  1993. if (!net_dev)
  1994. return -ENOMEM;
  1995.  
  1996. priv = netdev_priv(net_dev);
  1997. priv->net_dev = net_dev;
  1998. priv->dev = dev;
  1999.  
  2000. net_dev->netdev_ops = &xrx200_netdev_ops;
  2001. SET_NETDEV_DEV(net_dev, dev);
  2002. net_dev->min_mtu = ETH_ZLEN;
  2003. net_dev->max_mtu = XRX200_DMA_DATA_LEN;
  2004.  
  2005. /* load the memory ranges */
  2006. for (i = 0; i < 4; i++) {
  2007. res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
  2008. if (!res[i]) {
  2009. dev_err(&pdev->dev, "failed to get resources\n");
  2010. return -ENOENT;
  2011. }
  2012. }
  2013. xrx200_switch_membase = devm_ioremap_resource(&pdev->dev, res[0]);
  2014. xrx200_mdio_membase = devm_ioremap_resource(&pdev->dev, res[1]);
  2015. xrx200_mii_membase = devm_ioremap_resource(&pdev->dev, res[2]);
  2016. xrx200_pmac_membase = devm_ioremap_resource(&pdev->dev, res[3]);
if (IS_ERR(xrx200_switch_membase) || IS_ERR(xrx200_mdio_membase) ||
IS_ERR(xrx200_mii_membase) || IS_ERR(xrx200_pmac_membase)) {
dev_err(&pdev->dev, "failed to request and remap io ranges\n");
return -ENOMEM;
}
  2022.  
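/*
* The "lantiq,phys" phandles are expected to point at the GPHY firmware
* loader instances; defer probing until each of them has set its driver
* data, i.e. has finished loading the PHY firmware.
*/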
  2023. of_for_each_phandle(&it, err, pdev->dev.of_node, "lantiq,phys", NULL, 0) {
  2024. phy_np = it.node;
  2025. if (phy_np) {
  2026. struct platform_device *phy = of_find_device_by_node(phy_np);
  2027.  
  2028. of_node_put(phy_np);
  2029. if (!platform_get_drvdata(phy))
  2030. return -EPROBE_DEFER;
  2031. }
  2032. }
  2033.  
  2034. priv->chan_rx.dma.irq = XRX200_DMA_IRQ + XRX200_DMA_RX;
  2035. priv->chan_tx.dma.irq = XRX200_DMA_IRQ + XRX200_DMA_TX;
  2036. priv->chan_rx.priv = priv;
  2037. priv->chan_tx.priv = priv;
  2038.  
  2039. /* get the clock */
  2040. priv->clk = clk_get(&pdev->dev, NULL);
  2041. if (IS_ERR(priv->clk)) {
  2042. dev_err(&pdev->dev, "failed to get clock\n");
  2043. return PTR_ERR(priv->clk);
  2044. }
  2045.  
  2046. /* bring up the dma engine and IP core */
  2047. err = xrx200_dma_init(priv);
  2048. if (err)
  2049. return err;
  2050.  
  2051. /* enable clock gate */
  2052. err = clk_prepare_enable(priv->clk);
  2053. if (err)
  2054. goto err_uninit_dma;
  2055.  
  2056. xrx200_hw_init(priv);
  2057.  
  2058. /* bring up the mdio bus */
  2059. mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
  2060. "lantiq,xrx200-mdio");
  2061. if (mdio_np)
  2062. if (xrx200_of_mdio(priv, mdio_np))
  2063. dev_err(&pdev->dev, "mdio probe failed\n");
  2064.  
  2065. /* load the interfaces */
  2066. for_each_child_of_node(pdev->dev.of_node, iface_np)
  2067. if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
  2068. xrx200_of_iface(priv, iface_np, &pdev->dev);
break; /* HACK: only the first "lantiq,xrx200-pdi" interface is handled */
  2070. }
  2071.  
  2072. xrx200sw_init(priv);
  2073.  
  2074. /* set wan port mask */
  2075. ltq_pmac_w32(priv->wan_map, PMAC_EWAN);
  2076.  
  2077. /* setup NAPI */
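/*
* RX and TX completion run in separate NAPI contexts, each registered
* with a weight of 32: xrx200_poll_rx() handles received frames while
* xrx200_tx_housekeeping() reclaims completed TX descriptors.
*/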
netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
  2079. netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
  2080.  
net_dev->features |= NETIF_F_SG;
  2082. net_dev->hw_features |= NETIF_F_SG;
  2083. net_dev->vlan_features |= NETIF_F_SG;
  2084.  
  2085. platform_set_drvdata(pdev, priv);
  2086.  
  2087. err = register_netdev(net_dev);
  2088. if (err)
  2089. goto err_unprepare_clk;
  2090.  
  2091. return 0;
  2092.  
  2093. err_unprepare_clk:
  2094. clk_disable_unprepare(priv->clk);
  2095.  
  2096. err_uninit_dma:
  2097. xrx200_hw_cleanup(priv);
  2098.  
  2099. return err;
  2100. }
  2101.  
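/*
* Remove path: quiesce the stack (stop the TX queue, delete both NAPI
* instances), unregister the net_device, then gate the clock and shut
* the switch hardware down via xrx200_hw_cleanup().
*/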
  2102. static int xrx200_remove(struct platform_device *pdev)
  2103. {
  2104.  
  2105. struct xrx200_priv *priv = platform_get_drvdata(pdev);
  2106. struct net_device *net_dev = priv->net_dev;
  2107.  
  2108. /* free stack related instances */
  2109. netif_stop_queue(net_dev);
  2110. netif_napi_del(&priv->chan_tx.napi);
  2111. netif_napi_del(&priv->chan_rx.napi);
  2112.  
  2113. /* remove the actual device */
  2114. unregister_netdev(net_dev);
  2115.  
  2116. /* release the clock */
  2117. clk_disable_unprepare(priv->clk);
  2118.  
  2119. /* shut down hardware */
  2120. xrx200_hw_cleanup(priv);
  2121.  
  2122. return 0;
  2123. }
  2124.  
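/*
* Rough shape of the device tree this driver parses (node names and
* property names are taken from the code above; addresses, phandles and
* the exact layout are board specific and shown here only as a sketch):
*
*	eth@e108000 {
*		compatible = "lantiq,xrx200-net";
*		lantiq,phys = <&gphy0>, <&gphy1>;
*
*		mdio@0 {
*			compatible = "lantiq,xrx200-mdio";
*			phy11: ethernet-phy@11 { reg = <0x11>; };
*		};
*
*		interface@0 {
*			compatible = "lantiq,xrx200-pdi";
*			lantiq,switch;
*			ethernet@2 {
*				compatible = "lantiq,xrx200-pdi-port";
*				reg = <2>;
*				phy-mode = "rgmii";
*				phy-handle = <&phy11>;
*			};
*		};
*	};
*/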
  2125. static const struct of_device_id xrx200_match[] = {
  2126. { .compatible = "lantiq,xrx200-net" },
  2127. {},
  2128. };
  2129. MODULE_DEVICE_TABLE(of, xrx200_match);
  2130.  
  2131. static struct platform_driver xrx200_driver = {
  2132. .probe = xrx200_probe,
  2133. .remove = xrx200_remove,
  2134. .driver = {
  2135. .name = "lantiq,xrx200-net",
  2136. .of_match_table = xrx200_match,
  2137. .owner = THIS_MODULE,
  2138. },
  2139. };
  2140.  
  2141. module_platform_driver(xrx200_driver);
  2142.  
  2143. MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
  2144. MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
  2145. MODULE_LICENSE("GPL");