Advertisement
Guest User

Untitled

a guest
Dec 31st, 2022
57
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 197.43 KB | Source Code | 0 0
  1. diff -rupN a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
  2. --- a/drivers/net/ethernet/freescale/Kconfig    2022-12-25 22:41:39.000000000 +0100
  3. +++ b/drivers/net/ethernet/freescale/Kconfig    2022-12-31 15:56:55.294955336 +0100
  4. @@ -29,7 +29,6 @@ config FEC
  5.     select CRC32
  6.     select PHYLIB
  7.     select PAGE_POOL
  8. -   select PAGE_POOL_STATS
  9.     imply NET_SELFTESTS
  10.     help
  11.       Say Y here if you want to use the built-in 10/100 Fast ethernet
  12. diff -rupN a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
  13. --- a/drivers/net/ethernet/freescale/dpaa/Kconfig   2022-12-25 22:41:39.000000000 +0100
  14. +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig   2022-12-31 15:56:55.294955336 +0100
  15. @@ -2,8 +2,8 @@
  16.  menuconfig FSL_DPAA_ETH
  17.     tristate "DPAA Ethernet"
  18.     depends on FSL_DPAA && FSL_FMAN
  19. -   select PHYLINK
  20. -   select PCS_LYNX
  21. +   select PHYLIB
  22. +   select FIXED_PHY
  23.     help
  24.       Data Path Acceleration Architecture Ethernet driver,
  25.       supporting the Freescale QorIQ chips.
  26. diff -rupN a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
  27. --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c    2022-12-25 22:41:39.000000000 +0100
  28. +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c    2022-12-31 15:56:55.294955336 +0100
  29. @@ -264,19 +264,8 @@ static int dpaa_netdev_init(struct net_d
  30.     net_dev->needed_headroom = priv->tx_headroom;
  31.     net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
  32.  
  33. -   /* The rest of the config is filled in by the mac device already */
  34. -   mac_dev->phylink_config.dev = &net_dev->dev;
  35. -   mac_dev->phylink_config.type = PHYLINK_NETDEV;
  36. +   mac_dev->net_dev = net_dev;
  37.     mac_dev->update_speed = dpaa_eth_cgr_set_speed;
  38. -   mac_dev->phylink = phylink_create(&mac_dev->phylink_config,
  39. -                     dev_fwnode(mac_dev->dev),
  40. -                     mac_dev->phy_if,
  41. -                     mac_dev->phylink_ops);
  42. -   if (IS_ERR(mac_dev->phylink)) {
  43. -       err = PTR_ERR(mac_dev->phylink);
  44. -       dev_err_probe(dev, err, "Could not create phylink\n");
  45. -       return err;
  46. -   }
  47.  
  48.     /* start without the RUNNING flag, phylib controls it later */
  49.     netif_carrier_off(net_dev);
  50. @@ -284,7 +273,6 @@ static int dpaa_netdev_init(struct net_d
  51.     err = register_netdev(net_dev);
  52.     if (err < 0) {
  53.         dev_err(dev, "register_netdev() = %d\n", err);
  54. -       phylink_destroy(mac_dev->phylink);
  55.         return err;
  56.     }
  57.  
  58. @@ -306,7 +294,8 @@ static int dpaa_stop(struct net_device *
  59.      */
  60.     msleep(200);
  61.  
  62. -   phylink_stop(mac_dev->phylink);
  63. +   if (mac_dev->phy_dev)
  64. +       phy_stop(mac_dev->phy_dev);
  65.     mac_dev->disable(mac_dev->fman_mac);
  66.  
  67.     for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
  68. @@ -315,7 +304,8 @@ static int dpaa_stop(struct net_device *
  69.             err = error;
  70.     }
  71.  
  72. -   phylink_disconnect_phy(mac_dev->phylink);
  73. +   if (net_dev->phydev)
  74. +       phy_disconnect(net_dev->phydev);
  75.     net_dev->phydev = NULL;
  76.  
  77.     msleep(200);
  78. @@ -843,10 +833,10 @@ static int dpaa_eth_cgr_init(struct dpaa
  79.  
  80.     /* Set different thresholds based on the configured MAC speed.
  81.      * This may turn suboptimal if the MAC is reconfigured at another
  82. -    * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up
  83. +    * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
  84.      * callback.
  85.      */
  86. -   if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD)
  87. +   if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
  88.         cs_th = DPAA_CS_THRESHOLD_10G;
  89.     else
  90.         cs_th = DPAA_CS_THRESHOLD_1G;
  91. @@ -875,7 +865,7 @@ out_error:
  92.  
  93.  static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
  94.  {
  95. -   struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev);
  96. +   struct net_device *net_dev = mac_dev->net_dev;
  97.     struct dpaa_priv *priv = netdev_priv(net_dev);
  98.     struct qm_mcc_initcgr opts = { };
  99.     u32 cs_th;
  100. @@ -2914,6 +2904,58 @@ static void dpaa_eth_napi_disable(struct
  101.     }
  102.  }
  103.  
  104. +static void dpaa_adjust_link(struct net_device *net_dev)
  105. +{
  106. +   struct mac_device *mac_dev;
  107. +   struct dpaa_priv *priv;
  108. +
  109. +   priv = netdev_priv(net_dev);
  110. +   mac_dev = priv->mac_dev;
  111. +   mac_dev->adjust_link(mac_dev);
  112. +}
  113. +
  114. +/* The Aquantia PHYs are capable of performing rate adaptation */
  115. +#define PHY_VEND_AQUANTIA  0x03a1b400
  116. +#define PHY_VEND_AQUANTIA2 0x31c31c00
  117. +
  118. +static int dpaa_phy_init(struct net_device *net_dev)
  119. +{
  120. +   __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
  121. +   struct mac_device *mac_dev;
  122. +   struct phy_device *phy_dev;
  123. +   struct dpaa_priv *priv;
  124. +   u32 phy_vendor;
  125. +
  126. +   priv = netdev_priv(net_dev);
  127. +   mac_dev = priv->mac_dev;
  128. +
  129. +   phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
  130. +                &dpaa_adjust_link, 0,
  131. +                mac_dev->phy_if);
  132. +   if (!phy_dev) {
  133. +       netif_err(priv, ifup, net_dev, "init_phy() failed\n");
  134. +       return -ENODEV;
  135. +   }
  136. +
  137. +   phy_vendor = phy_dev->drv->phy_id & GENMASK(31, 10);
  138. +   /* Unless the PHY is capable of rate adaptation */
  139. +   if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
  140. +       (phy_vendor != PHY_VEND_AQUANTIA &&
  141. +        phy_vendor != PHY_VEND_AQUANTIA2)) {
  142. +       /* remove any features not supported by the controller */
  143. +       ethtool_convert_legacy_u32_to_link_mode(mask,
  144. +                           mac_dev->if_support);
  145. +       linkmode_and(phy_dev->supported, phy_dev->supported, mask);
  146. +   }
  147. +
  148. +   phy_support_asym_pause(phy_dev);
  149. +
  150. +   mac_dev->phy_dev = phy_dev;
  151. +   net_dev->phydev = phy_dev;
  152. +
  153. +   return 0;
  154. +}
  155. +
  156.  static int dpaa_open(struct net_device *net_dev)
  157.  {
  158.     struct mac_device *mac_dev;
  159. @@ -2924,8 +2966,7 @@ static int dpaa_open(struct net_device *
  160.     mac_dev = priv->mac_dev;
  161.     dpaa_eth_napi_enable(priv);
  162.  
  163. -   err = phylink_of_phy_connect(mac_dev->phylink,
  164. -                    mac_dev->dev->of_node, 0);
  165. +   err = dpaa_phy_init(net_dev);
  166.     if (err)
  167.         goto phy_init_failed;
  168.  
  169. @@ -2940,7 +2981,7 @@ static int dpaa_open(struct net_device *
  170.         netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
  171.         goto mac_start_failed;
  172.     }
  173. -   phylink_start(mac_dev->phylink);
  174. +   phy_start(priv->mac_dev->phy_dev);
  175.  
  176.     netif_tx_start_all_queues(net_dev);
  177.  
  178. @@ -2949,7 +2990,6 @@ static int dpaa_open(struct net_device *
  179.  mac_start_failed:
  180.     for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
  181.         fman_port_disable(mac_dev->port[i]);
  182. -   phylink_disconnect_phy(mac_dev->phylink);
  183.  
  184.  phy_init_failed:
  185.     dpaa_eth_napi_disable(priv);
  186. @@ -3105,12 +3145,10 @@ static int dpaa_ts_ioctl(struct net_devi
  187.  static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
  188.  {
  189.     int ret = -EINVAL;
  190. -   struct dpaa_priv *priv = netdev_priv(net_dev);
  191.  
  192.     if (cmd == SIOCGMIIREG) {
  193.         if (net_dev->phydev)
  194. -           return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
  195. -                        cmd);
  196. +           return phy_mii_ioctl(net_dev->phydev, rq, cmd);
  197.     }
  198.  
  199.     if (cmd == SIOCSHWTSTAMP)
  200. @@ -3513,7 +3551,6 @@ static int dpaa_remove(struct platform_d
  201.  
  202.     dev_set_drvdata(dev, NULL);
  203.     unregister_netdev(net_dev);
  204. -   phylink_destroy(priv->mac_dev->phylink);
  205.  
  206.     err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
  207.  
  208. diff -rupN a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
  209. --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c    2022-12-25 22:41:39.000000000 +0100
  210. +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c    2022-12-31 15:56:55.295955322 +0100
  211. @@ -54,19 +54,27 @@ static char dpaa_stats_global[][ETH_GSTR
  212.  static int dpaa_get_link_ksettings(struct net_device *net_dev,
  213.                    struct ethtool_link_ksettings *cmd)
  214.  {
  215. -   struct dpaa_priv *priv = netdev_priv(net_dev);
  216. -   struct mac_device *mac_dev = priv->mac_dev;
  217. +   if (!net_dev->phydev)
  218. +       return 0;
  219.  
  220. -   return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd);
  221. +   phy_ethtool_ksettings_get(net_dev->phydev, cmd);
  222. +
  223. +   return 0;
  224.  }
  225.  
  226.  static int dpaa_set_link_ksettings(struct net_device *net_dev,
  227.                    const struct ethtool_link_ksettings *cmd)
  228.  {
  229. -   struct dpaa_priv *priv = netdev_priv(net_dev);
  230. -   struct mac_device *mac_dev = priv->mac_dev;
  231. +   int err;
  232. +
  233. +   if (!net_dev->phydev)
  234. +       return -ENODEV;
  235.  
  236. -   return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd);
  237. +   err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
  238. +   if (err < 0)
  239. +       netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
  240. +
  241. +   return err;
  242.  }
  243.  
  244.  static void dpaa_get_drvinfo(struct net_device *net_dev,
  245. @@ -91,28 +99,80 @@ static void dpaa_set_msglevel(struct net
  246.  
  247.  static int dpaa_nway_reset(struct net_device *net_dev)
  248.  {
  249. -   struct dpaa_priv *priv = netdev_priv(net_dev);
  250. -   struct mac_device *mac_dev = priv->mac_dev;
  251. +   int err;
  252. +
  253. +   if (!net_dev->phydev)
  254. +       return -ENODEV;
  255.  
  256. -   return phylink_ethtool_nway_reset(mac_dev->phylink);
  257. +   err = 0;
  258. +   if (net_dev->phydev->autoneg) {
  259. +       err = phy_start_aneg(net_dev->phydev);
  260. +       if (err < 0)
  261. +           netdev_err(net_dev, "phy_start_aneg() = %d\n",
  262. +                  err);
  263. +   }
  264. +
  265. +   return err;
  266.  }
  267.  
  268.  static void dpaa_get_pauseparam(struct net_device *net_dev,
  269.                 struct ethtool_pauseparam *epause)
  270.  {
  271. -   struct dpaa_priv *priv = netdev_priv(net_dev);
  272. -   struct mac_device *mac_dev = priv->mac_dev;
  273. +   struct mac_device *mac_dev;
  274. +   struct dpaa_priv *priv;
  275. +
  276. +   priv = netdev_priv(net_dev);
  277. +   mac_dev = priv->mac_dev;
  278. +
  279. +   if (!net_dev->phydev)
  280. +       return;
  281.  
  282. -   phylink_ethtool_get_pauseparam(mac_dev->phylink, epause);
  283. +   epause->autoneg = mac_dev->autoneg_pause;
  284. +   epause->rx_pause = mac_dev->rx_pause_active;
  285. +   epause->tx_pause = mac_dev->tx_pause_active;
  286.  }
  287.  
  288.  static int dpaa_set_pauseparam(struct net_device *net_dev,
  289.                    struct ethtool_pauseparam *epause)
  290.  {
  291. -   struct dpaa_priv *priv = netdev_priv(net_dev);
  292. -   struct mac_device *mac_dev = priv->mac_dev;
  293. +   struct mac_device *mac_dev;
  294. +   struct phy_device *phydev;
  295. +   bool rx_pause, tx_pause;
  296. +   struct dpaa_priv *priv;
  297. +   int err;
  298. +
  299. +   priv = netdev_priv(net_dev);
  300. +   mac_dev = priv->mac_dev;
  301. +
  302. +   phydev = net_dev->phydev;
  303. +   if (!phydev) {
  304. +       netdev_err(net_dev, "phy device not initialized\n");
  305. +       return -ENODEV;
  306. +   }
  307. +
  308. +   if (!phy_validate_pause(phydev, epause))
  309. +       return -EINVAL;
  310. +
  311. +   /* The MAC should know how to handle PAUSE frame autonegotiation before
  312. +    * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
  313. +    * settings.
  314. +    */
  315. +   mac_dev->autoneg_pause = !!epause->autoneg;
  316. +   mac_dev->rx_pause_req = !!epause->rx_pause;
  317. +   mac_dev->tx_pause_req = !!epause->tx_pause;
  318. +
  319. +   /* Determine the sym/asym advertised PAUSE capabilities from the desired
  320. +    * rx/tx pause settings.
  321. +    */
  322. +
  323. +   phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
  324. +
  325. +   fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
  326. +   err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
  327. +   if (err < 0)
  328. +       netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
  329.  
  330. -   return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause);
  331. +   return err;
  332.  }
  333.  
  334.  static int dpaa_get_sset_count(struct net_device *net_dev, int type)
  335. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
  336. --- a/drivers/net/ethernet/freescale/dpaa2/Makefile 2022-12-25 22:41:39.000000000 +0100
  337. +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile 2022-12-31 15:56:55.295955322 +0100
  338. @@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH)     += fsl-dpaa
  339.  obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK)  += fsl-dpaa2-ptp.o
  340.  obj-$(CONFIG_FSL_DPAA2_SWITCH)     += fsl-dpaa2-switch.o
  341.  
  342. -fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o
  343. +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
  344.  fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
  345.  fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
  346.  fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
  347. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
  348. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c  2022-12-25 22:41:39.000000000 +0100
  349. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c  2022-12-31 15:56:55.295955322 +0100
  350. @@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_
  351.     int i;
  352.  
  353.     seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
  354. -   seq_printf(file, "%s  %5s%16s%16s%16s%16s%16s%16s\n",
  355. -          "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs",
  356. +   seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
  357. +          "CHID", "CPU", "Deq busy", "Frames", "CDANs",
  358.            "Avg Frm/CDAN", "Buf count");
  359.  
  360.     for (i = 0; i < priv->num_channels; i++) {
  361.         ch = priv->channel[i];
  362. -       seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n",
  363. -              "CH#", i, ch->ch_id,
  364. +       seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
  365. +              ch->ch_id,
  366.                ch->nctx.desired_cpu,
  367.                ch->stats.dequeue_portal_busy,
  368.                ch->stats.frames,
  369. @@ -119,51 +119,6 @@ static int dpaa2_dbg_ch_show(struct seq_
  370.  
  371.  DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
  372.  
  373. -static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset)
  374. -{
  375. -   struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
  376. -   int i, j, num_queues, buf_cnt;
  377. -   struct dpaa2_eth_bp *bp;
  378. -   char ch_name[10];
  379. -   int err;
  380. -
  381. -   /* Print out the header */
  382. -   seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name);
  383. -   seq_printf(file, "%s  %10s%15s", "IDX", "BPID", "Buf count");
  384. -   num_queues = dpaa2_eth_queue_count(priv);
  385. -   for (i = 0; i < num_queues; i++) {
  386. -       snprintf(ch_name, sizeof(ch_name), "CH#%d", i);
  387. -       seq_printf(file, "%10s", ch_name);
  388. -   }
  389. -   seq_printf(file, "\n");
  390. -
  391. -   /* For each buffer pool, print out its BPID, the number of buffers in
  392. -    * that buffer pool and the channels which are using it.
  393. -    */
  394. -   for (i = 0; i < priv->num_bps; i++) {
  395. -       bp = priv->bp[i];
  396. -
  397. -       err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt);
  398. -       if (err) {
  399. -           netdev_warn(priv->net_dev, "Buffer count query error %d\n", err);
  400. -           return err;
  401. -       }
  402. -
  403. -       seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt);
  404. -       for (j = 0; j < num_queues; j++) {
  405. -           if (priv->channel[j]->bp == bp)
  406. -               seq_printf(file, "%10s", "x");
  407. -           else
  408. -               seq_printf(file, "%10s", "");
  409. -       }
  410. -       seq_printf(file, "\n");
  411. -   }
  412. -
  413. -   return 0;
  414. -}
  415. -
  416. -DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp);
  417. -
  418.  void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
  419.  {
  420.     struct fsl_mc_device *dpni_dev;
  421. @@ -184,10 +139,6 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv
  422.  
  423.     /* per-fq stats file */
  424.     debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
  425. -
  426. -   /* per buffer pool stats file */
  427. -   debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops);
  428. -
  429.  }
  430.  
  431.  void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
  432. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
  433. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c  2022-12-25 22:41:39.000000000 +0100
  434. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c  2022-12-31 15:56:55.295955322 +0100
  435. @@ -37,9 +37,18 @@ static int dpaa2_eth_dl_info_get(struct
  436.     struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
  437.     struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
  438.     char buf[10];
  439. +   int err;
  440. +
  441. +   err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
  442. +   if (err)
  443. +       return err;
  444.  
  445.     scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
  446. -   return devlink_info_version_running_put(req, "dpni", buf);
  447. +   err = devlink_info_version_running_put(req, "dpni", buf);
  448. +   if (err)
  449. +       return err;
  450. +
  451. +   return 0;
  452.  }
  453.  
  454.  static struct dpaa2_eth_trap_item *
  455. @@ -217,16 +226,25 @@ int dpaa2_eth_dl_port_add(struct dpaa2_e
  456.  {
  457.     struct devlink_port *devlink_port = &priv->devlink_port;
  458.     struct devlink_port_attrs attrs = {};
  459. +   int err;
  460.  
  461.     attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
  462.     devlink_port_attrs_set(devlink_port, &attrs);
  463. -   return devlink_port_register(priv->devlink, devlink_port, 0);
  464. +
  465. +   err = devlink_port_register(priv->devlink, devlink_port, 0);
  466. +   if (err)
  467. +       return err;
  468. +
  469. +   devlink_port_type_eth_set(devlink_port, priv->net_dev);
  470. +
  471. +   return 0;
  472.  }
  473.  
  474.  void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
  475.  {
  476.     struct devlink_port *devlink_port = &priv->devlink_port;
  477.  
  478. +   devlink_port_type_clear(devlink_port);
  479.     devlink_port_unregister(devlink_port);
  480.  }
  481.  
  482. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
  483. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h    2022-12-25 22:41:39.000000000 +0100
  484. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h    2022-12-31 15:56:55.295955322 +0100
  485. @@ -73,14 +73,6 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
  486.          TP_ARGS(netdev, fd)
  487.  );
  488.  
  489. -/* Tx (egress) XSK fd */
  490. -DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd,
  491. -        TP_PROTO(struct net_device *netdev,
  492. -             const struct dpaa2_fd *fd),
  493. -
  494. -        TP_ARGS(netdev, fd)
  495. -);
  496. -
  497.  /* Rx fd */
  498.  DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
  499.          TP_PROTO(struct net_device *netdev,
  500. @@ -89,14 +81,6 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
  501.          TP_ARGS(netdev, fd)
  502.  );
  503.  
  504. -/* Rx XSK fd */
  505. -DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd,
  506. -        TP_PROTO(struct net_device *netdev,
  507. -             const struct dpaa2_fd *fd),
  508. -
  509. -        TP_ARGS(netdev, fd)
  510. -);
  511. -
  512.  /* Tx confirmation fd */
  513.  DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
  514.          TP_PROTO(struct net_device *netdev,
  515. @@ -106,81 +90,57 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf
  516.  );
  517.  
  518.  /* Log data about raw buffers. Useful for tracing DPBP content. */
  519. -DECLARE_EVENT_CLASS(dpaa2_eth_buf,
  520. -           /* Trace function prototype */
  521. -           TP_PROTO(struct net_device *netdev,
  522. -                /* virtual address and size */
  523. -               void *vaddr,
  524. -               size_t size,
  525. -               /* dma map address and size */
  526. -               dma_addr_t dma_addr,
  527. -               size_t map_size,
  528. -               /* buffer pool id, if relevant */
  529. -               u16 bpid),
  530. -
  531. -           /* Repeat argument list here */
  532. -           TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
  533. -
  534. -           /* A structure containing the relevant information we want
  535. -            * to record. Declare name and type for each normal element,
  536. -            * name, type and size for arrays. Use __string for variable
  537. -            * length strings.
  538. -            */
  539. -           TP_STRUCT__entry(
  540. -                     __field(void *, vaddr)
  541. -                     __field(size_t, size)
  542. -                     __field(dma_addr_t, dma_addr)
  543. -                     __field(size_t, map_size)
  544. -                     __field(u16, bpid)
  545. -                     __string(name, netdev->name)
  546. -           ),
  547. -
  548. -           /* The function that assigns values to the above declared
  549. -            * fields
  550. -            */
  551. -           TP_fast_assign(
  552. -                  __entry->vaddr = vaddr;
  553. -                  __entry->size = size;
  554. -                  __entry->dma_addr = dma_addr;
  555. -                  __entry->map_size = map_size;
  556. -                  __entry->bpid = bpid;
  557. -                  __assign_str(name, netdev->name);
  558. -           ),
  559. -
  560. -           /* This is what gets printed when the trace event is
  561. -            * triggered.
  562. -            */
  563. -           TP_printk(TR_BUF_FMT,
  564. -                 __get_str(name),
  565. -                 __entry->vaddr,
  566. -                 __entry->size,
  567. -                 &__entry->dma_addr,
  568. -                 __entry->map_size,
  569. -                 __entry->bpid)
  570. -);
  571. -
  572. -/* Main memory buff seeding */
  573. -DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed,
  574. -        TP_PROTO(struct net_device *netdev,
  575. -             void *vaddr,
  576. -             size_t size,
  577. -             dma_addr_t dma_addr,
  578. -             size_t map_size,
  579. -             u16 bpid),
  580. -
  581. -        TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
  582. -);
  583. -
  584. -/* UMEM buff seeding on AF_XDP fast path */
  585. -DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed,
  586. -        TP_PROTO(struct net_device *netdev,
  587. -             void *vaddr,
  588. -             size_t size,
  589. -             dma_addr_t dma_addr,
  590. -             size_t map_size,
  591. -             u16 bpid),
  592. -
  593. -        TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
  594. +TRACE_EVENT(dpaa2_eth_buf_seed,
  595. +       /* Trace function prototype */
  596. +       TP_PROTO(struct net_device *netdev,
  597. +            /* virtual address and size */
  598. +            void *vaddr,
  599. +            size_t size,
  600. +            /* dma map address and size */
  601. +            dma_addr_t dma_addr,
  602. +            size_t map_size,
  603. +            /* buffer pool id, if relevant */
  604. +            u16 bpid),
  605. +
  606. +       /* Repeat argument list here */
  607. +       TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
  608. +
  609. +       /* A structure containing the relevant information we want
  610. +        * to record. Declare name and type for each normal element,
  611. +        * name, type and size for arrays. Use __string for variable
  612. +        * length strings.
  613. +        */
  614. +       TP_STRUCT__entry(
  615. +                __field(void *, vaddr)
  616. +                __field(size_t, size)
  617. +                __field(dma_addr_t, dma_addr)
  618. +                __field(size_t, map_size)
  619. +                __field(u16, bpid)
  620. +                __string(name, netdev->name)
  621. +       ),
  622. +
  623. +       /* The function that assigns values to the above declared
  624. +        * fields
  625. +        */
  626. +       TP_fast_assign(
  627. +              __entry->vaddr = vaddr;
  628. +              __entry->size = size;
  629. +              __entry->dma_addr = dma_addr;
  630. +              __entry->map_size = map_size;
  631. +              __entry->bpid = bpid;
  632. +              __assign_str(name, netdev->name);
  633. +       ),
  634. +
  635. +       /* This is what gets printed when the trace event is
  636. +        * triggered.
  637. +        */
  638. +       TP_printk(TR_BUF_FMT,
  639. +             __get_str(name),
  640. +             __entry->vaddr,
  641. +             __entry->size,
  642. +             &__entry->dma_addr,
  643. +             __entry->map_size,
  644. +             __entry->bpid)
  645.  );
  646.  
  647.  /* If only one event of a certain type needs to be declared, use TRACE_EVENT().
  648. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
  649. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c  2022-12-25 22:41:39.000000000 +0100
  650. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c  2022-12-31 15:56:55.295955322 +0100
  651. @@ -1,6 +1,6 @@
  652.  // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
  653.  /* Copyright 2014-2016 Freescale Semiconductor Inc.
  654. - * Copyright 2016-2022 NXP
  655. + * Copyright 2016-2020 NXP
  656.   */
  657.  #include <linux/init.h>
  658.  #include <linux/module.h>
  659. @@ -8,6 +8,7 @@
  660.  #include <linux/etherdevice.h>
  661.  #include <linux/of_net.h>
  662.  #include <linux/interrupt.h>
  663. +#include <linux/msi.h>
  664.  #include <linux/kthread.h>
  665.  #include <linux/iommu.h>
  666.  #include <linux/fsl/mc.h>
  667. @@ -18,7 +19,6 @@
  668.  #include <net/pkt_cls.h>
  669.  #include <net/sock.h>
  670.  #include <net/tso.h>
  671. -#include <net/xdp_sock_drv.h>
  672.  
  673.  #include "dpaa2-eth.h"
  674.  
  675. @@ -104,8 +104,8 @@ static void dpaa2_ptp_onestep_reg_update
  676.     priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
  677.  }
  678.  
  679. -void *dpaa2_iova_to_virt(struct iommu_domain *domain,
  680. -            dma_addr_t iova_addr)
  681. +static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
  682. +               dma_addr_t iova_addr)
  683.  {
  684.     phys_addr_t phys_addr;
  685.  
  686. @@ -279,33 +279,23 @@ static struct sk_buff *dpaa2_eth_build_f
  687.   * be released in the pool
  688.   */
  689.  static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
  690. -               int count, bool xsk_zc)
  691. +               int count)
  692.  {
  693.     struct device *dev = priv->net_dev->dev.parent;
  694. -   struct dpaa2_eth_swa *swa;
  695. -   struct xdp_buff *xdp_buff;
  696.     void *vaddr;
  697.     int i;
  698.  
  699.     for (i = 0; i < count; i++) {
  700.         vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
  701. -
  702. -       if (!xsk_zc) {
  703. -           dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
  704. -                      DMA_BIDIRECTIONAL);
  705. -           free_pages((unsigned long)vaddr, 0);
  706. -       } else {
  707. -           swa = (struct dpaa2_eth_swa *)
  708. -               (vaddr + DPAA2_ETH_RX_HWA_SIZE);
  709. -           xdp_buff = swa->xsk.xdp_buff;
  710. -           xsk_buff_free(xdp_buff);
  711. -       }
  712. +       dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
  713. +                  DMA_BIDIRECTIONAL);
  714. +       free_pages((unsigned long)vaddr, 0);
  715.     }
  716.  }
  717.  
  718. -void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
  719. -              struct dpaa2_eth_channel *ch,
  720. -              dma_addr_t addr)
  721. +static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
  722. +                 struct dpaa2_eth_channel *ch,
  723. +                 dma_addr_t addr)
  724.  {
  725.     int retries = 0;
  726.     int err;
  727. @@ -314,7 +304,7 @@ void dpaa2_eth_recycle_buf(struct dpaa2_
  728.     if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
  729.         return;
  730.  
  731. -   while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
  732. +   while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
  733.                            ch->recycled_bufs,
  734.                            ch->recycled_bufs_cnt)) == -EBUSY) {
  735.         if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
  736. @@ -323,8 +313,7 @@ void dpaa2_eth_recycle_buf(struct dpaa2_
  737.     }
  738.  
  739.     if (err) {
  740. -       dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
  741. -                   ch->recycled_bufs_cnt, ch->xsk_zc);
  742. +       dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
  743.         ch->buf_count -= ch->recycled_bufs_cnt;
  744.     }
  745.  
  746. @@ -388,10 +377,10 @@ static void dpaa2_eth_xdp_tx_flush(struc
  747.     fq->xdp_tx_fds.num = 0;
  748.  }
  749.  
  750. -void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
  751. -              struct dpaa2_eth_channel *ch,
  752. -              struct dpaa2_fd *fd,
  753. -              void *buf_start, u16 queue_id)
  754. +static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
  755. +                 struct dpaa2_eth_channel *ch,
  756. +                 struct dpaa2_fd *fd,
  757. +                 void *buf_start, u16 queue_id)
  758.  {
  759.     struct dpaa2_faead *faead;
  760.     struct dpaa2_fd *dest_fd;
  761. @@ -496,15 +485,19 @@ out:
  762.     return xdp_act;
  763.  }
  764.  
  765. -struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
  766. -                   struct dpaa2_eth_channel *ch,
  767. -                   const struct dpaa2_fd *fd, u32 fd_length,
  768. -                   void *fd_vaddr)
  769. +static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
  770. +                      const struct dpaa2_fd *fd,
  771. +                      void *fd_vaddr)
  772.  {
  773.     u16 fd_offset = dpaa2_fd_get_offset(fd);
  774. +   struct dpaa2_eth_priv *priv = ch->priv;
  775. +   u32 fd_length = dpaa2_fd_get_len(fd);
  776.     struct sk_buff *skb = NULL;
  777.     unsigned int skb_len;
  778.  
  779. +   if (fd_length > priv->rx_copybreak)
  780. +       return NULL;
  781. +
  782.     skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
  783.  
  784.     skb = napi_alloc_skb(&ch->napi, skb_len);
  785. @@ -521,66 +514,11 @@ struct sk_buff *dpaa2_eth_alloc_skb(stru
  786.     return skb;
  787.  }
  788.  
  789. -static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
  790. -                      const struct dpaa2_fd *fd,
  791. -                      void *fd_vaddr)
  792. -{
  793. -   struct dpaa2_eth_priv *priv = ch->priv;
  794. -   u32 fd_length = dpaa2_fd_get_len(fd);
  795. -
  796. -   if (fd_length > priv->rx_copybreak)
  797. -       return NULL;
  798. -
  799. -   return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
  800. -}
  801. -
  802. -void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
  803. -              struct dpaa2_eth_channel *ch,
  804. -              const struct dpaa2_fd *fd, void *vaddr,
  805. -              struct dpaa2_eth_fq *fq,
  806. -              struct rtnl_link_stats64 *percpu_stats,
  807. -              struct sk_buff *skb)
  808. -{
  809. -   struct dpaa2_fas *fas;
  810. -   u32 status = 0;
  811. -
  812. -   fas = dpaa2_get_fas(vaddr, false);
  813. -   prefetch(fas);
  814. -   prefetch(skb->data);
  815. -
  816. -   /* Get the timestamp value */
  817. -   if (priv->rx_tstamp) {
  818. -       struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
  819. -       __le64 *ts = dpaa2_get_ts(vaddr, false);
  820. -       u64 ns;
  821. -
  822. -       memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  823. -
  824. -       ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
  825. -       shhwtstamps->hwtstamp = ns_to_ktime(ns);
  826. -   }
  827. -
  828. -   /* Check if we need to validate the L4 csum */
  829. -   if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
  830. -       status = le32_to_cpu(fas->status);
  831. -       dpaa2_eth_validate_rx_csum(priv, status, skb);
  832. -   }
  833. -
  834. -   skb->protocol = eth_type_trans(skb, priv->net_dev);
  835. -   skb_record_rx_queue(skb, fq->flowid);
  836. -
  837. -   percpu_stats->rx_packets++;
  838. -   percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
  839. -   ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
  840. -
  841. -   list_add_tail(&skb->list, ch->rx_list);
  842. -}
  843. -
  844.  /* Main Rx frame processing routine */
  845. -void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
  846. -         struct dpaa2_eth_channel *ch,
  847. -         const struct dpaa2_fd *fd,
  848. -         struct dpaa2_eth_fq *fq)
  849. +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
  850. +            struct dpaa2_eth_channel *ch,
  851. +            const struct dpaa2_fd *fd,
  852. +            struct dpaa2_eth_fq *fq)
  853.  {
  854.     dma_addr_t addr = dpaa2_fd_get_addr(fd);
  855.     u8 fd_format = dpaa2_fd_get_format(fd);
  856. @@ -589,7 +527,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv
  857.     struct rtnl_link_stats64 *percpu_stats;
  858.     struct dpaa2_eth_drv_stats *percpu_extras;
  859.     struct device *dev = priv->net_dev->dev.parent;
  860. +   struct dpaa2_fas *fas;
  861.     void *buf_data;
  862. +   u32 status = 0;
  863.     u32 xdp_act;
  864.  
  865.     /* Tracing point */
  866. @@ -599,6 +539,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv
  867.     dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
  868.                 DMA_BIDIRECTIONAL);
  869.  
  870. +   fas = dpaa2_get_fas(vaddr, false);
  871. +   prefetch(fas);
  872.     buf_data = vaddr + dpaa2_fd_get_offset(fd);
  873.     prefetch(buf_data);
  874.  
  875. @@ -636,7 +578,35 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv
  876.     if (unlikely(!skb))
  877.         goto err_build_skb;
  878.  
  879. -   dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
  880. +   prefetch(skb->data);
  881. +
  882. +   /* Get the timestamp value */
  883. +   if (priv->rx_tstamp) {
  884. +       struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
  885. +       __le64 *ts = dpaa2_get_ts(vaddr, false);
  886. +       u64 ns;
  887. +
  888. +       memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  889. +
  890. +       ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
  891. +       shhwtstamps->hwtstamp = ns_to_ktime(ns);
  892. +   }
  893. +
  894. +   /* Check if we need to validate the L4 csum */
  895. +   if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
  896. +       status = le32_to_cpu(fas->status);
  897. +       dpaa2_eth_validate_rx_csum(priv, status, skb);
  898. +   }
  899. +
  900. +   skb->protocol = eth_type_trans(skb, priv->net_dev);
  901. +   skb_record_rx_queue(skb, fq->flowid);
  902. +
  903. +   percpu_stats->rx_packets++;
  904. +   percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
  905. +   ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
  906. +
  907. +   list_add_tail(&skb->list, ch->rx_list);
  908. +
  909.     return;
  910.  
  911.  err_build_skb:
  912. @@ -857,7 +827,7 @@ static void dpaa2_eth_enable_tx_tstamp(s
  913.     }
  914.  }
  915.  
  916. -void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
  917. +static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
  918.  {
  919.     struct dpaa2_eth_sgt_cache *sgt_cache;
  920.     void *sgt_buf = NULL;
  921. @@ -879,7 +849,7 @@ void *dpaa2_eth_sgt_get(struct dpaa2_eth
  922.     return sgt_buf;
  923.  }
  924.  
  925. -void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
  926. +static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
  927.  {
  928.     struct dpaa2_eth_sgt_cache *sgt_cache;
  929.  
  930. @@ -1114,10 +1084,9 @@ static int dpaa2_eth_build_single_fd(str
  931.   * This can be called either from dpaa2_eth_tx_conf() or on the error path of
  932.   * dpaa2_eth_tx().
  933.   */
  934. -void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
  935. -             struct dpaa2_eth_channel *ch,
  936. -             struct dpaa2_eth_fq *fq,
  937. -             const struct dpaa2_fd *fd, bool in_napi)
  938. +static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
  939. +                struct dpaa2_eth_fq *fq,
  940. +                const struct dpaa2_fd *fd, bool in_napi)
  941.  {
  942.     struct device *dev = priv->net_dev->dev.parent;
  943.     dma_addr_t fd_addr, sg_addr;
  944. @@ -1184,10 +1153,6 @@ void dpaa2_eth_free_tx_fd(struct dpaa2_e
  945.  
  946.             if (!swa->tso.is_last_fd)
  947.                 should_free_skb = 0;
  948. -       } else if (swa->type == DPAA2_ETH_SWA_XSK) {
  949. -           /* Unmap the SGT Buffer */
  950. -           dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
  951. -                    DMA_BIDIRECTIONAL);
  952.         } else {
  953.             skb = swa->single.skb;
  954.  
  955. @@ -1205,12 +1170,6 @@ void dpaa2_eth_free_tx_fd(struct dpaa2_e
  956.         return;
  957.     }
  958.  
  959. -   if (swa->type == DPAA2_ETH_SWA_XSK) {
  960. -       ch->xsk_tx_pkts_sent++;
  961. -       dpaa2_eth_sgt_recycle(priv, buffer_start);
  962. -       return;
  963. -   }
  964. -
  965.     if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
  966.         fq->dq_frames++;
  967.         fq->dq_bytes += fd_len;
  968. @@ -1385,7 +1344,7 @@ err_alloc_tso_hdr:
  969.  err_sgt_get:
  970.     /* Free all the other FDs that were already fully created */
  971.     for (i = 0; i < index; i++)
  972. -       dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
  973. +       dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
  974.  
  975.     return err;
  976.  }
  977. @@ -1501,7 +1460,7 @@ static netdev_tx_t __dpaa2_eth_tx(struct
  978.     if (unlikely(err < 0)) {
  979.         percpu_stats->tx_errors++;
  980.         /* Clean up everything, including freeing the skb */
  981. -       dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
  982. +       dpaa2_eth_free_tx_fd(priv, fq, fd, false);
  983.         netdev_tx_completed_queue(nq, 1, fd_len);
  984.     } else {
  985.         percpu_stats->tx_packets += total_enqueued;
  986. @@ -1594,7 +1553,7 @@ static void dpaa2_eth_tx_conf(struct dpa
  987.  
  988.     /* Check frame errors in the FD field */
  989.     fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
  990. -   dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
  991. +   dpaa2_eth_free_tx_fd(priv, fq, fd, true);
  992.  
  993.     if (likely(!fd_errors))
  994.         return;
  995. @@ -1672,76 +1631,44 @@ static int dpaa2_eth_set_tx_csum(struct
  996.   * to the specified buffer pool
  997.   */
  998.  static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
  999. -                 struct dpaa2_eth_channel *ch)
  1000. +                 struct dpaa2_eth_channel *ch, u16 bpid)
  1001.  {
  1002. -   struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
  1003.     struct device *dev = priv->net_dev->dev.parent;
  1004.     u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
  1005. -   struct dpaa2_eth_swa *swa;
  1006.     struct page *page;
  1007.     dma_addr_t addr;
  1008.     int retries = 0;
  1009. -   int i = 0, err;
  1010. -   u32 batch;
  1011. +   int i, err;
  1012.  
  1013. -   /* Allocate buffers visible to WRIOP */
  1014. -   if (!ch->xsk_zc) {
  1015. -       for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
  1016. -           /* Also allocate skb shared info and alignment padding.
  1017. -            * There is one page for each Rx buffer. WRIOP sees
  1018. -            * the entire page except for a tailroom reserved for
  1019. -            * skb shared info
  1020. -            */
  1021. -           page = dev_alloc_pages(0);
  1022. -           if (!page)
  1023. -               goto err_alloc;
  1024. -
  1025. -           addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
  1026. -                       DMA_BIDIRECTIONAL);
  1027. -           if (unlikely(dma_mapping_error(dev, addr)))
  1028. -               goto err_map;
  1029. -
  1030. -           buf_array[i] = addr;
  1031. -
  1032. -           /* tracing point */
  1033. -           trace_dpaa2_eth_buf_seed(priv->net_dev,
  1034. -                        page_address(page),
  1035. -                        DPAA2_ETH_RX_BUF_RAW_SIZE,
  1036. -                        addr, priv->rx_buf_size,
  1037. -                        ch->bp->bpid);
  1038. -       }
  1039. -   } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
  1040. -       /* Allocate XSK buffers for AF_XDP fast path in batches
  1041. -        * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
  1042. -        * provide enough buffers at the moment
  1043. +   for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
  1044. +       /* Allocate buffer visible to WRIOP + skb shared info +
  1045. +        * alignment padding
  1046.          */
  1047. -       batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
  1048. -                        DPAA2_ETH_BUFS_PER_CMD);
  1049. -       if (!batch)
  1050. +       /* allocate one page for each Rx buffer. WRIOP sees
  1051. +        * the entire page except for a tailroom reserved for
  1052. +        * skb shared info
  1053. +        */
  1054. +       page = dev_alloc_pages(0);
  1055. +       if (!page)
  1056.             goto err_alloc;
  1057.  
  1058. -       for (i = 0; i < batch; i++) {
  1059. -           swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
  1060. -                              DPAA2_ETH_RX_HWA_SIZE);
  1061. -           swa->xsk.xdp_buff = xdp_buffs[i];
  1062. -
  1063. -           addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
  1064. -           if (unlikely(dma_mapping_error(dev, addr)))
  1065. -               goto err_map;
  1066. -
  1067. -           buf_array[i] = addr;
  1068. -
  1069. -           trace_dpaa2_xsk_buf_seed(priv->net_dev,
  1070. -                        xdp_buffs[i]->data_hard_start,
  1071. -                        DPAA2_ETH_RX_BUF_RAW_SIZE,
  1072. -                        addr, priv->rx_buf_size,
  1073. -                        ch->bp->bpid);
  1074. -       }
  1075. +       addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
  1076. +                   DMA_BIDIRECTIONAL);
  1077. +       if (unlikely(dma_mapping_error(dev, addr)))
  1078. +           goto err_map;
  1079. +
  1080. +       buf_array[i] = addr;
  1081. +
  1082. +       /* tracing point */
  1083. +       trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
  1084. +                    DPAA2_ETH_RX_BUF_RAW_SIZE,
  1085. +                    addr, priv->rx_buf_size,
  1086. +                    bpid);
  1087.     }
  1088.  
  1089.  release_bufs:
  1090.     /* In case the portal is busy, retry until successful */
  1091. -   while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
  1092. +   while ((err = dpaa2_io_service_release(ch->dpio, bpid,
  1093.                            buf_array, i)) == -EBUSY) {
  1094.         if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
  1095.             break;
  1096. @@ -1752,19 +1679,14 @@ release_bufs:
  1097.      * not much else we can do about it
  1098.      */
  1099.     if (err) {
  1100. -       dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
  1101. +       dpaa2_eth_free_bufs(priv, buf_array, i);
  1102.         return 0;
  1103.     }
  1104.  
  1105.     return i;
  1106.  
  1107.  err_map:
  1108. -   if (!ch->xsk_zc) {
  1109. -       __free_pages(page, 0);
  1110. -   } else {
  1111. -       for (; i < batch; i++)
  1112. -           xsk_buff_free(xdp_buffs[i]);
  1113. -   }
  1114. +   __free_pages(page, 0);
  1115.  err_alloc:
  1116.     /* If we managed to allocate at least some buffers,
  1117.      * release them to hardware
  1118. @@ -1775,64 +1697,39 @@ err_alloc:
  1119.     return 0;
  1120.  }
  1121.  
  1122. -static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
  1123. -                  struct dpaa2_eth_channel *ch)
  1124. +static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
  1125.  {
  1126. -   int i;
  1127. +   int i, j;
  1128.     int new_count;
  1129.  
  1130. -   for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
  1131. -       new_count = dpaa2_eth_add_bufs(priv, ch);
  1132. -       ch->buf_count += new_count;
  1133. +   for (j = 0; j < priv->num_channels; j++) {
  1134. +       for (i = 0; i < DPAA2_ETH_NUM_BUFS;
  1135. +            i += DPAA2_ETH_BUFS_PER_CMD) {
  1136. +           new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
  1137. +           priv->channel[j]->buf_count += new_count;
  1138.  
  1139. -       if (new_count < DPAA2_ETH_BUFS_PER_CMD)
  1140. -           return -ENOMEM;
  1141. +           if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
  1142. +               return -ENOMEM;
  1143. +           }
  1144. +       }
  1145.     }
  1146.  
  1147.     return 0;
  1148.  }
  1149.  
  1150. -static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
  1151. -{
  1152. -   struct net_device *net_dev = priv->net_dev;
  1153. -   struct dpaa2_eth_channel *channel;
  1154. -   int i, err = 0;
  1155. -
  1156. -   for (i = 0; i < priv->num_channels; i++) {
  1157. -       channel = priv->channel[i];
  1158. -
  1159. -       err = dpaa2_eth_seed_pool(priv, channel);
  1160. -
  1161. -       /* Not much to do; the buffer pool, though not filled up,
  1162. -        * may still contain some buffers which would enable us
  1163. -        * to limp on.
  1164. -        */
  1165. -       if (err)
  1166. -           netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
  1167. -                  channel->bp->dev->obj_desc.id,
  1168. -                  channel->bp->bpid);
  1169. -   }
  1170. -}
  1171. -
  1172.  /*
  1173. - * Drain the specified number of buffers from one of the DPNI's private buffer
  1174. - * pools.
  1175. + * Drain the specified number of buffers from the DPNI's private buffer pool.
  1176.   * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
  1177.   */
  1178. -static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
  1179. -                int count)
  1180. +static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
  1181.  {
  1182.     u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
  1183. -   bool xsk_zc = false;
  1184.     int retries = 0;
  1185. -   int i, ret;
  1186. -
  1187. -   for (i = 0; i < priv->num_channels; i++)
  1188. -       if (priv->channel[i]->bp->bpid == bpid)
  1189. -           xsk_zc = priv->channel[i]->xsk_zc;
  1190. +   int ret;
  1191.  
  1192.     do {
  1193. -       ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
  1194. +       ret = dpaa2_io_service_acquire(NULL, priv->bpid,
  1195. +                          buf_array, count);
  1196.         if (ret < 0) {
  1197.             if (ret == -EBUSY &&
  1198.                 retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
  1199. @@ -1840,40 +1737,28 @@ static void dpaa2_eth_drain_bufs(struct
  1200.             netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
  1201.             return;
  1202.         }
  1203. -       dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
  1204. +       dpaa2_eth_free_bufs(priv, buf_array, ret);
  1205.         retries = 0;
  1206.     } while (ret);
  1207.  }
  1208.  
  1209. -static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
  1210. +static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
  1211.  {
  1212.     int i;
  1213.  
  1214. -   /* Drain the buffer pool */
  1215. -   dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
  1216. -   dpaa2_eth_drain_bufs(priv, bpid, 1);
  1217. +   dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
  1218. +   dpaa2_eth_drain_bufs(priv, 1);
  1219.  
  1220. -   /* Setup to zero the buffer count of all channels which were
  1221. -    * using this buffer pool.
  1222. -    */
  1223.     for (i = 0; i < priv->num_channels; i++)
  1224. -       if (priv->channel[i]->bp->bpid == bpid)
  1225. -           priv->channel[i]->buf_count = 0;
  1226. -}
  1227. -
  1228. -static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
  1229. -{
  1230. -   int i;
  1231. -
  1232. -   for (i = 0; i < priv->num_bps; i++)
  1233. -       dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
  1234. +       priv->channel[i]->buf_count = 0;
  1235.  }
  1236.  
  1237.  /* Function is called from softirq context only, so we don't need to guard
  1238.   * the access to percpu count
  1239.   */
  1240.  static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
  1241. -                struct dpaa2_eth_channel *ch)
  1242. +                struct dpaa2_eth_channel *ch,
  1243. +                u16 bpid)
  1244.  {
  1245.     int new_count;
  1246.  
  1247. @@ -1881,7 +1766,7 @@ static int dpaa2_eth_refill_pool(struct
  1248.         return 0;
  1249.  
  1250.     do {
  1251. -       new_count = dpaa2_eth_add_bufs(priv, ch);
  1252. +       new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
  1253.         if (unlikely(!new_count)) {
  1254.             /* Out of memory; abort for now, we'll try later on */
  1255.             break;
  1256. @@ -1945,7 +1830,6 @@ static int dpaa2_eth_poll(struct napi_st
  1257.     struct dpaa2_eth_fq *fq, *txc_fq = NULL;
  1258.     struct netdev_queue *nq;
  1259.     int store_cleaned, work_done;
  1260. -   bool work_done_zc = false;
  1261.     struct list_head rx_list;
  1262.     int retries = 0;
  1263.     u16 flowid;
  1264. @@ -1958,22 +1842,13 @@ static int dpaa2_eth_poll(struct napi_st
  1265.     INIT_LIST_HEAD(&rx_list);
  1266.     ch->rx_list = &rx_list;
  1267.  
  1268. -   if (ch->xsk_zc) {
  1269. -       work_done_zc = dpaa2_xsk_tx(priv, ch);
  1270. -       /* If we reached the XSK Tx per NAPI threshold, we're done */
  1271. -       if (work_done_zc) {
  1272. -           work_done = budget;
  1273. -           goto out;
  1274. -       }
  1275. -   }
  1276. -
  1277.     do {
  1278.         err = dpaa2_eth_pull_channel(ch);
  1279.         if (unlikely(err))
  1280.             break;
  1281.  
  1282.         /* Refill pool if appropriate */
  1283. -       dpaa2_eth_refill_pool(priv, ch);
  1284. +       dpaa2_eth_refill_pool(priv, ch, priv->bpid);
  1285.  
  1286.         store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
  1287.         if (store_cleaned <= 0)
  1288. @@ -2019,11 +1894,6 @@ static int dpaa2_eth_poll(struct napi_st
  1289.  out:
  1290.     netif_receive_skb_list(ch->rx_list);
  1291.  
  1292. -   if (ch->xsk_tx_pkts_sent) {
  1293. -       xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
  1294. -       ch->xsk_tx_pkts_sent = 0;
  1295. -   }
  1296. -
  1297.     if (txc_fq && txc_fq->dq_frames) {
  1298.         nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
  1299.         netdev_tx_completed_queue(nq, txc_fq->dq_frames,
  1300. @@ -2147,11 +2017,8 @@ static int dpaa2_eth_link_state_update(s
  1301.  
  1302.     /* When we manage the MAC/PHY using phylink there is no need
  1303.      * to manually update the netif_carrier.
  1304. -    * We can avoid locking because we are called from the "link changed"
  1305. -    * IRQ handler, which is the same as the "endpoint changed" IRQ handler
  1306. -    * (the writer to priv->mac), so we cannot race with it.
  1307.      */
  1308. -   if (dpaa2_mac_is_type_phy(priv->mac))
  1309. +   if (dpaa2_eth_is_type_phy(priv))
  1310.         goto out;
  1311.  
  1312.     /* Chech link state; speed / duplex changes are not treated yet */
  1313. @@ -2180,9 +2047,15 @@ static int dpaa2_eth_open(struct net_dev
  1314.     struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  1315.     int err;
  1316.  
  1317. -   dpaa2_eth_seed_pools(priv);
  1318. -
  1319. -   mutex_lock(&priv->mac_lock);
  1320. +   err = dpaa2_eth_seed_pool(priv, priv->bpid);
  1321. +   if (err) {
  1322. +       /* Not much to do; the buffer pool, though not filled up,
  1323. +        * may still contain some buffers which would enable us
  1324. +        * to limp on.
  1325. +        */
  1326. +       netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
  1327. +              priv->dpbp_dev->obj_desc.id, priv->bpid);
  1328. +   }
  1329.  
  1330.     if (!dpaa2_eth_is_type_phy(priv)) {
  1331.         /* We'll only start the txqs when the link is actually ready;
  1332. @@ -2202,21 +2075,20 @@ static int dpaa2_eth_open(struct net_dev
  1333.  
  1334.     err = dpni_enable(priv->mc_io, 0, priv->mc_token);
  1335.     if (err < 0) {
  1336. -       mutex_unlock(&priv->mac_lock);
  1337.         netdev_err(net_dev, "dpni_enable() failed\n");
  1338.         goto enable_err;
  1339.     }
  1340.  
  1341. -   if (dpaa2_eth_is_type_phy(priv))
  1342. +   if (dpaa2_eth_is_type_phy(priv)) {
  1343.         dpaa2_mac_start(priv->mac);
  1344. -
  1345. -   mutex_unlock(&priv->mac_lock);
  1346. +       phylink_start(priv->mac->phylink);
  1347. +   }
  1348.  
  1349.     return 0;
  1350.  
  1351.  enable_err:
  1352.     dpaa2_eth_disable_ch_napi(priv);
  1353. -   dpaa2_eth_drain_pools(priv);
  1354. +   dpaa2_eth_drain_pool(priv);
  1355.     return err;
  1356.  }
  1357.  
  1358. @@ -2283,17 +2155,14 @@ static int dpaa2_eth_stop(struct net_dev
  1359.     int dpni_enabled = 0;
  1360.     int retries = 10;
  1361.  
  1362. -   mutex_lock(&priv->mac_lock);
  1363. -
  1364.     if (dpaa2_eth_is_type_phy(priv)) {
  1365. +       phylink_stop(priv->mac->phylink);
  1366.         dpaa2_mac_stop(priv->mac);
  1367.     } else {
  1368.         netif_tx_stop_all_queues(net_dev);
  1369.         netif_carrier_off(net_dev);
  1370.     }
  1371.  
  1372. -   mutex_unlock(&priv->mac_lock);
  1373. -
  1374.     /* On dpni_disable(), the MC firmware will:
  1375.      * - stop MAC Rx and wait for all Rx frames to be enqueued to software
  1376.      * - cut off WRIOP dequeues from egress FQs and wait until transmission
  1377. @@ -2324,7 +2193,7 @@ static int dpaa2_eth_stop(struct net_dev
  1378.     dpaa2_eth_disable_ch_napi(priv);
  1379.  
  1380.     /* Empty the buffer pool */
  1381. -   dpaa2_eth_drain_pools(priv);
  1382. +   dpaa2_eth_drain_pool(priv);
  1383.  
  1384.     /* Empty the Scatter-Gather Buffer cache */
  1385.     dpaa2_eth_sgt_cache_drain(priv);
  1386. @@ -2619,20 +2488,12 @@ static int dpaa2_eth_ts_ioctl(struct net
  1387.  static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  1388.  {
  1389.     struct dpaa2_eth_priv *priv = netdev_priv(dev);
  1390. -   int err;
  1391.  
  1392.     if (cmd == SIOCSHWTSTAMP)
  1393.         return dpaa2_eth_ts_ioctl(dev, rq, cmd);
  1394.  
  1395. -   mutex_lock(&priv->mac_lock);
  1396. -
  1397. -   if (dpaa2_eth_is_type_phy(priv)) {
  1398. -       err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
  1399. -       mutex_unlock(&priv->mac_lock);
  1400. -       return err;
  1401. -   }
  1402. -
  1403. -   mutex_unlock(&priv->mac_lock);
  1404. +   if (dpaa2_eth_is_type_phy(priv))
  1405. +       return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
  1406.  
  1407.     return -EOPNOTSUPP;
  1408.  }
  1409. @@ -2741,7 +2602,7 @@ static int dpaa2_eth_setup_xdp(struct ne
  1410.     need_update = (!!priv->xdp_prog != !!prog);
  1411.  
  1412.     if (up)
  1413. -       dev_close(dev);
  1414. +       dpaa2_eth_stop(dev);
  1415.  
  1416.     /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
  1417.      * Also, when switching between xdp/non-xdp modes we need to reconfigure
  1418. @@ -2769,7 +2630,7 @@ static int dpaa2_eth_setup_xdp(struct ne
  1419.     }
  1420.  
  1421.     if (up) {
  1422. -       err = dev_open(dev, NULL);
  1423. +       err = dpaa2_eth_open(dev);
  1424.         if (err)
  1425.             return err;
  1426.     }
  1427. @@ -2780,7 +2641,7 @@ out_err:
  1428.     if (prog)
  1429.         bpf_prog_sub(prog, priv->num_channels);
  1430.     if (up)
  1431. -       dev_open(dev, NULL);
  1432. +       dpaa2_eth_open(dev);
  1433.  
  1434.     return err;
  1435.  }
  1436. @@ -2790,8 +2651,6 @@ static int dpaa2_eth_xdp(struct net_devi
  1437.     switch (xdp->command) {
  1438.     case XDP_SETUP_PROG:
  1439.         return dpaa2_eth_setup_xdp(dev, xdp->prog);
  1440. -   case XDP_SETUP_XSK_POOL:
  1441. -       return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
  1442.     default:
  1443.         return -EINVAL;
  1444.     }
  1445. @@ -3022,7 +2881,6 @@ static const struct net_device_ops dpaa2
  1446.     .ndo_change_mtu = dpaa2_eth_change_mtu,
  1447.     .ndo_bpf = dpaa2_eth_xdp,
  1448.     .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
  1449. -   .ndo_xsk_wakeup = dpaa2_xsk_wakeup,
  1450.     .ndo_setup_tc = dpaa2_eth_setup_tc,
  1451.     .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
  1452.     .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
  1453. @@ -3037,11 +2895,7 @@ static void dpaa2_eth_cdan_cb(struct dpa
  1454.     /* Update NAPI statistics */
  1455.     ch->stats.cdan++;
  1456.  
  1457. -   /* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed
  1458. -    * so that it can be rescheduled again.
  1459. -    */
  1460. -   if (!napi_if_scheduled_mark_missed(&ch->napi))
  1461. -       napi_schedule(&ch->napi);
  1462. +   napi_schedule(&ch->napi);
  1463.  }
  1464.  
  1465.  /* Allocate and configure a DPCON object */
  1466. @@ -3054,12 +2908,10 @@ static struct fsl_mc_device *dpaa2_eth_s
  1467.     err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
  1468.                      FSL_MC_POOL_DPCON, &dpcon);
  1469.     if (err) {
  1470. -       if (err == -ENXIO) {
  1471. -           dev_dbg(dev, "Waiting for DPCON\n");
  1472. +       if (err == -ENXIO)
  1473.             err = -EPROBE_DEFER;
  1474. -       } else {
  1475. +       else
  1476.             dev_info(dev, "Not enough DPCONs, will go on as-is\n");
  1477. -       }
  1478.         return ERR_PTR(err);
  1479.     }
  1480.  
  1481. @@ -3169,9 +3021,7 @@ static int dpaa2_eth_setup_dpio(struct d
  1482.         channel = dpaa2_eth_alloc_channel(priv);
  1483.         if (IS_ERR_OR_NULL(channel)) {
  1484.             err = PTR_ERR_OR_ZERO(channel);
  1485. -           if (err == -EPROBE_DEFER)
  1486. -               dev_dbg(dev, "waiting for affine channel\n");
  1487. -           else
  1488. +           if (err != -EPROBE_DEFER)
  1489.                 dev_info(dev,
  1490.                      "No affine channel for cpu %d and above\n", i);
  1491.             goto err_alloc_ch;
  1492. @@ -3354,14 +3204,13 @@ static void dpaa2_eth_setup_fqs(struct d
  1493.     dpaa2_eth_set_fq_affinity(priv);
  1494.  }
  1495.  
  1496. -/* Allocate and configure a buffer pool */
  1497. -struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
  1498. +/* Allocate and configure one buffer pool for each interface */
  1499. +static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
  1500.  {
  1501. -   struct device *dev = priv->net_dev->dev.parent;
  1502. +   int err;
  1503.     struct fsl_mc_device *dpbp_dev;
  1504. +   struct device *dev = priv->net_dev->dev.parent;
  1505.     struct dpbp_attr dpbp_attrs;
  1506. -   struct dpaa2_eth_bp *bp;
  1507. -   int err;
  1508.  
  1509.     err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
  1510.                      &dpbp_dev);
  1511. @@ -3370,16 +3219,12 @@ struct dpaa2_eth_bp *dpaa2_eth_allocate_
  1512.             err = -EPROBE_DEFER;
  1513.         else
  1514.             dev_err(dev, "DPBP device allocation failed\n");
  1515. -       return ERR_PTR(err);
  1516. +       return err;
  1517.     }
  1518.  
  1519. -   bp = kzalloc(sizeof(*bp), GFP_KERNEL);
  1520. -   if (!bp) {
  1521. -       err = -ENOMEM;
  1522. -       goto err_alloc;
  1523. -   }
  1524. +   priv->dpbp_dev = dpbp_dev;
  1525.  
  1526. -   err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
  1527. +   err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
  1528.             &dpbp_dev->mc_handle);
  1529.     if (err) {
  1530.         dev_err(dev, "dpbp_open() failed\n");
  1531. @@ -3404,11 +3249,9 @@ struct dpaa2_eth_bp *dpaa2_eth_allocate_
  1532.         dev_err(dev, "dpbp_get_attributes() failed\n");
  1533.         goto err_get_attr;
  1534.     }
  1535. +   priv->bpid = dpbp_attrs.bpid;
  1536.  
  1537. -   bp->dev = dpbp_dev;
  1538. -   bp->bpid = dpbp_attrs.bpid;
  1539. -
  1540. -   return bp;
  1541. +   return 0;
  1542.  
  1543.  err_get_attr:
  1544.     dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
  1545. @@ -3416,58 +3259,17 @@ err_enable:
  1546.  err_reset:
  1547.     dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
  1548.  err_open:
  1549. -   kfree(bp);
  1550. -err_alloc:
  1551.     fsl_mc_object_free(dpbp_dev);
  1552.  
  1553. -   return ERR_PTR(err);
  1554. -}
  1555. -
  1556. -static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
  1557. -{
  1558. -   struct dpaa2_eth_bp *bp;
  1559. -   int i;
  1560. -
  1561. -   bp = dpaa2_eth_allocate_dpbp(priv);
  1562. -   if (IS_ERR(bp))
  1563. -       return PTR_ERR(bp);
  1564. -
  1565. -   priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
  1566. -   priv->num_bps++;
  1567. -
  1568. -   for (i = 0; i < priv->num_channels; i++)
  1569. -       priv->channel[i]->bp = bp;
  1570. -
  1571. -   return 0;
  1572. -}
  1573. -
  1574. -void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
  1575. -{
  1576. -   int idx_bp;
  1577. -
  1578. -   /* Find the index at which this BP is stored */
  1579. -   for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
  1580. -       if (priv->bp[idx_bp] == bp)
  1581. -           break;
  1582. -
  1583. -   /* Drain the pool and disable the associated MC object */
  1584. -   dpaa2_eth_drain_pool(priv, bp->bpid);
  1585. -   dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
  1586. -   dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
  1587. -   fsl_mc_object_free(bp->dev);
  1588. -   kfree(bp);
  1589. -
  1590. -   /* Move the last in use DPBP over in this position */
  1591. -   priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
  1592. -   priv->num_bps--;
  1593. +   return err;
  1594.  }
  1595.  
  1596. -static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
  1597. +static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
  1598.  {
  1599. -   int i;
  1600. -
  1601. -   for (i = 0; i < priv->num_bps; i++)
  1602. -       dpaa2_eth_free_dpbp(priv, priv->bp[i]);
  1603. +   dpaa2_eth_drain_pool(priv);
  1604. +   dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
  1605. +   dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
  1606. +   fsl_mc_object_free(priv->dpbp_dev);
  1607.  }
  1608.  
  1609.  static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
  1610. @@ -3808,7 +3610,7 @@ static int dpaa2_eth_setup_dpni(struct f
  1611.         dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
  1612.             priv->dpni_ver_major, priv->dpni_ver_minor,
  1613.             DPNI_VER_MAJOR, DPNI_VER_MINOR);
  1614. -       err = -EOPNOTSUPP;
  1615. +       err = -ENOTSUPP;
  1616.         goto close;
  1617.     }
  1618.  
  1619. @@ -4352,16 +4154,15 @@ out:
  1620.   */
  1621.  static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
  1622.  {
  1623. -   struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
  1624.     struct net_device *net_dev = priv->net_dev;
  1625. -   struct dpni_pools_cfg pools_params = { 0 };
  1626.     struct device *dev = net_dev->dev.parent;
  1627. +   struct dpni_pools_cfg pools_params;
  1628.     struct dpni_error_cfg err_cfg;
  1629.     int err = 0;
  1630.     int i;
  1631.  
  1632.     pools_params.num_dpbp = 1;
  1633. -   pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
  1634. +   pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
  1635.     pools_params.pools[0].backup_pool = 0;
  1636.     pools_params.pools[0].buffer_size = priv->rx_buf_size;
  1637.     err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
  1638. @@ -4625,10 +4426,8 @@ static int dpaa2_eth_connect_mac(struct
  1639.     dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
  1640.     dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
  1641.  
  1642. -   if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
  1643. -       netdev_dbg(priv->net_dev, "waiting for mac\n");
  1644. +   if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
  1645.         return PTR_ERR(dpmac_dev);
  1646. -   }
  1647.  
  1648.     if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
  1649.         return 0;
  1650. @@ -4644,29 +4443,22 @@ static int dpaa2_eth_connect_mac(struct
  1651.     err = dpaa2_mac_open(mac);
  1652.     if (err)
  1653.         goto err_free_mac;
  1654. +   priv->mac = mac;
  1655.  
  1656. -   if (dpaa2_mac_is_type_phy(mac)) {
  1657. +   if (dpaa2_eth_is_type_phy(priv)) {
  1658.         err = dpaa2_mac_connect(mac);
  1659. -       if (err) {
  1660. -           if (err == -EPROBE_DEFER)
  1661. -               netdev_dbg(priv->net_dev,
  1662. -                      "could not connect to MAC\n");
  1663. -           else
  1664. -               netdev_err(priv->net_dev,
  1665. -                      "Error connecting to the MAC endpoint: %pe",
  1666. -                      ERR_PTR(err));
  1667. +       if (err && err != -EPROBE_DEFER)
  1668. +           netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
  1669. +                  ERR_PTR(err));
  1670. +       if (err)
  1671.             goto err_close_mac;
  1672. -       }
  1673.     }
  1674.  
  1675. -   mutex_lock(&priv->mac_lock);
  1676. -   priv->mac = mac;
  1677. -   mutex_unlock(&priv->mac_lock);
  1678. -
  1679.     return 0;
  1680.  
  1681.  err_close_mac:
  1682.     dpaa2_mac_close(mac);
  1683. +   priv->mac = NULL;
  1684.  err_free_mac:
  1685.     kfree(mac);
  1686.     return err;
  1687. @@ -4674,21 +4466,15 @@ err_free_mac:
  1688.  
  1689.  static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
  1690.  {
  1691. -   struct dpaa2_mac *mac;
  1692. -
  1693. -   mutex_lock(&priv->mac_lock);
  1694. -   mac = priv->mac;
  1695. -   priv->mac = NULL;
  1696. -   mutex_unlock(&priv->mac_lock);
  1697. +   if (dpaa2_eth_is_type_phy(priv))
  1698. +       dpaa2_mac_disconnect(priv->mac);
  1699.  
  1700. -   if (!mac)
  1701. +   if (!dpaa2_eth_has_mac(priv))
  1702.         return;
  1703.  
  1704. -   if (dpaa2_mac_is_type_phy(mac))
  1705. -       dpaa2_mac_disconnect(mac);
  1706. -
  1707. -   dpaa2_mac_close(mac);
  1708. -   kfree(mac);
  1709. +   dpaa2_mac_close(priv->mac);
  1710. +   kfree(priv->mac);
  1711. +   priv->mac = NULL;
  1712.  }
  1713.  
  1714.  static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
  1715. @@ -4698,7 +4484,6 @@ static irqreturn_t dpni_irq0_handler_thr
  1716.     struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
  1717.     struct net_device *net_dev = dev_get_drvdata(dev);
  1718.     struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  1719. -   bool had_mac;
  1720.     int err;
  1721.  
  1722.     err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
  1723. @@ -4715,15 +4500,12 @@ static irqreturn_t dpni_irq0_handler_thr
  1724.         dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
  1725.         dpaa2_eth_update_tx_fqids(priv);
  1726.  
  1727. -       /* We can avoid locking because the "endpoint changed" IRQ
  1728. -        * handler is the only one who changes priv->mac at runtime,
  1729. -        * so we are not racing with anyone.
  1730. -        */
  1731. -       had_mac = !!priv->mac;
  1732. -       if (had_mac)
  1733. +       rtnl_lock();
  1734. +       if (dpaa2_eth_has_mac(priv))
  1735.             dpaa2_eth_disconnect_mac(priv);
  1736.         else
  1737.             dpaa2_eth_connect_mac(priv);
  1738. +       rtnl_unlock();
  1739.     }
  1740.  
  1741.     return IRQ_HANDLED;
  1742. @@ -4819,9 +4601,6 @@ static int dpaa2_eth_probe(struct fsl_mc
  1743.  
  1744.     priv = netdev_priv(net_dev);
  1745.     priv->net_dev = net_dev;
  1746. -   SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port);
  1747. -
  1748. -   mutex_init(&priv->mac_lock);
  1749.  
  1750.     priv->iommu_domain = iommu_get_domain_for_dev(dev);
  1751.  
  1752. @@ -4844,12 +4623,10 @@ static int dpaa2_eth_probe(struct fsl_mc
  1753.     err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
  1754.                      &priv->mc_io);
  1755.     if (err) {
  1756. -       if (err == -ENXIO) {
  1757. -           dev_dbg(dev, "waiting for MC portal\n");
  1758. +       if (err == -ENXIO)
  1759.             err = -EPROBE_DEFER;
  1760. -       } else {
  1761. +       else
  1762.             dev_err(dev, "MC portal allocation failed\n");
  1763. -       }
  1764.         goto err_portal_alloc;
  1765.     }
  1766.  
  1767. @@ -4864,7 +4641,7 @@ static int dpaa2_eth_probe(struct fsl_mc
  1768.  
  1769.     dpaa2_eth_setup_fqs(priv);
  1770.  
  1771. -   err = dpaa2_eth_setup_default_dpbp(priv);
  1772. +   err = dpaa2_eth_setup_dpbp(priv);
  1773.     if (err)
  1774.         goto err_dpbp_setup;
  1775.  
  1776. @@ -4930,10 +4707,6 @@ static int dpaa2_eth_probe(struct fsl_mc
  1777.     }
  1778.  #endif
  1779.  
  1780. -   err = dpaa2_eth_connect_mac(priv);
  1781. -   if (err)
  1782. -       goto err_connect_mac;
  1783. -
  1784.     err = dpaa2_eth_setup_irqs(dpni_dev);
  1785.     if (err) {
  1786.         netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
  1787. @@ -4946,6 +4719,10 @@ static int dpaa2_eth_probe(struct fsl_mc
  1788.         priv->do_link_poll = true;
  1789.     }
  1790.  
  1791. +   err = dpaa2_eth_connect_mac(priv);
  1792. +   if (err)
  1793. +       goto err_connect_mac;
  1794. +
  1795.     err = dpaa2_eth_dl_alloc(priv);
  1796.     if (err)
  1797.         goto err_dl_register;
  1798. @@ -4979,13 +4756,13 @@ err_dl_port_add:
  1799.  err_dl_trap_register:
  1800.     dpaa2_eth_dl_free(priv);
  1801.  err_dl_register:
  1802. +   dpaa2_eth_disconnect_mac(priv);
  1803. +err_connect_mac:
  1804.     if (priv->do_link_poll)
  1805.         kthread_stop(priv->poll_thread);
  1806.     else
  1807.         fsl_mc_free_irqs(dpni_dev);
  1808.  err_poll_thread:
  1809. -   dpaa2_eth_disconnect_mac(priv);
  1810. -err_connect_mac:
  1811.     dpaa2_eth_free_rings(priv);
  1812.  err_alloc_rings:
  1813.  err_csum:
  1814. @@ -5000,7 +4777,7 @@ err_alloc_percpu_extras:
  1815.  err_alloc_percpu_stats:
  1816.     dpaa2_eth_del_ch_napi(priv);
  1817.  err_bind:
  1818. -   dpaa2_eth_free_dpbps(priv);
  1819. +   dpaa2_eth_free_dpbp(priv);
  1820.  err_dpbp_setup:
  1821.     dpaa2_eth_free_dpio(priv);
  1822.  err_dpio_setup:
  1823. @@ -5033,6 +4810,9 @@ static int dpaa2_eth_remove(struct fsl_m
  1824.  #endif
  1825.  
  1826.     unregister_netdev(net_dev);
  1827. +   rtnl_lock();
  1828. +   dpaa2_eth_disconnect_mac(priv);
  1829. +   rtnl_unlock();
  1830.  
  1831.     dpaa2_eth_dl_port_del(priv);
  1832.     dpaa2_eth_dl_traps_unregister(priv);
  1833. @@ -5043,7 +4823,6 @@ static int dpaa2_eth_remove(struct fsl_m
  1834.     else
  1835.         fsl_mc_free_irqs(ls_dev);
  1836.  
  1837. -   dpaa2_eth_disconnect_mac(priv);
  1838.     dpaa2_eth_free_rings(priv);
  1839.     free_percpu(priv->fd);
  1840.     free_percpu(priv->sgt_cache);
  1841. @@ -5051,7 +4830,7 @@ static int dpaa2_eth_remove(struct fsl_m
  1842.     free_percpu(priv->percpu_extras);
  1843.  
  1844.     dpaa2_eth_del_ch_napi(priv);
  1845. -   dpaa2_eth_free_dpbps(priv);
  1846. +   dpaa2_eth_free_dpbp(priv);
  1847.     dpaa2_eth_free_dpio(priv);
  1848.     dpaa2_eth_free_dpni(priv);
  1849.     if (priv->onestep_reg_base)
  1850. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
  1851. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h  2022-12-25 22:41:39.000000000 +0100
  1852. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h  2022-12-31 15:56:55.295955322 +0100
  1853. @@ -1,6 +1,6 @@
  1854.  /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
  1855.  /* Copyright 2014-2016 Freescale Semiconductor Inc.
  1856. - * Copyright 2016-2022 NXP
  1857. + * Copyright 2016-2020 NXP
  1858.   */
  1859.  
  1860.  #ifndef __DPAA2_ETH_H
  1861. @@ -53,12 +53,6 @@
  1862.   */
  1863.  #define DPAA2_ETH_TXCONF_PER_NAPI  256
  1864.  
  1865. -/* Maximum number of Tx frames to be processed in a single NAPI
  1866. - * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
  1867. - * to maximize the throughput.
  1868. - */
  1869. -#define DPAA2_ETH_TX_ZC_PER_NAPI   DPAA2_ETH_TXCONF_PER_NAPI
  1870. -
  1871.  /* Buffer qouta per channel. We want to keep in check number of ingress frames
  1872.   * in flight: for small sized frames, congestion group taildrop may kick in
  1873.   * first; for large sizes, Rx FQ taildrop threshold will ensure only a
  1874. @@ -115,14 +109,6 @@
  1875.  #define DPAA2_ETH_RX_BUF_ALIGN_REV1    256
  1876.  #define DPAA2_ETH_RX_BUF_ALIGN     64
  1877.  
  1878. -/* The firmware allows assigning multiple buffer pools to a single DPNI -
  1879. - * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
  1880. - * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBPs
  1881. - * object: the default and 8 other distinct buffer pools, one for each queue.
  1882. - */
  1883. -#define DPAA2_ETH_DEFAULT_BP_IDX   0
  1884. -#define DPAA2_ETH_MAX_BPS      9
  1885. -
  1886.  /* We are accommodating a skb backpointer and some S/G info
  1887.   * in the frame's software annotation. The hardware
  1888.   * options are either 0 or 64, so we choose the latter.
  1889. @@ -136,7 +122,6 @@ enum dpaa2_eth_swa_type {
  1890.     DPAA2_ETH_SWA_SINGLE,
  1891.     DPAA2_ETH_SWA_SG,
  1892.     DPAA2_ETH_SWA_XDP,
  1893. -   DPAA2_ETH_SWA_XSK,
  1894.     DPAA2_ETH_SWA_SW_TSO,
  1895.  };
  1896.  
  1897. @@ -159,10 +144,6 @@ struct dpaa2_eth_swa {
  1898.             struct xdp_frame *xdpf;
  1899.         } xdp;
  1900.         struct {
  1901. -           struct xdp_buff *xdp_buff;
  1902. -           int sgt_size;
  1903. -       } xsk;
  1904. -       struct {
  1905.             struct sk_buff *skb;
  1906.             int num_sg;
  1907.             int sgt_size;
  1908. @@ -440,19 +421,12 @@ enum dpaa2_eth_fq_type {
  1909.  };
  1910.  
  1911.  struct dpaa2_eth_priv;
  1912. -struct dpaa2_eth_channel;
  1913. -struct dpaa2_eth_fq;
  1914.  
  1915.  struct dpaa2_eth_xdp_fds {
  1916.     struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
  1917.     ssize_t num;
  1918.  };
  1919.  
  1920. -typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv,
  1921. -                   struct dpaa2_eth_channel *ch,
  1922. -                   const struct dpaa2_fd *fd,
  1923. -                   struct dpaa2_eth_fq *fq);
  1924. -
  1925.  struct dpaa2_eth_fq {
  1926.     u32 fqid;
  1927.     u32 tx_qdbin;
  1928. @@ -465,7 +439,10 @@ struct dpaa2_eth_fq {
  1929.     struct dpaa2_eth_channel *channel;
  1930.     enum dpaa2_eth_fq_type type;
  1931.  
  1932. -   dpaa2_eth_consume_cb_t *consume;
  1933. +   void (*consume)(struct dpaa2_eth_priv *priv,
  1934. +           struct dpaa2_eth_channel *ch,
  1935. +           const struct dpaa2_fd *fd,
  1936. +           struct dpaa2_eth_fq *fq);
  1937.     struct dpaa2_eth_fq_stats stats;
  1938.  
  1939.     struct dpaa2_eth_xdp_fds xdp_redirect_fds;
  1940. @@ -477,11 +454,6 @@ struct dpaa2_eth_ch_xdp {
  1941.     unsigned int res;
  1942.  };
  1943.  
  1944. -struct dpaa2_eth_bp {
  1945. -   struct fsl_mc_device *dev;
  1946. -   int bpid;
  1947. -};
  1948. -
  1949.  struct dpaa2_eth_channel {
  1950.     struct dpaa2_io_notification_ctx nctx;
  1951.     struct fsl_mc_device *dpcon;
  1952. @@ -500,11 +472,6 @@ struct dpaa2_eth_channel {
  1953.     /* Buffers to be recycled back in the buffer pool */
  1954.     u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
  1955.     int recycled_bufs_cnt;
  1956. -
  1957. -   bool xsk_zc;
  1958. -   int xsk_tx_pkts_sent;
  1959. -   struct xsk_buff_pool *xsk_pool;
  1960. -   struct dpaa2_eth_bp *bp;
  1961.  };
  1962.  
  1963.  struct dpaa2_eth_dist_fields {
  1964. @@ -539,7 +506,7 @@ struct dpaa2_eth_trap_data {
  1965.  
  1966.  #define DPAA2_ETH_DEFAULT_COPYBREAK    512
  1967.  
  1968. -#define DPAA2_ETH_ENQUEUE_MAX_FDS  256
  1969. +#define DPAA2_ETH_ENQUEUE_MAX_FDS  200
  1970.  struct dpaa2_eth_fds {
  1971.     struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
  1972.  };
  1973. @@ -568,16 +535,14 @@ struct dpaa2_eth_priv {
  1974.     u8 ptp_correction_off;
  1975.     void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
  1976.                         u32 offset, u8 udp);
  1977. +   struct fsl_mc_device *dpbp_dev;
  1978.     u16 rx_buf_size;
  1979. +   u16 bpid;
  1980.     struct iommu_domain *iommu_domain;
  1981.  
  1982.     enum hwtstamp_tx_types tx_tstamp_type;  /* Tx timestamping type */
  1983.     bool rx_tstamp;             /* Rx timestamping enabled */
  1984.  
  1985. -   /* Buffer pool management */
  1986. -   struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
  1987. -   int num_bps;
  1988. -
  1989.     u16 tx_qdid;
  1990.     struct fsl_mc_io *mc_io;
  1991.     /* Cores which have an affine DPIO/DPCON.
  1992. @@ -615,8 +580,6 @@ struct dpaa2_eth_priv {
  1993.  #endif
  1994.  
  1995.     struct dpaa2_mac *mac;
  1996. -   /* Serializes changes to priv->mac */
  1997. -   struct mutex        mac_lock;
  1998.     struct workqueue_struct *dpaa2_ptp_wq;
  1999.     struct work_struct  tx_onestep_tstamp;
  2000.     struct sk_buff_head tx_skbs;
  2001. @@ -770,15 +733,16 @@ static inline unsigned int dpaa2_eth_rx_
  2002.  
  2003.  static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
  2004.  {
  2005. -   lockdep_assert_held(&priv->mac_lock);
  2006. +   if (priv->mac &&
  2007. +       (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
  2008. +        priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
  2009. +       return true;
  2010.  
  2011. -   return dpaa2_mac_is_type_phy(priv->mac);
  2012. +   return false;
  2013.  }
  2014.  
  2015.  static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
  2016.  {
  2017. -   lockdep_assert_held(&priv->mac_lock);
  2018. -
  2019.     return priv->mac ? true : false;
  2020.  }
  2021.  
  2022. @@ -807,54 +771,4 @@ void dpaa2_eth_dl_traps_unregister(struc
  2023.  
  2024.  struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
  2025.                           struct dpaa2_fapr *fapr);
  2026. -
  2027. -struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
  2028. -void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
  2029. -
  2030. -struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
  2031. -                   struct dpaa2_eth_channel *ch,
  2032. -                   const struct dpaa2_fd *fd, u32 fd_length,
  2033. -                   void *fd_vaddr);
  2034. -
  2035. -void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
  2036. -              struct dpaa2_eth_channel *ch,
  2037. -              const struct dpaa2_fd *fd, void *vaddr,
  2038. -              struct dpaa2_eth_fq *fq,
  2039. -              struct rtnl_link_stats64 *percpu_stats,
  2040. -              struct sk_buff *skb);
  2041. -
  2042. -void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
  2043. -         struct dpaa2_eth_channel *ch,
  2044. -         const struct dpaa2_fd *fd,
  2045. -         struct dpaa2_eth_fq *fq);
  2046. -
  2047. -struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
  2048. -void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv,
  2049. -            struct dpaa2_eth_bp *bp);
  2050. -
  2051. -void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
  2052. -void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
  2053. -              struct dpaa2_eth_channel *ch,
  2054. -              dma_addr_t addr);
  2055. -
  2056. -void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
  2057. -              struct dpaa2_eth_channel *ch,
  2058. -              struct dpaa2_fd *fd,
  2059. -              void *buf_start, u16 queue_id);
  2060. -
  2061. -int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
  2062. -int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
  2063. -
  2064. -void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
  2065. -             struct dpaa2_eth_channel *ch,
  2066. -             struct dpaa2_eth_fq *fq,
  2067. -             const struct dpaa2_fd *fd, bool in_napi);
  2068. -bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
  2069. -         struct dpaa2_eth_channel *ch);
  2070. -
  2071. -/* SGT (Scatter-Gather Table) cache management */
  2072. -void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
  2073. -
  2074. -void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
  2075. -
  2076.  #endif /* __DPAA2_H */
  2077. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
  2078. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c  2022-12-25 22:41:39.000000000 +0100
  2079. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c  2022-12-31 15:56:55.295955322 +0100
  2080. @@ -1,6 +1,7 @@
  2081.  // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
  2082.  /* Copyright 2014-2016 Freescale Semiconductor Inc.
  2083. - * Copyright 2016-2022 NXP
  2084. + * Copyright 2016 NXP
  2085. + * Copyright 2020 NXP
  2086.   */
  2087.  
  2088.  #include <linux/net_tstamp.h>
  2089. @@ -85,16 +86,11 @@ static void dpaa2_eth_get_drvinfo(struct
  2090.  static int dpaa2_eth_nway_reset(struct net_device *net_dev)
  2091.  {
  2092.     struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2093. -   int err = -EOPNOTSUPP;
  2094. -
  2095. -   mutex_lock(&priv->mac_lock);
  2096.  
  2097.     if (dpaa2_eth_is_type_phy(priv))
  2098. -       err = phylink_ethtool_nway_reset(priv->mac->phylink);
  2099. -
  2100. -   mutex_unlock(&priv->mac_lock);
  2101. +       return phylink_ethtool_nway_reset(priv->mac->phylink);
  2102.  
  2103. -   return err;
  2104. +   return -EOPNOTSUPP;
  2105.  }
  2106.  
  2107.  static int
  2108. @@ -102,18 +98,10 @@ dpaa2_eth_get_link_ksettings(struct net_
  2109.                  struct ethtool_link_ksettings *link_settings)
  2110.  {
  2111.     struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2112. -   int err;
  2113.  
  2114. -   mutex_lock(&priv->mac_lock);
  2115. -
  2116. -   if (dpaa2_eth_is_type_phy(priv)) {
  2117. -       err = phylink_ethtool_ksettings_get(priv->mac->phylink,
  2118. -                           link_settings);
  2119. -       mutex_unlock(&priv->mac_lock);
  2120. -       return err;
  2121. -   }
  2122. -
  2123. -   mutex_unlock(&priv->mac_lock);
  2124. +   if (dpaa2_eth_is_type_phy(priv))
  2125. +       return phylink_ethtool_ksettings_get(priv->mac->phylink,
  2126. +                            link_settings);
  2127.  
  2128.     link_settings->base.autoneg = AUTONEG_DISABLE;
  2129.     if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
  2130. @@ -128,17 +116,11 @@ dpaa2_eth_set_link_ksettings(struct net_
  2131.                  const struct ethtool_link_ksettings *link_settings)
  2132.  {
  2133.     struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2134. -   int err = -EOPNOTSUPP;
  2135. -
  2136. -   mutex_lock(&priv->mac_lock);
  2137. -
  2138. -   if (dpaa2_eth_is_type_phy(priv))
  2139. -       err = phylink_ethtool_ksettings_set(priv->mac->phylink,
  2140. -                           link_settings);
  2141.  
  2142. -   mutex_unlock(&priv->mac_lock);
  2143. +   if (!dpaa2_eth_is_type_phy(priv))
  2144. +       return -ENOTSUPP;
  2145.  
  2146. -   return err;
  2147. +   return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
  2148.  }
  2149.  
  2150.  static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
  2151. @@ -147,16 +129,11 @@ static void dpaa2_eth_get_pauseparam(str
  2152.     struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2153.     u64 link_options = priv->link_state.options;
  2154.  
  2155. -   mutex_lock(&priv->mac_lock);
  2156. -
  2157.     if (dpaa2_eth_is_type_phy(priv)) {
  2158.         phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
  2159. -       mutex_unlock(&priv->mac_lock);
  2160.         return;
  2161.     }
  2162.  
  2163. -   mutex_unlock(&priv->mac_lock);
  2164. -
  2165.     pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
  2166.     pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
  2167.     pause->autoneg = AUTONEG_DISABLE;
  2168. @@ -175,17 +152,9 @@ static int dpaa2_eth_set_pauseparam(stru
  2169.         return -EOPNOTSUPP;
  2170.     }
  2171.  
  2172. -   mutex_lock(&priv->mac_lock);
  2173. -
  2174. -   if (dpaa2_eth_is_type_phy(priv)) {
  2175. -       err = phylink_ethtool_set_pauseparam(priv->mac->phylink,
  2176. -                            pause);
  2177. -       mutex_unlock(&priv->mac_lock);
  2178. -       return err;
  2179. -   }
  2180. -
  2181. -   mutex_unlock(&priv->mac_lock);
  2182. -
  2183. +   if (dpaa2_eth_is_type_phy(priv))
  2184. +       return phylink_ethtool_set_pauseparam(priv->mac->phylink,
  2185. +                             pause);
  2186.     if (pause->autoneg)
  2187.         return -EOPNOTSUPP;
  2188.  
  2189. @@ -217,6 +186,7 @@ static int dpaa2_eth_set_pauseparam(stru
  2190.  static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
  2191.                   u8 *data)
  2192.  {
  2193. +   struct dpaa2_eth_priv *priv = netdev_priv(netdev);
  2194.     u8 *p = data;
  2195.     int i;
  2196.  
  2197. @@ -230,17 +200,22 @@ static void dpaa2_eth_get_strings(struct
  2198.             strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
  2199.             p += ETH_GSTRING_LEN;
  2200.         }
  2201. -       dpaa2_mac_get_strings(p);
  2202. +       if (dpaa2_eth_has_mac(priv))
  2203. +           dpaa2_mac_get_strings(p);
  2204.         break;
  2205.     }
  2206.  }
  2207.  
  2208.  static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
  2209.  {
  2210. +   int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
  2211. +   struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2212. +
  2213.     switch (sset) {
  2214.     case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
  2215. -       return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS +
  2216. -              dpaa2_mac_get_sset_count();
  2217. +       if (dpaa2_eth_has_mac(priv))
  2218. +           num_ss_stats += dpaa2_mac_get_sset_count();
  2219. +       return num_ss_stats;
  2220.     default:
  2221.         return -EOPNOTSUPP;
  2222.     }
  2223. @@ -252,8 +227,17 @@ static void dpaa2_eth_get_ethtool_stats(
  2224.                     struct ethtool_stats *stats,
  2225.                     u64 *data)
  2226.  {
  2227. -   struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2228. +   int i = 0;
  2229. +   int j, k, err;
  2230. +   int num_cnt;
  2231.     union dpni_statistics dpni_stats;
  2232. +   u32 fcnt, bcnt;
  2233. +   u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
  2234. +   u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
  2235. +   u32 buf_cnt;
  2236. +   struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2237. +   struct dpaa2_eth_drv_stats *extras;
  2238. +   struct dpaa2_eth_ch_stats *ch_stats;
  2239.     int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
  2240.         sizeof(dpni_stats.page_0),
  2241.         sizeof(dpni_stats.page_1),
  2242. @@ -263,13 +247,6 @@ static void dpaa2_eth_get_ethtool_stats(
  2243.         sizeof(dpni_stats.page_5),
  2244.         sizeof(dpni_stats.page_6),
  2245.     };
  2246. -   u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
  2247. -   u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
  2248. -   struct dpaa2_eth_ch_stats *ch_stats;
  2249. -   struct dpaa2_eth_drv_stats *extras;
  2250. -   u32 buf_cnt, buf_cnt_total = 0;
  2251. -   int j, k, err, num_cnt, i = 0;
  2252. -   u32 fcnt, bcnt;
  2253.  
  2254.     memset(data, 0,
  2255.            sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
  2256. @@ -331,22 +308,15 @@ static void dpaa2_eth_get_ethtool_stats(
  2257.     *(data + i++) = fcnt_tx_total;
  2258.     *(data + i++) = bcnt_tx_total;
  2259.  
  2260. -   for (j = 0; j < priv->num_bps; j++) {
  2261. -       err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
  2262. -       if (err) {
  2263. -           netdev_warn(net_dev, "Buffer count query error %d\n", err);
  2264. -           return;
  2265. -       }
  2266. -       buf_cnt_total += buf_cnt;
  2267. +   err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
  2268. +   if (err) {
  2269. +       netdev_warn(net_dev, "Buffer count query error %d\n", err);
  2270. +       return;
  2271.     }
  2272. -   *(data + i++) = buf_cnt_total;
  2273. -
  2274. -   mutex_lock(&priv->mac_lock);
  2275. +   *(data + i++) = buf_cnt;
  2276.  
  2277.     if (dpaa2_eth_has_mac(priv))
  2278.         dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
  2279. -
  2280. -   mutex_unlock(&priv->mac_lock);
  2281.  }
  2282.  
  2283.  static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
  2284. @@ -906,29 +876,6 @@ restore_rx_usecs:
  2285.     return err;
  2286.  }
  2287.  
  2288. -static void dpaa2_eth_get_channels(struct net_device *net_dev,
  2289. -                  struct ethtool_channels *channels)
  2290. -{
  2291. -   struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
  2292. -   int queue_count = dpaa2_eth_queue_count(priv);
  2293. -
  2294. -   channels->max_rx = queue_count;
  2295. -   channels->max_tx = queue_count;
  2296. -   channels->rx_count = queue_count;
  2297. -   channels->tx_count = queue_count;
  2298. -
  2299. -   /* Tx confirmation and Rx error */
  2300. -   channels->max_other = queue_count + 1;
  2301. -   channels->max_combined = channels->max_rx +
  2302. -                channels->max_tx +
  2303. -                channels->max_other;
  2304. -   /* Tx conf and Rx err */
  2305. -   channels->other_count = queue_count + 1;
  2306. -   channels->combined_count = channels->rx_count +
  2307. -                  channels->tx_count +
  2308. -                  channels->other_count;
  2309. -}
  2310. -
  2311.  const struct ethtool_ops dpaa2_ethtool_ops = {
  2312.     .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
  2313.                      ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
  2314. @@ -949,5 +896,4 @@ const struct ethtool_ops dpaa2_ethtool_o
  2315.     .set_tunable = dpaa2_eth_set_tunable,
  2316.     .get_coalesce = dpaa2_eth_get_coalesce,
  2317.     .set_coalesce = dpaa2_eth_set_coalesce,
  2318. -   .get_channels = dpaa2_eth_get_channels,
  2319.  };
  2320. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
  2321. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c  2022-12-25 22:41:39.000000000 +0100
  2322. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c  2022-12-31 15:56:55.295955322 +0100
  2323. @@ -105,7 +105,6 @@ static struct fwnode_handle *dpaa2_mac_g
  2324.          * thus the fwnode field is not yet set. Defer probe if we are
  2325.          * facing this situation.
  2326.          */
  2327. -       dev_dbg(dev, "dprc not finished probing\n");
  2328.         return ERR_PTR(-EPROBE_DEFER);
  2329.     }
  2330.  
  2331. @@ -236,6 +235,7 @@ static void dpaa2_mac_link_down(struct p
  2332.  }
  2333.  
  2334.  static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
  2335. +   .validate = phylink_generic_validate,
  2336.     .mac_select_pcs = dpaa2_mac_select_pcs,
  2337.     .mac_config = dpaa2_mac_config,
  2338.     .mac_link_up = dpaa2_mac_link_up,
  2339. @@ -264,10 +264,8 @@ static int dpaa2_pcs_create(struct dpaa2
  2340.  
  2341.     mdiodev = fwnode_mdio_find_device(node);
  2342.     fwnode_handle_put(node);
  2343. -   if (!mdiodev) {
  2344. -       netdev_dbg(mac->net_dev, "missing PCS device\n");
  2345. +   if (!mdiodev)
  2346.         return -EPROBE_DEFER;
  2347. -   }
  2348.  
  2349.     mac->pcs = lynx_pcs_create(mdiodev);
  2350.     if (!mac->pcs) {
  2351. @@ -338,20 +336,12 @@ static void dpaa2_mac_set_supported_inte
  2352.  
  2353.  void dpaa2_mac_start(struct dpaa2_mac *mac)
  2354.  {
  2355. -   ASSERT_RTNL();
  2356. -
  2357.     if (mac->serdes_phy)
  2358.         phy_power_on(mac->serdes_phy);
  2359. -
  2360. -   phylink_start(mac->phylink);
  2361.  }
  2362.  
  2363.  void dpaa2_mac_stop(struct dpaa2_mac *mac)
  2364.  {
  2365. -   ASSERT_RTNL();
  2366. -
  2367. -   phylink_stop(mac->phylink);
  2368. -
  2369.     if (mac->serdes_phy)
  2370.         phy_power_off(mac->serdes_phy);
  2371.  }
  2372. @@ -430,9 +420,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *
  2373.     }
  2374.     mac->phylink = phylink;
  2375.  
  2376. -   rtnl_lock();
  2377.     err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
  2378. -   rtnl_unlock();
  2379.     if (err) {
  2380.         netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
  2381.         goto err_phylink_destroy;
  2382. @@ -450,10 +438,10 @@ err_pcs_destroy:
  2383.  
  2384.  void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
  2385.  {
  2386. -   rtnl_lock();
  2387. -   phylink_disconnect_phy(mac->phylink);
  2388. -   rtnl_unlock();
  2389. +   if (!mac->phylink)
  2390. +       return;
  2391.  
  2392. +   phylink_disconnect_phy(mac->phylink);
  2393.     phylink_destroy(mac->phylink);
  2394.     dpaa2_pcs_destroy(mac);
  2395.     of_phy_put(mac->serdes_phy);
  2396. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
  2397. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h  2022-12-25 22:41:39.000000000 +0100
  2398. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h  2022-12-31 15:56:55.296955307 +0100
  2399. @@ -30,14 +30,8 @@ struct dpaa2_mac {
  2400.     struct phy *serdes_phy;
  2401.  };
  2402.  
  2403. -static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac)
  2404. -{
  2405. -   if (!mac)
  2406. -       return false;
  2407. -
  2408. -   return mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
  2409. -          mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE;
  2410. -}
  2411. +bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
  2412. +                struct fsl_mc_io *mc_io);
  2413.  
  2414.  int dpaa2_mac_open(struct dpaa2_mac *mac);
  2415.  
  2416. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
  2417. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c  2022-12-25 22:41:39.000000000 +0100
  2418. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c  2022-12-31 15:56:55.296955307 +0100
  2419. @@ -8,6 +8,7 @@
  2420.  #include <linux/module.h>
  2421.  #include <linux/of.h>
  2422.  #include <linux/of_address.h>
  2423. +#include <linux/msi.h>
  2424.  #include <linux/fsl/mc.h>
  2425.  
  2426.  #include "dpaa2-ptp.h"
  2427. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
  2428. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c   2022-12-25 22:41:39.000000000 +0100
  2429. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c   2022-12-31 15:56:55.296955307 +0100
  2430. @@ -60,18 +60,11 @@ dpaa2_switch_get_link_ksettings(struct n
  2431.  {
  2432.     struct ethsw_port_priv *port_priv = netdev_priv(netdev);
  2433.     struct dpsw_link_state state = {0};
  2434. -   int err;
  2435. +   int err = 0;
  2436.  
  2437. -   mutex_lock(&port_priv->mac_lock);
  2438. -
  2439. -   if (dpaa2_switch_port_is_type_phy(port_priv)) {
  2440. -       err = phylink_ethtool_ksettings_get(port_priv->mac->phylink,
  2441. -                           link_ksettings);
  2442. -       mutex_unlock(&port_priv->mac_lock);
  2443. -       return err;
  2444. -   }
  2445. -
  2446. -   mutex_unlock(&port_priv->mac_lock);
  2447. +   if (dpaa2_switch_port_is_type_phy(port_priv))
  2448. +       return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
  2449. +                            link_ksettings);
  2450.  
  2451.     err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
  2452.                      port_priv->ethsw_data->dpsw_handle,
  2453. @@ -106,16 +99,9 @@ dpaa2_switch_set_link_ksettings(struct n
  2454.     bool if_running;
  2455.     int err = 0, ret;
  2456.  
  2457. -   mutex_lock(&port_priv->mac_lock);
  2458. -
  2459. -   if (dpaa2_switch_port_is_type_phy(port_priv)) {
  2460. -       err = phylink_ethtool_ksettings_set(port_priv->mac->phylink,
  2461. -                           link_ksettings);
  2462. -       mutex_unlock(&port_priv->mac_lock);
  2463. -       return err;
  2464. -   }
  2465. -
  2466. -   mutex_unlock(&port_priv->mac_lock);
  2467. +   if (dpaa2_switch_port_is_type_phy(port_priv))
  2468. +       return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
  2469. +                            link_ksettings);
  2470.  
  2471.     /* Interface needs to be down to change link settings */
  2472.     if_running = netif_running(netdev);
  2473. @@ -159,9 +145,14 @@ dpaa2_switch_set_link_ksettings(struct n
  2474.  static int
  2475.  dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
  2476.  {
  2477. +   struct ethsw_port_priv *port_priv = netdev_priv(netdev);
  2478. +   int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS;
  2479. +
  2480.     switch (sset) {
  2481.     case ETH_SS_STATS:
  2482. -       return DPAA2_SWITCH_NUM_COUNTERS + dpaa2_mac_get_sset_count();
  2483. +       if (port_priv->mac)
  2484. +           num_ss_stats += dpaa2_mac_get_sset_count();
  2485. +       return num_ss_stats;
  2486.     default:
  2487.         return -EOPNOTSUPP;
  2488.     }
  2489. @@ -170,6 +161,7 @@ dpaa2_switch_ethtool_get_sset_count(stru
  2490.  static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
  2491.                          u32 stringset, u8 *data)
  2492.  {
  2493. +   struct ethsw_port_priv *port_priv = netdev_priv(netdev);
  2494.     u8 *p = data;
  2495.     int i;
  2496.  
  2497. @@ -180,7 +172,8 @@ static void dpaa2_switch_ethtool_get_str
  2498.                    ETH_GSTRING_LEN);
  2499.             p += ETH_GSTRING_LEN;
  2500.         }
  2501. -       dpaa2_mac_get_strings(p);
  2502. +       if (port_priv->mac)
  2503. +           dpaa2_mac_get_strings(p);
  2504.         break;
  2505.     }
  2506.  }
  2507. @@ -203,12 +196,8 @@ static void dpaa2_switch_ethtool_get_sta
  2508.                    dpaa2_switch_ethtool_counters[i].name, err);
  2509.     }
  2510.  
  2511. -   mutex_lock(&port_priv->mac_lock);
  2512. -
  2513. -   if (dpaa2_switch_port_has_mac(port_priv))
  2514. +   if (port_priv->mac)
  2515.         dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
  2516. -
  2517. -   mutex_unlock(&port_priv->mac_lock);
  2518.  }
  2519.  
  2520.  const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
  2521. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
  2522. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c   2022-12-25 22:41:39.000000000 +0100
  2523. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c   2022-12-31 15:56:55.296955307 +0100
  2524. @@ -10,6 +10,7 @@
  2525.  #include <linux/module.h>
  2526.  
  2527.  #include <linux/interrupt.h>
  2528. +#include <linux/msi.h>
  2529.  #include <linux/kthread.h>
  2530.  #include <linux/workqueue.h>
  2531.  #include <linux/iommu.h>
  2532. @@ -602,11 +603,8 @@ static int dpaa2_switch_port_link_state_
  2533.  
  2534.     /* When we manage the MAC/PHY using phylink there is no need
  2535.      * to manually update the netif_carrier.
  2536. -    * We can avoid locking because we are called from the "link changed"
  2537. -    * IRQ handler, which is the same as the "endpoint changed" IRQ handler
  2538. -    * (the writer to port_priv->mac), so we cannot race with it.
  2539.      */
  2540. -   if (dpaa2_mac_is_type_phy(port_priv->mac))
  2541. +   if (dpaa2_switch_port_is_type_phy(port_priv))
  2542.         return 0;
  2543.  
  2544.     /* Interrupts are received even though no one issued an 'ifconfig up'
  2545. @@ -686,8 +684,6 @@ static int dpaa2_switch_port_open(struct
  2546.     struct ethsw_core *ethsw = port_priv->ethsw_data;
  2547.     int err;
  2548.  
  2549. -   mutex_lock(&port_priv->mac_lock);
  2550. -
  2551.     if (!dpaa2_switch_port_is_type_phy(port_priv)) {
  2552.         /* Explicitly set carrier off, otherwise
  2553.          * netif_carrier_ok() will return true and cause 'ip link show'
  2554. @@ -701,17 +697,16 @@ static int dpaa2_switch_port_open(struct
  2555.                  port_priv->ethsw_data->dpsw_handle,
  2556.                  port_priv->idx);
  2557.     if (err) {
  2558. -       mutex_unlock(&port_priv->mac_lock);
  2559.         netdev_err(netdev, "dpsw_if_enable err %d\n", err);
  2560.         return err;
  2561.     }
  2562.  
  2563.     dpaa2_switch_enable_ctrl_if_napi(ethsw);
  2564.  
  2565. -   if (dpaa2_switch_port_is_type_phy(port_priv))
  2566. +   if (dpaa2_switch_port_is_type_phy(port_priv)) {
  2567.         dpaa2_mac_start(port_priv->mac);
  2568. -
  2569. -   mutex_unlock(&port_priv->mac_lock);
  2570. +       phylink_start(port_priv->mac->phylink);
  2571. +   }
  2572.  
  2573.     return 0;
  2574.  }
  2575. @@ -722,17 +717,14 @@ static int dpaa2_switch_port_stop(struct
  2576.     struct ethsw_core *ethsw = port_priv->ethsw_data;
  2577.     int err;
  2578.  
  2579. -   mutex_lock(&port_priv->mac_lock);
  2580. -
  2581.     if (dpaa2_switch_port_is_type_phy(port_priv)) {
  2582. +       phylink_stop(port_priv->mac->phylink);
  2583.         dpaa2_mac_stop(port_priv->mac);
  2584.     } else {
  2585.         netif_tx_stop_all_queues(netdev);
  2586.         netif_carrier_off(netdev);
  2587.     }
  2588.  
  2589. -   mutex_unlock(&port_priv->mac_lock);
  2590. -
  2591.     err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
  2592.                   port_priv->ethsw_data->dpsw_handle,
  2593.                   port_priv->idx);
  2594. @@ -1461,8 +1453,9 @@ static int dpaa2_switch_port_connect_mac
  2595.     err = dpaa2_mac_open(mac);
  2596.     if (err)
  2597.         goto err_free_mac;
  2598. +   port_priv->mac = mac;
  2599.  
  2600. -   if (dpaa2_mac_is_type_phy(mac)) {
  2601. +   if (dpaa2_switch_port_is_type_phy(port_priv)) {
  2602.         err = dpaa2_mac_connect(mac);
  2603.         if (err) {
  2604.             netdev_err(port_priv->netdev,
  2605. @@ -1472,14 +1465,11 @@ static int dpaa2_switch_port_connect_mac
  2606.         }
  2607.     }
  2608.  
  2609. -   mutex_lock(&port_priv->mac_lock);
  2610. -   port_priv->mac = mac;
  2611. -   mutex_unlock(&port_priv->mac_lock);
  2612. -
  2613.     return 0;
  2614.  
  2615.  err_close_mac:
  2616.     dpaa2_mac_close(mac);
  2617. +   port_priv->mac = NULL;
  2618.  err_free_mac:
  2619.     kfree(mac);
  2620.     return err;
  2621. @@ -1487,21 +1477,15 @@ err_free_mac:
  2622.  
  2623.  static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
  2624.  {
  2625. -   struct dpaa2_mac *mac;
  2626. -
  2627. -   mutex_lock(&port_priv->mac_lock);
  2628. -   mac = port_priv->mac;
  2629. -   port_priv->mac = NULL;
  2630. -   mutex_unlock(&port_priv->mac_lock);
  2631. +   if (dpaa2_switch_port_is_type_phy(port_priv))
  2632. +       dpaa2_mac_disconnect(port_priv->mac);
  2633.  
  2634. -   if (!mac)
  2635. +   if (!dpaa2_switch_port_has_mac(port_priv))
  2636.         return;
  2637.  
  2638. -   if (dpaa2_mac_is_type_phy(mac))
  2639. -       dpaa2_mac_disconnect(mac);
  2640. -
  2641. -   dpaa2_mac_close(mac);
  2642. -   kfree(mac);
  2643. +   dpaa2_mac_close(port_priv->mac);
  2644. +   kfree(port_priv->mac);
  2645. +   port_priv->mac = NULL;
  2646.  }
  2647.  
  2648.  static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
  2649. @@ -1511,7 +1495,6 @@ static irqreturn_t dpaa2_switch_irq0_han
  2650.     struct ethsw_port_priv *port_priv;
  2651.     u32 status = ~0;
  2652.     int err, if_id;
  2653. -   bool had_mac;
  2654.  
  2655.     err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
  2656.                   DPSW_IRQ_INDEX_IF, &status);
  2657. @@ -1529,15 +1512,12 @@ static irqreturn_t dpaa2_switch_irq0_han
  2658.     }
  2659.  
  2660.     if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
  2661. -       /* We can avoid locking because the "endpoint changed" IRQ
  2662. -        * handler is the only one who changes priv->mac at runtime,
  2663. -        * so we are not racing with anyone.
  2664. -        */
  2665. -       had_mac = !!port_priv->mac;
  2666. -       if (had_mac)
  2667. +       rtnl_lock();
  2668. +       if (dpaa2_switch_port_has_mac(port_priv))
  2669.             dpaa2_switch_port_disconnect_mac(port_priv);
  2670.         else
  2671.             dpaa2_switch_port_connect_mac(port_priv);
  2672. +       rtnl_unlock();
  2673.     }
  2674.  
  2675.  out:
  2676. @@ -2955,7 +2935,9 @@ static void dpaa2_switch_remove_port(str
  2677.  {
  2678.     struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
  2679.  
  2680. +   rtnl_lock();
  2681.     dpaa2_switch_port_disconnect_mac(port_priv);
  2682. +   rtnl_unlock();
  2683.     free_netdev(port_priv->netdev);
  2684.     ethsw->ports[port_idx] = NULL;
  2685.  }
  2686. @@ -3274,8 +3256,6 @@ static int dpaa2_switch_probe_port(struc
  2687.     port_priv->netdev = port_netdev;
  2688.     port_priv->ethsw_data = ethsw;
  2689.  
  2690. -   mutex_init(&port_priv->mac_lock);
  2691. -
  2692.     port_priv->idx = port_idx;
  2693.     port_priv->stp_state = BR_STATE_FORWARDING;
  2694.  
  2695. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
  2696. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h   2022-12-25 22:41:39.000000000 +0100
  2697. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h   2022-12-31 15:56:55.296955307 +0100
  2698. @@ -161,8 +161,6 @@ struct ethsw_port_priv {
  2699.  
  2700.     struct dpaa2_switch_filter_block *filter_block;
  2701.     struct dpaa2_mac    *mac;
  2702. -   /* Protects against changes to port_priv->mac */
  2703. -   struct mutex        mac_lock;
  2704.  };
  2705.  
  2706.  /* Switch data */
  2707. @@ -232,7 +230,12 @@ static inline bool dpaa2_switch_supports
  2708.  static inline bool
  2709.  dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
  2710.  {
  2711. -   return dpaa2_mac_is_type_phy(port_priv->mac);
  2712. +   if (port_priv->mac &&
  2713. +       (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
  2714. +        port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
  2715. +       return true;
  2716. +
  2717. +   return false;
  2718.  }
  2719.  
  2720.  static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
  2721. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
  2722. --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c  2022-12-25 22:41:39.000000000 +0100
  2723. +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c  1970-01-01 01:00:00.000000000 +0100
  2724. @@ -1,454 +0,0 @@
  2725. -// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
  2726. -/* Copyright 2022 NXP
  2727. - */
  2728. -#include <linux/filter.h>
  2729. -#include <linux/compiler.h>
  2730. -#include <linux/bpf_trace.h>
  2731. -#include <net/xdp.h>
  2732. -#include <net/xdp_sock_drv.h>
  2733. -
  2734. -#include "dpaa2-eth.h"
  2735. -
  2736. -static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv,
  2737. -                    struct dpaa2_eth_channel *ch,
  2738. -                    enum dpaa2_eth_fq_type type,
  2739. -                    dpaa2_eth_consume_cb_t *consume)
  2740. -{
  2741. -   struct dpaa2_eth_fq *fq;
  2742. -   int i;
  2743. -
  2744. -   for (i = 0; i < priv->num_fqs; i++) {
  2745. -       fq = &priv->fq[i];
  2746. -
  2747. -       if (fq->type != type)
  2748. -           continue;
  2749. -       if (fq->channel != ch)
  2750. -           continue;
  2751. -
  2752. -       fq->consume = consume;
  2753. -   }
  2754. -}
  2755. -
  2756. -static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
  2757. -                struct dpaa2_eth_channel *ch,
  2758. -                struct dpaa2_eth_fq *rx_fq,
  2759. -                struct dpaa2_fd *fd, void *vaddr)
  2760. -{
  2761. -   dma_addr_t addr = dpaa2_fd_get_addr(fd);
  2762. -   struct bpf_prog *xdp_prog;
  2763. -   struct xdp_buff *xdp_buff;
  2764. -   struct dpaa2_eth_swa *swa;
  2765. -   u32 xdp_act = XDP_PASS;
  2766. -   int err;
  2767. -
  2768. -   xdp_prog = READ_ONCE(ch->xdp.prog);
  2769. -   if (!xdp_prog)
  2770. -       goto out;
  2771. -
  2772. -   swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE +
  2773. -                      ch->xsk_pool->umem->headroom);
  2774. -   xdp_buff = swa->xsk.xdp_buff;
  2775. -
  2776. -   xdp_buff->data_hard_start = vaddr;
  2777. -   xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd);
  2778. -   xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd);
  2779. -   xdp_set_data_meta_invalid(xdp_buff);
  2780. -   xdp_buff->rxq = &ch->xdp_rxq;
  2781. -
  2782. -   xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
  2783. -   xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
  2784. -
  2785. -   /* xdp.data pointer may have changed */
  2786. -   dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr);
  2787. -   dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data);
  2788. -
  2789. -   if (likely(xdp_act == XDP_REDIRECT)) {
  2790. -       err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog);
  2791. -       if (unlikely(err)) {
  2792. -           ch->stats.xdp_drop++;
  2793. -           dpaa2_eth_recycle_buf(priv, ch, addr);
  2794. -       } else {
  2795. -           ch->buf_count--;
  2796. -           ch->stats.xdp_redirect++;
  2797. -       }
  2798. -
  2799. -       goto xdp_redir;
  2800. -   }
  2801. -
  2802. -   switch (xdp_act) {
  2803. -   case XDP_PASS:
  2804. -       break;
  2805. -   case XDP_TX:
  2806. -       dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
  2807. -       break;
  2808. -   default:
  2809. -       bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
  2810. -       fallthrough;
  2811. -   case XDP_ABORTED:
  2812. -       trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
  2813. -       fallthrough;
  2814. -   case XDP_DROP:
  2815. -       dpaa2_eth_recycle_buf(priv, ch, addr);
  2816. -       ch->stats.xdp_drop++;
  2817. -       break;
  2818. -   }
  2819. -
  2820. -xdp_redir:
  2821. -   ch->xdp.res |= xdp_act;
  2822. -out:
  2823. -   return xdp_act;
  2824. -}
  2825. -
  2826. -/* Rx frame processing routine for the AF_XDP fast path */
  2827. -static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv,
  2828. -            struct dpaa2_eth_channel *ch,
  2829. -            const struct dpaa2_fd *fd,
  2830. -            struct dpaa2_eth_fq *fq)
  2831. -{
  2832. -   dma_addr_t addr = dpaa2_fd_get_addr(fd);
  2833. -   u8 fd_format = dpaa2_fd_get_format(fd);
  2834. -   struct rtnl_link_stats64 *percpu_stats;
  2835. -   u32 fd_length = dpaa2_fd_get_len(fd);
  2836. -   struct sk_buff *skb;
  2837. -   void *vaddr;
  2838. -   u32 xdp_act;
  2839. -
  2840. -   trace_dpaa2_rx_xsk_fd(priv->net_dev, fd);
  2841. -
  2842. -   vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
  2843. -   percpu_stats = this_cpu_ptr(priv->percpu_stats);
  2844. -
  2845. -   if (fd_format != dpaa2_fd_single) {
  2846. -       WARN_ON(priv->xdp_prog);
  2847. -       /* AF_XDP doesn't support any other formats */
  2848. -       goto err_frame_format;
  2849. -   }
  2850. -
  2851. -   xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
  2852. -   if (xdp_act != XDP_PASS) {
  2853. -       percpu_stats->rx_packets++;
  2854. -       percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
  2855. -       return;
  2856. -   }
  2857. -
  2858. -   /* Build skb */
  2859. -   skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr);
  2860. -   if (!skb)
  2861. -       /* Nothing else we can do, recycle the buffer and
  2862. -        * drop the frame.
  2863. -        */
  2864. -       goto err_alloc_skb;
  2865. -
  2866. -   /* Send the skb to the Linux networking stack */
  2867. -   dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
  2868. -
  2869. -   return;
  2870. -
  2871. -err_alloc_skb:
  2872. -   dpaa2_eth_recycle_buf(priv, ch, addr);
  2873. -err_frame_format:
  2874. -   percpu_stats->rx_dropped++;
  2875. -}
  2876. -
  2877. -static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv,
  2878. -                      struct dpni_pools_cfg *pools_params)
  2879. -{
  2880. -   int curr_bp = 0, i, j;
  2881. -
  2882. -   pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN;
  2883. -   for (i = 0; i < priv->num_bps; i++) {
  2884. -       for (j = 0; j < priv->num_channels; j++)
  2885. -           if (priv->bp[i] == priv->channel[j]->bp)
  2886. -               pools_params->pools[curr_bp].priority_mask |= (1 << j);
  2887. -       if (!pools_params->pools[curr_bp].priority_mask)
  2888. -           continue;
  2889. -
  2890. -       pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
  2891. -       pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
  2892. -       pools_params->pools[curr_bp++].backup_pool = 0;
  2893. -   }
  2894. -   pools_params->num_dpbp = curr_bp;
  2895. -}
  2896. -
  2897. -static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)
  2898. -{
  2899. -   struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
  2900. -   struct dpaa2_eth_priv *priv = netdev_priv(dev);
  2901. -   struct dpni_pools_cfg pools_params = { 0 };
  2902. -   struct dpaa2_eth_channel *ch;
  2903. -   int err;
  2904. -   bool up;
  2905. -
  2906. -   ch = priv->channel[qid];
  2907. -   if (!ch->xsk_pool)
  2908. -       return -EINVAL;
  2909. -
  2910. -   up = netif_running(dev);
  2911. -   if (up)
  2912. -       dev_close(dev);
  2913. -
  2914. -   xsk_pool_dma_unmap(pool, 0);
  2915. -   err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq,
  2916. -                    MEM_TYPE_PAGE_ORDER0, NULL);
  2917. -   if (err)
  2918. -       netdev_err(dev, "xsk_rxq_info_reg_mem_model() failed (err = %d)\n",
  2919. -              err);
  2920. -
  2921. -   dpaa2_eth_free_dpbp(priv, ch->bp);
  2922. -
  2923. -   ch->xsk_zc = false;
  2924. -   ch->xsk_pool = NULL;
  2925. -   ch->xsk_tx_pkts_sent = 0;
  2926. -   ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
  2927. -
  2928. -   dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx);
  2929. -
  2930. -   dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
  2931. -   err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
  2932. -   if (err)
  2933. -       netdev_err(dev, "dpni_set_pools() failed\n");
  2934. -
  2935. -   if (up) {
  2936. -       err = dev_open(dev, NULL);
  2937. -       if (err)
  2938. -           return err;
  2939. -   }
  2940. -
  2941. -   return 0;
  2942. -}
  2943. -
  2944. -static int dpaa2_xsk_enable_pool(struct net_device *dev,
  2945. -                struct xsk_buff_pool *pool,
  2946. -                u16 qid)
  2947. -{
  2948. -   struct dpaa2_eth_priv *priv = netdev_priv(dev);
  2949. -   struct dpni_pools_cfg pools_params = { 0 };
  2950. -   struct dpaa2_eth_channel *ch;
  2951. -   int err, err2;
  2952. -   bool up;
  2953. -
  2954. -   if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) {
  2955. -       netdev_err(dev, "AF_XDP zero-copy not supported on devices <= WRIOP(3, 0, 0)\n");
  2956. -       return -EOPNOTSUPP;
  2957. -   }
  2958. -
  2959. -   if (priv->dpni_attrs.num_queues > 8) {
  2960. -       netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more then 8 queues\n");
  2961. -       return -EOPNOTSUPP;
  2962. -   }
  2963. -
  2964. -   up = netif_running(dev);
  2965. -   if (up)
  2966. -       dev_close(dev);
  2967. -
  2968. -   err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0);
  2969. -   if (err) {
  2970. -       netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n",
  2971. -              err);
  2972. -       goto err_dma_unmap;
  2973. -   }
  2974. -
  2975. -   ch = priv->channel[qid];
  2976. -   err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
  2977. -   if (err) {
  2978. -       netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err);
  2979. -       goto err_mem_model;
  2980. -   }
  2981. -   xsk_pool_set_rxq_info(pool, &ch->xdp_rxq);
  2982. -
  2983. -   priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv);
  2984. -   if (IS_ERR(priv->bp[priv->num_bps])) {
  2985. -       err = PTR_ERR(priv->bp[priv->num_bps]);
  2986. -       goto err_bp_alloc;
  2987. -   }
  2988. -   ch->xsk_zc = true;
  2989. -   ch->xsk_pool = pool;
  2990. -   ch->bp = priv->bp[priv->num_bps++];
  2991. -
  2992. -   dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx);
  2993. -
  2994. -   dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
  2995. -   err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
  2996. -   if (err) {
  2997. -       netdev_err(dev, "dpni_set_pools() failed\n");
  2998. -       goto err_set_pools;
  2999. -   }
  3000. -
  3001. -   if (up) {
  3002. -       err = dev_open(dev, NULL);
  3003. -       if (err)
  3004. -           return err;
  3005. -   }
  3006. -
  3007. -   return 0;
  3008. -
  3009. -err_set_pools:
  3010. -   err2 = dpaa2_xsk_disable_pool(dev, qid);
  3011. -   if (err2)
  3012. -       netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2);
  3013. -err_bp_alloc:
  3014. -   err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq,
  3015. -                     MEM_TYPE_PAGE_ORDER0, NULL);
  3016. -   if (err2)
  3017. -       netdev_err(dev, "xsk_rxq_info_reg_mem_model() failed with %d)\n", err2);
  3018. -err_mem_model:
  3019. -   xsk_pool_dma_unmap(pool, 0);
  3020. -err_dma_unmap:
  3021. -   if (up)
  3022. -       dev_open(dev, NULL);
  3023. -
  3024. -   return err;
  3025. -}
  3026. -
  3027. -int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
  3028. -{
  3029. -   return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) :
  3030. -             dpaa2_xsk_disable_pool(dev, qid);
  3031. -}
  3032. -
  3033. -int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
  3034. -{
  3035. -   struct dpaa2_eth_priv *priv = netdev_priv(dev);
  3036. -   struct dpaa2_eth_channel *ch = priv->channel[qid];
  3037. -
  3038. -   if (!priv->link_state.up)
  3039. -       return -ENETDOWN;
  3040. -
  3041. -   if (!priv->xdp_prog)
  3042. -       return -EINVAL;
  3043. -
  3044. -   if (!ch->xsk_zc)
  3045. -       return -EINVAL;
  3046. -
  3047. -   /* We do not have access to a per channel SW interrupt, so instead we
  3048. -    * schedule a NAPI instance.
  3049. -    */
  3050. -   if (!napi_if_scheduled_mark_missed(&ch->napi))
  3051. -       napi_schedule(&ch->napi);
  3052. -
  3053. -   return 0;
  3054. -}
  3055. -
  3056. -static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv,
  3057. -                struct dpaa2_eth_channel *ch,
  3058. -                struct dpaa2_fd *fd,
  3059. -                struct xdp_desc *xdp_desc)
  3060. -{
  3061. -   struct device *dev = priv->net_dev->dev.parent;
  3062. -   struct dpaa2_sg_entry *sgt;
  3063. -   struct dpaa2_eth_swa *swa;
  3064. -   void *sgt_buf = NULL;
  3065. -   dma_addr_t sgt_addr;
  3066. -   int sgt_buf_size;
  3067. -   dma_addr_t addr;
  3068. -   int err = 0;
  3069. -
  3070. -   /* Prepare the HW SGT structure */
  3071. -   sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
  3072. -   sgt_buf = dpaa2_eth_sgt_get(priv);
  3073. -   if (unlikely(!sgt_buf))
  3074. -       return -ENOMEM;
  3075. -   sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
  3076. -
  3077. -   /* Get the address of the XSK Tx buffer */
  3078. -   addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr);
  3079. -   xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len);
  3080. -
  3081. -   /* Fill in the HW SGT structure */
  3082. -   dpaa2_sg_set_addr(sgt, addr);
  3083. -   dpaa2_sg_set_len(sgt, xdp_desc->len);
  3084. -   dpaa2_sg_set_final(sgt, true);
  3085. -
  3086. -   /* Store the necessary info in the SGT buffer */
  3087. -   swa = (struct dpaa2_eth_swa *)sgt_buf;
  3088. -   swa->type = DPAA2_ETH_SWA_XSK;
  3089. -   swa->xsk.sgt_size = sgt_buf_size;
  3090. -
  3091. -   /* Separately map the SGT buffer */
  3092. -   sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
  3093. -   if (unlikely(dma_mapping_error(dev, sgt_addr))) {
  3094. -       err = -ENOMEM;
  3095. -       goto sgt_map_failed;
  3096. -   }
  3097. -
  3098. -   /* Initialize FD fields */
  3099. -   memset(fd, 0, sizeof(struct dpaa2_fd));
  3100. -   dpaa2_fd_set_offset(fd, priv->tx_data_offset);
  3101. -   dpaa2_fd_set_format(fd, dpaa2_fd_sg);
  3102. -   dpaa2_fd_set_addr(fd, sgt_addr);
  3103. -   dpaa2_fd_set_len(fd, xdp_desc->len);
  3104. -   dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
  3105. -
  3106. -   return 0;
  3107. -
  3108. -sgt_map_failed:
  3109. -   dpaa2_eth_sgt_recycle(priv, sgt_buf);
  3110. -
  3111. -   return err;
  3112. -}
  3113. -
  3114. -bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
  3115. -         struct dpaa2_eth_channel *ch)
  3116. -{
  3117. -   struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs;
  3118. -   struct dpaa2_eth_drv_stats *percpu_extras;
  3119. -   struct rtnl_link_stats64 *percpu_stats;
  3120. -   int budget = DPAA2_ETH_TX_ZC_PER_NAPI;
  3121. -   int total_enqueued, enqueued;
  3122. -   int retries, max_retries;
  3123. -   struct dpaa2_eth_fq *fq;
  3124. -   struct dpaa2_fd *fds;
  3125. -   int batch, i, err;
  3126. -
  3127. -   percpu_stats = this_cpu_ptr(priv->percpu_stats);
  3128. -   percpu_extras = this_cpu_ptr(priv->percpu_extras);
  3129. -   fds = (this_cpu_ptr(priv->fd))->array;
  3130. -
  3131. -   /* Use the FQ with the same idx as the affine CPU */
  3132. -   fq = &priv->fq[ch->nctx.desired_cpu];
  3133. -
  3134. -   batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);
  3135. -   if (!batch)
  3136. -       return false;
  3137. -
  3138. -   /* Create a FD for each XSK frame to be sent */
  3139. -   for (i = 0; i < batch; i++) {
  3140. -       err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]);
  3141. -       if (err) {
  3142. -           batch = i;
  3143. -           break;
  3144. -       }
  3145. -
  3146. -       trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]);
  3147. -   }
  3148. -
  3149. -   /* Enqueue all the created FDs */
  3150. -   max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;
  3151. -   total_enqueued = 0;
  3152. -   enqueued = 0;
  3153. -   retries = 0;
  3154. -   while (total_enqueued < batch && retries < max_retries) {
  3155. -       err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,
  3156. -                   batch - total_enqueued, &enqueued);
  3157. -       if (err == -EBUSY) {
  3158. -           retries++;
  3159. -           continue;
  3160. -       }
  3161. -
  3162. -       total_enqueued += enqueued;
  3163. -   }
  3164. -   percpu_extras->tx_portal_busy += retries;
  3165. -
  3166. -   /* Update statistics */
  3167. -   percpu_stats->tx_packets += total_enqueued;
  3168. -   for (i = 0; i < total_enqueued; i++)
  3169. -       percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
  3170. -   for (i = total_enqueued; i < batch; i++) {
  3171. -       dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false);
  3172. -       percpu_stats->tx_errors++;
  3173. -   }
  3174. -
  3175. -   xsk_tx_release(ch->xsk_pool);
  3176. -
  3177. -   return total_enqueued == budget;
  3178. -}
  3179. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
  3180. --- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h   2022-12-25 22:41:39.000000000 +0100
  3181. +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h   2022-12-31 15:56:55.296955307 +0100
  3182. @@ -13,12 +13,10 @@
  3183.  #define DPNI_VER_MINOR             0
  3184.  #define DPNI_CMD_BASE_VERSION          1
  3185.  #define DPNI_CMD_2ND_VERSION           2
  3186. -#define DPNI_CMD_3RD_VERSION           3
  3187.  #define DPNI_CMD_ID_OFFSET         4
  3188.  
  3189.  #define DPNI_CMD(id)   (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
  3190.  #define DPNI_CMD_V2(id)    (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
  3191. -#define DPNI_CMD_V3(id)    (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION)
  3192.  
  3193.  #define DPNI_CMDID_OPEN                    DPNI_CMD(0x801)
  3194.  #define DPNI_CMDID_CLOSE               DPNI_CMD(0x800)
  3195. @@ -41,7 +39,7 @@
  3196.  #define DPNI_CMDID_GET_IRQ_STATUS          DPNI_CMD(0x016)
  3197.  #define DPNI_CMDID_CLEAR_IRQ_STATUS            DPNI_CMD(0x017)
  3198.  
  3199. -#define DPNI_CMDID_SET_POOLS               DPNI_CMD_V3(0x200)
  3200. +#define DPNI_CMDID_SET_POOLS               DPNI_CMD(0x200)
  3201.  #define DPNI_CMDID_SET_ERRORS_BEHAVIOR         DPNI_CMD(0x20B)
  3202.  
  3203.  #define DPNI_CMDID_GET_QDID                DPNI_CMD(0x210)
  3204. @@ -117,19 +115,14 @@ struct dpni_cmd_open {
  3205.  };
  3206.  
  3207.  #define DPNI_BACKUP_POOL(val, order)   (((val) & 0x1) << (order))
  3208. -
  3209. -struct dpni_cmd_pool {
  3210. -   __le16 dpbp_id;
  3211. -   u8 priority_mask;
  3212. -   u8 pad;
  3213. -};
  3214. -
  3215.  struct dpni_cmd_set_pools {
  3216. +   /* cmd word 0 */
  3217.     u8 num_dpbp;
  3218.     u8 backup_pool_mask;
  3219. -   u8 pad;
  3220. -   u8 pool_options;
  3221. -   struct dpni_cmd_pool pool[DPNI_MAX_DPBP];
  3222. +   __le16 pad;
  3223. +   /* cmd word 0..4 */
  3224. +   __le32 dpbp_id[DPNI_MAX_DPBP];
  3225. +   /* cmd word 4..6 */
  3226.     __le16 buffer_size[DPNI_MAX_DPBP];
  3227.  };
  3228.  
  3229. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
  3230. --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c   2022-12-25 22:41:39.000000000 +0100
  3231. +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c   2022-12-31 15:56:55.296955307 +0100
  3232. @@ -173,12 +173,8 @@ int dpni_set_pools(struct fsl_mc_io *mc_
  3233.                       token);
  3234.     cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
  3235.     cmd_params->num_dpbp = cfg->num_dpbp;
  3236. -   cmd_params->pool_options = cfg->pool_options;
  3237.     for (i = 0; i < DPNI_MAX_DPBP; i++) {
  3238. -       cmd_params->pool[i].dpbp_id =
  3239. -           cpu_to_le16(cfg->pools[i].dpbp_id);
  3240. -       cmd_params->pool[i].priority_mask =
  3241. -           cfg->pools[i].priority_mask;
  3242. +       cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
  3243.         cmd_params->buffer_size[i] =
  3244.             cpu_to_le16(cfg->pools[i].buffer_size);
  3245.         cmd_params->backup_pool_mask |=
  3246. diff -rupN a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
  3247. --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h   2022-12-25 22:41:39.000000000 +0100
  3248. +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h   2022-12-31 15:56:55.296955307 +0100
  3249. @@ -92,28 +92,19 @@ int dpni_close(struct fsl_mc_io *mc_io,
  3250.            u32      cmd_flags,
  3251.            u16      token);
  3252.  
  3253. -#define DPNI_POOL_ASSOC_QPRI   0
  3254. -#define DPNI_POOL_ASSOC_QDBIN  1
  3255. -
  3256.  /**
  3257.   * struct dpni_pools_cfg - Structure representing buffer pools configuration
  3258.   * @num_dpbp: Number of DPBPs
  3259. - * @pool_options: Buffer assignment options.
  3260. - * This field is a combination of DPNI_POOL_ASSOC_flags
  3261.   * @pools: Array of buffer pools parameters; The number of valid entries
  3262.   * must match 'num_dpbp' value
  3263.   * @pools.dpbp_id: DPBP object ID
  3264. - * @pools.priority: Priority mask that indicates TC's used with this buffer.
  3265. - * If set to 0x00 MC will assume value 0xff.
  3266.   * @pools.buffer_size: Buffer size
  3267.   * @pools.backup_pool: Backup pool
  3268.   */
  3269.  struct dpni_pools_cfg {
  3270.     u8      num_dpbp;
  3271. -   u8      pool_options;
  3272.     struct {
  3273.         int dpbp_id;
  3274. -       u8  priority_mask;
  3275.         u16 buffer_size;
  3276.         int backup_pool;
  3277.     } pools[DPNI_MAX_DPBP];
  3278. diff -rupN a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
  3279. --- a/drivers/net/ethernet/freescale/enetc/enetc.c  2022-12-25 22:41:39.000000000 +0100
  3280. +++ b/drivers/net/ethernet/freescale/enetc/enetc.c  2022-12-31 15:56:55.297955293 +0100
  3281. @@ -1489,6 +1489,23 @@ static void enetc_xdp_drop(struct enetc_
  3282.     rx_ring->stats.xdp_drops++;
  3283.  }
  3284.  
  3285. +static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
  3286. +              int rx_ring_last)
  3287. +{
  3288. +   while (rx_ring_first != rx_ring_last) {
  3289. +       struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
  3290. +
  3291. +       if (rx_swbd->page) {
  3292. +           dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
  3293. +                      rx_swbd->dir);
  3294. +           __free_page(rx_swbd->page);
  3295. +           rx_swbd->page = NULL;
  3296. +       }
  3297. +       enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
  3298. +   }
  3299. +   rx_ring->stats.xdp_redirect_failures++;
  3300. +}
  3301. +
  3302.  static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
  3303.                    struct napi_struct *napi, int work_limit,
  3304.                    struct bpf_prog *prog)
  3305. @@ -1510,8 +1527,8 @@ static int enetc_clean_rx_ring_xdp(struc
  3306.         int orig_i, orig_cleaned_cnt;
  3307.         struct xdp_buff xdp_buff;
  3308.         struct sk_buff *skb;
  3309. +       int tmp_orig_i, err;
  3310.         u32 bd_status;
  3311. -       int err;
  3312.  
  3313.         rxbd = enetc_rxbd(rx_ring, i);
  3314.         bd_status = le32_to_cpu(rxbd->r.lstatus);
  3315. @@ -1598,16 +1615,18 @@ static int enetc_clean_rx_ring_xdp(struc
  3316.                 break;
  3317.             }
  3318.  
  3319. +           tmp_orig_i = orig_i;
  3320. +
  3321. +           while (orig_i != i) {
  3322. +               enetc_flip_rx_buff(rx_ring,
  3323. +                          &rx_ring->rx_swbd[orig_i]);
  3324. +               enetc_bdr_idx_inc(rx_ring, &orig_i);
  3325. +           }
  3326. +
  3327.             err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
  3328.             if (unlikely(err)) {
  3329. -               enetc_xdp_drop(rx_ring, orig_i, i);
  3330. -               rx_ring->stats.xdp_redirect_failures++;
  3331. +               enetc_xdp_free(rx_ring, tmp_orig_i, i);
  3332.             } else {
  3333. -               while (orig_i != i) {
  3334. -                   enetc_flip_rx_buff(rx_ring,
  3335. -                              &rx_ring->rx_swbd[orig_i]);
  3336. -                   enetc_bdr_idx_inc(rx_ring, &orig_i);
  3337. -               }
  3338.                 xdp_redirect_frm_cnt++;
  3339.                 rx_ring->stats.xdp_redirect++;
  3340.             }
  3341. diff -rupN a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
  3342. --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c   2022-12-25 22:41:39.000000000 +0100
  3343. +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c   2022-12-31 15:56:55.297955293 +0100
  3344. @@ -1111,6 +1111,7 @@ static void enetc_pl_mac_link_down(struc
  3345.  }
  3346.  
  3347.  static const struct phylink_mac_ops enetc_mac_phylink_ops = {
  3348. +   .validate = phylink_generic_validate,
  3349.     .mac_select_pcs = enetc_pl_mac_select_pcs,
  3350.     .mac_config = enetc_pl_mac_config,
  3351.     .mac_link_up = enetc_pl_mac_link_up,
  3352. diff -rupN a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
  3353. --- a/drivers/net/ethernet/freescale/fec.h  2022-12-25 22:41:39.000000000 +0100
  3354. +++ b/drivers/net/ethernet/freescale/fec.h  2022-12-31 15:56:55.298955279 +0100
  3355. @@ -348,6 +348,7 @@ struct bufdesc_ex {
  3356.   */
  3357.  
  3358.  #define FEC_ENET_XDP_HEADROOM  (XDP_PACKET_HEADROOM)
  3359. +
  3360.  #define FEC_ENET_RX_PAGES  256
  3361.  #define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
  3362.         - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  3363. @@ -526,19 +527,6 @@ struct fec_enet_priv_txrx_info {
  3364.     struct  sk_buff *skb;
  3365.  };
  3366.  
  3367. -enum {
  3368. -   RX_XDP_REDIRECT = 0,
  3369. -   RX_XDP_PASS,
  3370. -   RX_XDP_DROP,
  3371. -   RX_XDP_TX,
  3372. -   RX_XDP_TX_ERRORS,
  3373. -   TX_XDP_XMIT,
  3374. -   TX_XDP_XMIT_ERRORS,
  3375. -
  3376. -   /* The following must be the last one */
  3377. -   XDP_STATS_TOTAL,
  3378. -};
  3379. -
  3380.  struct fec_enet_priv_tx_q {
  3381.     struct bufdesc_prop bd;
  3382.     unsigned char *tx_bounce[TX_RING_SIZE];
  3383. @@ -559,7 +547,6 @@ struct fec_enet_priv_rx_q {
  3384.     /* page_pool */
  3385.     struct page_pool *page_pool;
  3386.     struct xdp_rxq_info xdp_rxq;
  3387. -   u32 stats[XDP_STATS_TOTAL];
  3388.  
  3389.     /* rx queue number, in the range 0-7 */
  3390.     u8 id;
  3391. @@ -671,14 +658,9 @@ struct fec_enet_private {
  3392.     unsigned int reload_period;
  3393.     int pps_enable;
  3394.     unsigned int next_counter;
  3395. -   struct hrtimer perout_timer;
  3396. -   u64 perout_stime;
  3397.  
  3398.     struct imx_sc_ipc *ipc_handle;
  3399.  
  3400. -   /* XDP BPF Program */
  3401. -   struct bpf_prog *xdp_prog;
  3402. -
  3403.     u64 ethtool_stats[];
  3404.  };
  3405.  
  3406. diff -rupN a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
  3407. --- a/drivers/net/ethernet/freescale/fec_main.c 2022-12-25 22:41:39.000000000 +0100
  3408. +++ b/drivers/net/ethernet/freescale/fec_main.c 2022-12-31 15:56:55.298955279 +0100
  3409. @@ -89,11 +89,6 @@ static const u16 fec_enet_vlan_pri_to_qu
  3410.  #define FEC_ENET_OPD_V 0xFFF0
  3411.  #define FEC_MDIO_PM_TIMEOUT  100 /* ms */
  3412.  
  3413. -#define FEC_ENET_XDP_PASS          0
  3414. -#define FEC_ENET_XDP_CONSUMED      BIT(0)
  3415. -#define FEC_ENET_XDP_TX            BIT(1)
  3416. -#define FEC_ENET_XDP_REDIR         BIT(2)
  3417. -
  3418.  struct fec_devinfo {
  3419.     u32 quirks;
  3420.  };
  3421. @@ -370,6 +365,16 @@ static void swap_buffer(void *bufaddr, i
  3422.         swab32s(buf);
  3423.  }
  3424.  
  3425. +static void swap_buffer2(void *dst_buf, void *src_buf, int len)
  3426. +{
  3427. +   int i;
  3428. +   unsigned int *src = src_buf;
  3429. +   unsigned int *dst = dst_buf;
  3430. +
  3431. +   for (i = 0; i < len; i += 4, src++, dst++)
  3432. +       *dst = swab32p(src);
  3433. +}
  3434. +
  3435.  static void fec_dump(struct net_device *ndev)
  3436.  {
  3437.     struct fec_enet_private *fep = netdev_priv(ndev);
  3438. @@ -423,14 +428,13 @@ static int
  3439.  fec_enet_create_page_pool(struct fec_enet_private *fep,
  3440.               struct fec_enet_priv_rx_q *rxq, int size)
  3441.  {
  3442. -   struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
  3443.     struct page_pool_params pp_params = {
  3444.         .order = 0,
  3445.         .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
  3446.         .pool_size = size,
  3447.         .nid = dev_to_node(&fep->pdev->dev),
  3448.         .dev = &fep->pdev->dev,
  3449. -       .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
  3450. +       .dma_dir = DMA_FROM_DEVICE,
  3451.         .offset = FEC_ENET_XDP_HEADROOM,
  3452.         .max_len = FEC_ENET_RX_FRSIZE,
  3453.     };
  3454. @@ -1490,6 +1494,53 @@ static void fec_enet_tx(struct net_devic
  3455.         fec_enet_tx_queue(ndev, i);
  3456.  }
  3457.  
  3458. +static int __maybe_unused
  3459. +fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
  3460. +{
  3461. +   struct  fec_enet_private *fep = netdev_priv(ndev);
  3462. +   int off;
  3463. +
  3464. +   off = ((unsigned long)skb->data) & fep->rx_align;
  3465. +   if (off)
  3466. +       skb_reserve(skb, fep->rx_align + 1 - off);
  3467. +
  3468. +   bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
  3469. +   if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
  3470. +       if (net_ratelimit())
  3471. +           netdev_err(ndev, "Rx DMA memory map failed\n");
  3472. +       return -ENOMEM;
  3473. +   }
  3474. +
  3475. +   return 0;
  3476. +}
  3477. +
  3478. +static bool __maybe_unused
  3479. +fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
  3480. +          struct bufdesc *bdp, u32 length, bool swap)
  3481. +{
  3482. +   struct  fec_enet_private *fep = netdev_priv(ndev);
  3483. +   struct sk_buff *new_skb;
  3484. +
  3485. +   if (length > fep->rx_copybreak)
  3486. +       return false;
  3487. +
  3488. +   new_skb = netdev_alloc_skb(ndev, length);
  3489. +   if (!new_skb)
  3490. +       return false;
  3491. +
  3492. +   dma_sync_single_for_cpu(&fep->pdev->dev,
  3493. +               fec32_to_cpu(bdp->cbd_bufaddr),
  3494. +               FEC_ENET_RX_FRSIZE - fep->rx_align,
  3495. +               DMA_FROM_DEVICE);
  3496. +   if (!swap)
  3497. +       memcpy(new_skb->data, (*skb)->data, length);
  3498. +   else
  3499. +       swap_buffer2(new_skb->data, (*skb)->data, length);
  3500. +   *skb = new_skb;
  3501. +
  3502. +   return true;
  3503. +}
  3504. +
  3505.  static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
  3506.                 struct bufdesc *bdp, int index)
  3507.  {
  3508. @@ -1505,62 +1556,6 @@ static void fec_enet_update_cbd(struct f
  3509.     bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
  3510.  }
  3511.  
  3512. -static u32
  3513. -fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
  3514. -        struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
  3515. -{
  3516. -   unsigned int sync, len = xdp->data_end - xdp->data;
  3517. -   u32 ret = FEC_ENET_XDP_PASS;
  3518. -   struct page *page;
  3519. -   int err;
  3520. -   u32 act;
  3521. -
  3522. -   act = bpf_prog_run_xdp(prog, xdp);
  3523. -
  3524. -   /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
  3525. -   sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
  3526. -   sync = max(sync, len);
  3527. -
  3528. -   switch (act) {
  3529. -   case XDP_PASS:
  3530. -       rxq->stats[RX_XDP_PASS]++;
  3531. -       ret = FEC_ENET_XDP_PASS;
  3532. -       break;
  3533. -
  3534. -   case XDP_REDIRECT:
  3535. -       rxq->stats[RX_XDP_REDIRECT]++;
  3536. -       err = xdp_do_redirect(fep->netdev, xdp, prog);
  3537. -       if (!err) {
  3538. -           ret = FEC_ENET_XDP_REDIR;
  3539. -       } else {
  3540. -           ret = FEC_ENET_XDP_CONSUMED;
  3541. -           page = virt_to_head_page(xdp->data);
  3542. -           page_pool_put_page(rxq->page_pool, page, sync, true);
  3543. -       }
  3544. -       break;
  3545. -
  3546. -   default:
  3547. -       bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
  3548. -       fallthrough;
  3549. -
  3550. -   case XDP_TX:
  3551. -       bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
  3552. -       fallthrough;
  3553. -
  3554. -   case XDP_ABORTED:
  3555. -       fallthrough;    /* handle aborts by dropping packet */
  3556. -
  3557. -   case XDP_DROP:
  3558. -       rxq->stats[RX_XDP_DROP]++;
  3559. -       ret = FEC_ENET_XDP_CONSUMED;
  3560. -       page = virt_to_head_page(xdp->data);
  3561. -       page_pool_put_page(rxq->page_pool, page, sync, true);
  3562. -       break;
  3563. -   }
  3564. -
  3565. -   return ret;
  3566. -}
  3567. -
  3568.  /* During a receive, the bd_rx.cur points to the current incoming buffer.
  3569.   * When we update through the ring, if the next incoming buffer has
  3570.   * not been given to the system, we just set the empty indicator,
  3571. @@ -1582,22 +1577,7 @@ fec_enet_rx_queue(struct net_device *nde
  3572.     u16 vlan_tag;
  3573.     int index = 0;
  3574.     bool    need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
  3575. -   struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
  3576. -   u32 ret, xdp_result = FEC_ENET_XDP_PASS;
  3577. -   u32 data_start = FEC_ENET_XDP_HEADROOM;
  3578. -   struct xdp_buff xdp;
  3579.     struct page *page;
  3580. -   u32 sub_len = 4;
  3581. -
  3582. -#if !defined(CONFIG_M5272)
  3583. -   /*If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
  3584. -    * FEC_RACC_SHIFT16 is set by default in the probe function.
  3585. -    */
  3586. -   if (fep->quirks & FEC_QUIRK_HAS_RACC) {
  3587. -       data_start += 2;
  3588. -       sub_len += 2;
  3589. -   }
  3590. -#endif
  3591.  
  3592.  #ifdef CONFIG_M532x
  3593.     flush_cache_all();
  3594. @@ -1608,7 +1588,6 @@ fec_enet_rx_queue(struct net_device *nde
  3595.      * These get messed up if we get called due to a busy condition.
  3596.      */
  3597.     bdp = rxq->bd.cur;
  3598. -   xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
  3599.  
  3600.     while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
  3601.  
  3602. @@ -1658,39 +1637,23 @@ fec_enet_rx_queue(struct net_device *nde
  3603.         prefetch(page_address(page));
  3604.         fec_enet_update_cbd(rxq, bdp, index);
  3605.  
  3606. -       if (xdp_prog) {
  3607. -           xdp_buff_clear_frags_flag(&xdp);
  3608. -           /* subtract 16bit shift and FCS */
  3609. -           xdp_prepare_buff(&xdp, page_address(page),
  3610. -                    data_start, pkt_len - sub_len, false);
  3611. -           ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
  3612. -           xdp_result |= ret;
  3613. -           if (ret != FEC_ENET_XDP_PASS)
  3614. -               goto rx_processing_done;
  3615. -       }
  3616. -
  3617.         /* The packet length includes FCS, but we don't want to
  3618.          * include that when passing upstream as it messes up
  3619.          * bridging applications.
  3620.          */
  3621.         skb = build_skb(page_address(page), PAGE_SIZE);
  3622. -       if (unlikely(!skb)) {
  3623. -           page_pool_recycle_direct(rxq->page_pool, page);
  3624. -           ndev->stats.rx_dropped++;
  3625. -
  3626. -           netdev_err_once(ndev, "build_skb failed!\n");
  3627. -           goto rx_processing_done;
  3628. -       }
  3629. -
  3630. -       skb_reserve(skb, data_start);
  3631. -       skb_put(skb, pkt_len - sub_len);
  3632. +       skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
  3633. +       skb_put(skb, pkt_len - 4);
  3634.         skb_mark_for_recycle(skb);
  3635. +       data = skb->data;
  3636.  
  3637. -       if (unlikely(need_swap)) {
  3638. -           data = page_address(page) + FEC_ENET_XDP_HEADROOM;
  3639. +       if (need_swap)
  3640.             swap_buffer(data, pkt_len);
  3641. -       }
  3642. -       data = skb->data;
  3643. +
  3644. +#if !defined(CONFIG_M5272)
  3645. +       if (fep->quirks & FEC_QUIRK_HAS_RACC)
  3646. +           data = skb_pull_inline(skb, 2);
  3647. +#endif
  3648.  
  3649.         /* Extract the enhanced buffer descriptor */
  3650.         ebdp = NULL;
  3651. @@ -1769,10 +1732,6 @@ rx_processing_done:
  3652.         writel(0, rxq->bd.reg_desc_active);
  3653.     }
  3654.     rxq->bd.cur = bdp;
  3655. -
  3656. -   if (xdp_result & FEC_ENET_XDP_REDIR)
  3657. -       xdp_do_flush_map();
  3658. -
  3659.     return pkt_received;
  3660.  }
  3661.  
  3662. @@ -2267,7 +2226,7 @@ static int fec_enet_mii_probe(struct net
  3663.     fep->link = 0;
  3664.     fep->full_duplex = 0;
  3665.  
  3666. -   phy_dev->mac_managed_pm = true;
  3667. +   phy_dev->mac_managed_pm = 1;
  3668.  
  3669.     phy_attached_info(phy_dev);
  3670.  
  3671. @@ -2712,16 +2671,6 @@ static const struct fec_stat {
  3672.  
  3673.  #define FEC_STATS_SIZE     (ARRAY_SIZE(fec_stats) * sizeof(u64))
  3674.  
  3675. -static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
  3676. -   "rx_xdp_redirect",           /* RX_XDP_REDIRECT = 0, */
  3677. -   "rx_xdp_pass",               /* RX_XDP_PASS, */
  3678. -   "rx_xdp_drop",               /* RX_XDP_DROP, */
  3679. -   "rx_xdp_tx",                 /* RX_XDP_TX, */
  3680. -   "rx_xdp_tx_errors",          /* RX_XDP_TX_ERRORS, */
  3681. -   "tx_xdp_xmit",               /* TX_XDP_XMIT, */
  3682. -   "tx_xdp_xmit_errors",        /* TX_XDP_XMIT_ERRORS, */
  3683. -};
  3684. -
  3685.  static void fec_enet_update_ethtool_stats(struct net_device *dev)
  3686.  {
  3687.     struct fec_enet_private *fep = netdev_priv(dev);
  3688. @@ -2731,40 +2680,6 @@ static void fec_enet_update_ethtool_stat
  3689.         fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
  3690.  }
  3691.  
  3692. -static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
  3693. -{
  3694. -   u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
  3695. -   struct fec_enet_priv_rx_q *rxq;
  3696. -   int i, j;
  3697. -
  3698. -   for (i = fep->num_rx_queues - 1; i >= 0; i--) {
  3699. -       rxq = fep->rx_queue[i];
  3700. -
  3701. -       for (j = 0; j < XDP_STATS_TOTAL; j++)
  3702. -           xdp_stats[j] += rxq->stats[j];
  3703. -   }
  3704. -
  3705. -   memcpy(data, xdp_stats, sizeof(xdp_stats));
  3706. -}
  3707. -
  3708. -static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
  3709. -{
  3710. -   struct page_pool_stats stats = {};
  3711. -   struct fec_enet_priv_rx_q *rxq;
  3712. -   int i;
  3713. -
  3714. -   for (i = fep->num_rx_queues - 1; i >= 0; i--) {
  3715. -       rxq = fep->rx_queue[i];
  3716. -
  3717. -       if (!rxq->page_pool)
  3718. -           continue;
  3719. -
  3720. -       page_pool_get_stats(rxq->page_pool, &stats);
  3721. -   }
  3722. -
  3723. -   page_pool_ethtool_stats_get(data, &stats);
  3724. -}
  3725. -
  3726.  static void fec_enet_get_ethtool_stats(struct net_device *dev,
  3727.                        struct ethtool_stats *stats, u64 *data)
  3728.  {
  3729. @@ -2774,12 +2689,6 @@ static void fec_enet_get_ethtool_stats(s
  3730.         fec_enet_update_ethtool_stats(dev);
  3731.  
  3732.     memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
  3733. -   data += FEC_STATS_SIZE / sizeof(u64);
  3734. -
  3735. -   fec_enet_get_xdp_stats(fep, data);
  3736. -   data += XDP_STATS_TOTAL;
  3737. -
  3738. -   fec_enet_page_pool_stats(fep, data);
  3739.  }
  3740.  
  3741.  static void fec_enet_get_strings(struct net_device *netdev,
  3742. @@ -2788,16 +2697,9 @@ static void fec_enet_get_strings(struct
  3743.     int i;
  3744.     switch (stringset) {
  3745.     case ETH_SS_STATS:
  3746. -       for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
  3747. -           memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
  3748. -           data += ETH_GSTRING_LEN;
  3749. -       }
  3750. -       for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
  3751. -           strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
  3752. -           data += ETH_GSTRING_LEN;
  3753. -       }
  3754. -       page_pool_ethtool_stats_get_strings(data);
  3755. -
  3756. +       for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
  3757. +           memcpy(data + i * ETH_GSTRING_LEN,
  3758. +               fec_stats[i].name, ETH_GSTRING_LEN);
  3759.         break;
  3760.     case ETH_SS_TEST:
  3761.         net_selftest_get_strings(data);
  3762. @@ -2807,14 +2709,9 @@ static void fec_enet_get_strings(struct
  3763.  
  3764.  static int fec_enet_get_sset_count(struct net_device *dev, int sset)
  3765.  {
  3766. -   int count;
  3767. -
  3768.     switch (sset) {
  3769.     case ETH_SS_STATS:
  3770. -       count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
  3771. -       count += page_pool_ethtool_stats_get_count();
  3772. -       return count;
  3773. -
  3774. +       return ARRAY_SIZE(fec_stats);
  3775.     case ETH_SS_TEST:
  3776.         return net_selftest_get_count();
  3777.     default:
  3778. @@ -2825,8 +2722,7 @@ static int fec_enet_get_sset_count(struc
  3779.  static void fec_enet_clear_ethtool_stats(struct net_device *dev)
  3780.  {
  3781.     struct fec_enet_private *fep = netdev_priv(dev);
  3782. -   struct fec_enet_priv_rx_q *rxq;
  3783. -   int i, j;
  3784. +   int i;
  3785.  
  3786.     /* Disable MIB statistics counters */
  3787.     writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
  3788. @@ -2834,12 +2730,6 @@ static void fec_enet_clear_ethtool_stats
  3789.     for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
  3790.         writel(0, fep->hwp + fec_stats[i].offset);
  3791.  
  3792. -   for (i = fep->num_rx_queues - 1; i >= 0; i--) {
  3793. -       rxq = fep->rx_queue[i];
  3794. -       for (j = 0; j < XDP_STATS_TOTAL; j++)
  3795. -           rxq->stats[j] = 0;
  3796. -   }
  3797. -
  3798.     /* Don't disable MIB statistics counters */
  3799.     writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
  3800.  }
  3801. @@ -3193,9 +3083,6 @@ static void fec_enet_free_buffers(struct
  3802.         for (i = 0; i < rxq->bd.ring_size; i++)
  3803.             page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
  3804.  
  3805. -       for (i = 0; i < XDP_STATS_TOTAL; i++)
  3806. -           rxq->stats[i] = 0;
  3807. -
  3808.         if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
  3809.             xdp_rxq_info_unreg(&rxq->xdp_rxq);
  3810.         page_pool_destroy(rxq->page_pool);
  3811. @@ -3675,150 +3562,6 @@ static u16 fec_enet_select_queue(struct
  3812.     return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
  3813.  }
  3814.  
  3815. -static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
  3816. -{
  3817. -   struct fec_enet_private *fep = netdev_priv(dev);
  3818. -   bool is_run = netif_running(dev);
  3819. -   struct bpf_prog *old_prog;
  3820. -
  3821. -   switch (bpf->command) {
  3822. -   case XDP_SETUP_PROG:
  3823. -       /* No need to support the SoCs that require to
  3824. -        * do the frame swap because the performance wouldn't be
  3825. -        * better than the skb mode.
  3826. -        */
  3827. -       if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
  3828. -           return -EOPNOTSUPP;
  3829. -
  3830. -       if (is_run) {
  3831. -           napi_disable(&fep->napi);
  3832. -           netif_tx_disable(dev);
  3833. -       }
  3834. -
  3835. -       old_prog = xchg(&fep->xdp_prog, bpf->prog);
  3836. -       fec_restart(dev);
  3837. -
  3838. -       if (is_run) {
  3839. -           napi_enable(&fep->napi);
  3840. -           netif_tx_start_all_queues(dev);
  3841. -       }
  3842. -
  3843. -       if (old_prog)
  3844. -           bpf_prog_put(old_prog);
  3845. -
  3846. -       return 0;
  3847. -
  3848. -   case XDP_SETUP_XSK_POOL:
  3849. -       return -EOPNOTSUPP;
  3850. -
  3851. -   default:
  3852. -       return -EOPNOTSUPP;
  3853. -   }
  3854. -}
  3855. -
  3856. -static int
  3857. -fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
  3858. -{
  3859. -   if (unlikely(index < 0))
  3860. -       return 0;
  3861. -
  3862. -   return (index % fep->num_tx_queues);
  3863. -}
  3864. -
  3865. -static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
  3866. -                  struct fec_enet_priv_tx_q *txq,
  3867. -                  struct xdp_frame *frame)
  3868. -{
  3869. -   unsigned int index, status, estatus;
  3870. -   struct bufdesc *bdp, *last_bdp;
  3871. -   dma_addr_t dma_addr;
  3872. -   int entries_free;
  3873. -
  3874. -   entries_free = fec_enet_get_free_txdesc_num(txq);
  3875. -   if (entries_free < MAX_SKB_FRAGS + 1) {
  3876. -       netdev_err(fep->netdev, "NOT enough BD for SG!\n");
  3877. -       return NETDEV_TX_OK;
  3878. -   }
  3879. -
  3880. -   /* Fill in a Tx ring entry */
  3881. -   bdp = txq->bd.cur;
  3882. -   last_bdp = bdp;
  3883. -   status = fec16_to_cpu(bdp->cbd_sc);
  3884. -   status &= ~BD_ENET_TX_STATS;
  3885. -
  3886. -   index = fec_enet_get_bd_index(bdp, &txq->bd);
  3887. -
  3888. -   dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
  3889. -                 frame->len, DMA_TO_DEVICE);
  3890. -   if (dma_mapping_error(&fep->pdev->dev, dma_addr))
  3891. -       return FEC_ENET_XDP_CONSUMED;
  3892. -
  3893. -   status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
  3894. -   if (fep->bufdesc_ex)
  3895. -       estatus = BD_ENET_TX_INT;
  3896. -
  3897. -   bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
  3898. -   bdp->cbd_datlen = cpu_to_fec16(frame->len);
  3899. -
  3900. -   if (fep->bufdesc_ex) {
  3901. -       struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
  3902. -
  3903. -       if (fep->quirks & FEC_QUIRK_HAS_AVB)
  3904. -           estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
  3905. -
  3906. -       ebdp->cbd_bdu = 0;
  3907. -       ebdp->cbd_esc = cpu_to_fec32(estatus);
  3908. -   }
  3909. -
  3910. -   index = fec_enet_get_bd_index(last_bdp, &txq->bd);
  3911. -   txq->tx_skbuff[index] = NULL;
  3912. -
  3913. -   /* Send it on its way.  Tell FEC it's ready, interrupt when done,
  3914. -    * it's the last BD of the frame, and to put the CRC on the end.
  3915. -    */
  3916. -   status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
  3917. -   bdp->cbd_sc = cpu_to_fec16(status);
  3918. -
  3919. -   /* If this was the last BD in the ring, start at the beginning again. */
  3920. -   bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
  3921. -
  3922. -   txq->bd.cur = bdp;
  3923. -
  3924. -   return 0;
  3925. -}
  3926. -
  3927. -static int fec_enet_xdp_xmit(struct net_device *dev,
  3928. -                int num_frames,
  3929. -                struct xdp_frame **frames,
  3930. -                u32 flags)
  3931. -{
  3932. -   struct fec_enet_private *fep = netdev_priv(dev);
  3933. -   struct fec_enet_priv_tx_q *txq;
  3934. -   int cpu = smp_processor_id();
  3935. -   struct netdev_queue *nq;
  3936. -   unsigned int queue;
  3937. -   int i;
  3938. -
  3939. -   queue = fec_enet_xdp_get_tx_queue(fep, cpu);
  3940. -   txq = fep->tx_queue[queue];
  3941. -   nq = netdev_get_tx_queue(fep->netdev, queue);
  3942. -
  3943. -   __netif_tx_lock(nq, cpu);
  3944. -
  3945. -   for (i = 0; i < num_frames; i++)
  3946. -       fec_enet_txq_xmit_frame(fep, txq, frames[i]);
  3947. -
  3948. -   /* Make sure the update to bdp and tx_skbuff are performed. */
  3949. -   wmb();
  3950. -
  3951. -   /* Trigger transmission start */
  3952. -   writel(0, txq->bd.reg_desc_active);
  3953. -
  3954. -   __netif_tx_unlock(nq);
  3955. -
  3956. -   return num_frames;
  3957. -}
  3958. -
  3959.  static const struct net_device_ops fec_netdev_ops = {
  3960.     .ndo_open       = fec_enet_open,
  3961.     .ndo_stop       = fec_enet_close,
  3962. @@ -3833,8 +3576,6 @@ static const struct net_device_ops fec_n
  3963.     .ndo_poll_controller    = fec_poll_controller,
  3964.  #endif
  3965.     .ndo_set_features   = fec_set_features,
  3966. -   .ndo_bpf        = fec_enet_bpf,
  3967. -   .ndo_xdp_xmit       = fec_enet_xdp_xmit,
  3968.  };
  3969.  
  3970.  static const unsigned short offset_des_active_rxq[] = {
  3971. diff -rupN a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
  3972. --- a/drivers/net/ethernet/freescale/fec_ptp.c  2022-12-25 22:41:39.000000000 +0100
  3973. +++ b/drivers/net/ethernet/freescale/fec_ptp.c  2022-12-31 15:56:55.298955279 +0100
  3974. @@ -88,9 +88,6 @@
  3975.  #define FEC_CHANNLE_0      0
  3976.  #define DEFAULT_PPS_CHANNEL    FEC_CHANNLE_0
  3977.  
  3978. -#define FEC_PTP_MAX_NSEC_PERIOD        4000000000ULL
  3979. -#define FEC_PTP_MAX_NSEC_COUNTER   0x80000000ULL
  3980. -
  3981.  /**
  3982.   * fec_ptp_enable_pps
  3983.   * @fep: the fec_enet_private structure handle
  3984. @@ -201,78 +198,6 @@ static int fec_ptp_enable_pps(struct fec
  3985.     return 0;
  3986.  }
  3987.  
  3988. -static int fec_ptp_pps_perout(struct fec_enet_private *fep)
  3989. -{
  3990. -   u32 compare_val, ptp_hc, temp_val;
  3991. -   u64 curr_time;
  3992. -   unsigned long flags;
  3993. -
  3994. -   spin_lock_irqsave(&fep->tmreg_lock, flags);
  3995. -
  3996. -   /* Update time counter */
  3997. -   timecounter_read(&fep->tc);
  3998. -
  3999. -   /* Get the current ptp hardware time counter */
  4000. -   temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
  4001. -   temp_val |= FEC_T_CTRL_CAPTURE;
  4002. -   writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
  4003. -   if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
  4004. -       udelay(1);
  4005. -
  4006. -   ptp_hc = readl(fep->hwp + FEC_ATIME);
  4007. -
  4008. -   /* Convert the ptp local counter to 1588 timestamp */
  4009. -   curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
  4010. -
  4011. -   /* If the pps start time less than current time add 100ms, just return.
  4012. -    * Because the software might not able to set the comparison time into
  4013. -    * the FEC_TCCR register in time and missed the start time.
  4014. -    */
  4015. -   if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
  4016. -       dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
  4017. -       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  4018. -       return -1;
  4019. -   }
  4020. -
  4021. -   compare_val = fep->perout_stime - curr_time + ptp_hc;
  4022. -   compare_val &= fep->cc.mask;
  4023. -
  4024. -   writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
  4025. -   fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;
  4026. -
  4027. -   /* Enable compare event when overflow */
  4028. -   temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
  4029. -   temp_val |= FEC_T_CTRL_PINPER;
  4030. -   writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
  4031. -
  4032. -   /* Compare channel setting. */
  4033. -   temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
  4034. -   temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
  4035. -   temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
  4036. -   temp_val &= ~(FEC_T_TMODE_MASK);
  4037. -   temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
  4038. -   writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));
  4039. -
  4040. -   /* Write the second compare event timestamp and calculate
  4041. -    * the third timestamp. Refer the TCCR register detail in the spec.
  4042. -    */
  4043. -   writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
  4044. -   fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
  4045. -   spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  4046. -
  4047. -   return 0;
  4048. -}
  4049. -
  4050. -static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
  4051. -{
  4052. -   struct fec_enet_private *fep = container_of(timer,
  4053. -                   struct fec_enet_private, perout_timer);
  4054. -
  4055. -   fec_ptp_pps_perout(fep);
  4056. -
  4057. -   return HRTIMER_NORESTART;
  4058. -}
  4059. -
  4060.  /**
  4061.   * fec_ptp_read - read raw cycle counter (to be used by time counter)
  4062.   * @cc: the cyclecounter structure
  4063. @@ -338,21 +263,18 @@ void fec_ptp_start_cyclecounter(struct n
  4064.  }
  4065.  
  4066.  /**
  4067. - * fec_ptp_adjfine - adjust ptp cycle frequency
  4068. + * fec_ptp_adjfreq - adjust ptp cycle frequency
  4069.   * @ptp: the ptp clock structure
  4070. - * @scaled_ppm: scaled parts per million adjustment from base
  4071. + * @ppb: parts per billion adjustment from base
  4072.   *
  4073.   * Adjust the frequency of the ptp cycle counter by the
  4074. - * indicated amount from the base frequency.
  4075. - *
  4076. - * Scaled parts per million is ppm with a 16-bit binary fractional field.
  4077. + * indicated ppb from the base frequency.
  4078.   *
  4079.   * Because ENET hardware frequency adjust is complex,
  4080.   * using software method to do that.
  4081.   */
  4082. -static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
  4083. +static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
  4084.  {
  4085. -   s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
  4086.     unsigned long flags;
  4087.     int neg_adj = 0;
  4088.     u32 i, tmp;
  4089. @@ -503,17 +425,6 @@ static int fec_ptp_settime(struct ptp_cl
  4090.     return 0;
  4091.  }
  4092.  
  4093. -static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
  4094. -{
  4095. -   unsigned long flags;
  4096. -
  4097. -   spin_lock_irqsave(&fep->tmreg_lock, flags);
  4098. -   writel(0, fep->hwp + FEC_TCSR(channel));
  4099. -   spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  4100. -
  4101. -   return 0;
  4102. -}
  4103. -
  4104.  /**
  4105.   * fec_ptp_enable
  4106.   * @ptp: the ptp clock structure
  4107. @@ -526,84 +437,14 @@ static int fec_ptp_enable(struct ptp_clo
  4108.  {
  4109.     struct fec_enet_private *fep =
  4110.         container_of(ptp, struct fec_enet_private, ptp_caps);
  4111. -   ktime_t timeout;
  4112. -   struct timespec64 start_time, period;
  4113. -   u64 curr_time, delta, period_ns;
  4114. -   unsigned long flags;
  4115.     int ret = 0;
  4116.  
  4117.     if (rq->type == PTP_CLK_REQ_PPS) {
  4118.         ret = fec_ptp_enable_pps(fep, on);
  4119.  
  4120.         return ret;
  4121. -   } else if (rq->type == PTP_CLK_REQ_PEROUT) {
  4122. -       /* Reject requests with unsupported flags */
  4123. -       if (rq->perout.flags)
  4124. -           return -EOPNOTSUPP;
  4125. -
  4126. -       if (rq->perout.index != DEFAULT_PPS_CHANNEL)
  4127. -           return -EOPNOTSUPP;
  4128. -
  4129. -       fep->pps_channel = DEFAULT_PPS_CHANNEL;
  4130. -       period.tv_sec = rq->perout.period.sec;
  4131. -       period.tv_nsec = rq->perout.period.nsec;
  4132. -       period_ns = timespec64_to_ns(&period);
  4133. -
  4134. -       /* FEC PTP timer only has 31 bits, so if the period exceed
  4135. -        * 4s is not supported.
  4136. -        */
  4137. -       if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
  4138. -           dev_err(&fep->pdev->dev, "The period must equal to or less than 4s!\n");
  4139. -           return -EOPNOTSUPP;
  4140. -       }
  4141. -
  4142. -       fep->reload_period = div_u64(period_ns, 2);
  4143. -       if (on && fep->reload_period) {
  4144. -           /* Convert 1588 timestamp to ns*/
  4145. -           start_time.tv_sec = rq->perout.start.sec;
  4146. -           start_time.tv_nsec = rq->perout.start.nsec;
  4147. -           fep->perout_stime = timespec64_to_ns(&start_time);
  4148. -
  4149. -           mutex_lock(&fep->ptp_clk_mutex);
  4150. -           if (!fep->ptp_clk_on) {
  4151. -               dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
  4152. -               mutex_unlock(&fep->ptp_clk_mutex);
  4153. -               return -EOPNOTSUPP;
  4154. -           }
  4155. -           spin_lock_irqsave(&fep->tmreg_lock, flags);
  4156. -           /* Read current timestamp */
  4157. -           curr_time = timecounter_read(&fep->tc);
  4158. -           spin_unlock_irqrestore(&fep->tmreg_lock, flags);
  4159. -           mutex_unlock(&fep->ptp_clk_mutex);
  4160. -
  4161. -           /* Calculate time difference */
  4162. -           delta = fep->perout_stime - curr_time;
  4163. -
  4164. -           if (fep->perout_stime <= curr_time) {
  4165. -               dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
  4166. -               return -EINVAL;
  4167. -           }
  4168. -
  4169. -           /* Because the timer counter of FEC only has 31-bits, correspondingly,
  4170. -            * the time comparison register FEC_TCCR also only low 31 bits can be
  4171. -            * set. If the start time of pps signal exceeds current time more than
  4172. -            * 0x80000000 ns, a software timer is used and the timer expires about
  4173. -            * 1 second before the start time to be able to set FEC_TCCR.
  4174. -            */
  4175. -           if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
  4176. -               timeout = ns_to_ktime(delta - NSEC_PER_SEC);
  4177. -               hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
  4178. -           } else {
  4179. -               return fec_ptp_pps_perout(fep);
  4180. -           }
  4181. -       } else {
  4182. -           fec_ptp_pps_disable(fep, fep->pps_channel);
  4183. -       }
  4184. -
  4185. -       return 0;
  4186. -   } else {
  4187. -       return -EOPNOTSUPP;
  4188.     }
  4189. +   return -EOPNOTSUPP;
  4190.  }
  4191.  
  4192.  /**
  4193. @@ -742,10 +583,10 @@ void fec_ptp_init(struct platform_device
  4194.     fep->ptp_caps.max_adj = 250000000;
  4195.     fep->ptp_caps.n_alarm = 0;
  4196.     fep->ptp_caps.n_ext_ts = 0;
  4197. -   fep->ptp_caps.n_per_out = 1;
  4198. +   fep->ptp_caps.n_per_out = 0;
  4199.     fep->ptp_caps.n_pins = 0;
  4200.     fep->ptp_caps.pps = 1;
  4201. -   fep->ptp_caps.adjfine = fec_ptp_adjfine;
  4202. +   fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
  4203.     fep->ptp_caps.adjtime = fec_ptp_adjtime;
  4204.     fep->ptp_caps.gettime64 = fec_ptp_gettime;
  4205.     fep->ptp_caps.settime64 = fec_ptp_settime;
  4206. @@ -764,9 +605,6 @@ void fec_ptp_init(struct platform_device
  4207.  
  4208.     INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
  4209.  
  4210. -   hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
  4211. -   fep->perout_timer.function = fec_ptp_pps_perout_handler;
  4212. -
  4213.     irq = platform_get_irq_byname_optional(pdev, "pps");
  4214.     if (irq < 0)
  4215.         irq = platform_get_irq_optional(pdev, irq_idx);
  4216. @@ -796,7 +634,6 @@ void fec_ptp_stop(struct platform_device
  4217.     struct fec_enet_private *fep = netdev_priv(ndev);
  4218.  
  4219.     cancel_delayed_work_sync(&fep->time_keep);
  4220. -   hrtimer_cancel(&fep->perout_timer);
  4221.     if (fep->ptp_clock)
  4222.         ptp_clock_unregister(fep->ptp_clock);
  4223.  }
  4224. diff -rupN a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
  4225. --- a/drivers/net/ethernet/freescale/fman/Kconfig   2022-12-25 22:41:39.000000000 +0100
  4226. +++ b/drivers/net/ethernet/freescale/fman/Kconfig   2022-12-31 15:56:55.298955279 +0100
  4227. @@ -3,8 +3,7 @@ config FSL_FMAN
  4228.     tristate "FMan support"
  4229.     depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
  4230.     select GENERIC_ALLOCATOR
  4231. -   select PHYLINK
  4232. -   select PCS_LYNX
  4233. +   select PHYLIB
  4234.     select CRC32
  4235.     default n
  4236.     help
  4237. diff -rupN a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
  4238. --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c  2022-12-25 22:41:39.000000000 +0100
  4239. +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c  2022-12-31 15:56:55.298955279 +0100
  4240. @@ -17,7 +17,6 @@
  4241.  #include <linux/crc32.h>
  4242.  #include <linux/of_mdio.h>
  4243.  #include <linux/mii.h>
  4244. -#include <linux/netdevice.h>
  4245.  
  4246.  /* TBI register addresses */
  4247.  #define MII_TBICON     0x11
  4248. @@ -30,6 +29,9 @@
  4249.  #define TBICON_CLK_SELECT  0x0020  /* Clock select */
  4250.  #define TBICON_MI_MODE     0x0010  /* GMII mode (TBI if not set) */
  4251.  
  4252. +#define TBIANA_SGMII       0x4001
  4253. +#define TBIANA_1000X       0x01a0
  4254. +
  4255.  /* Interrupt Mask Register (IMASK) */
  4256.  #define DTSEC_IMASK_BREN   0x80000000
  4257.  #define DTSEC_IMASK_RXCEN  0x40000000
  4258. @@ -90,10 +92,9 @@
  4259.  
  4260.  #define DTSEC_ECNTRL_GMIIM     0x00000040
  4261.  #define DTSEC_ECNTRL_TBIM      0x00000020
  4262. +#define DTSEC_ECNTRL_SGMIIM        0x00000002
  4263.  #define DTSEC_ECNTRL_RPM       0x00000010
  4264.  #define DTSEC_ECNTRL_R100M     0x00000008
  4265. -#define DTSEC_ECNTRL_RMM       0x00000004
  4266. -#define DTSEC_ECNTRL_SGMIIM        0x00000002
  4267.  #define DTSEC_ECNTRL_QSGMIIM       0x00000001
  4268.  
  4269.  #define TCTRL_TTSE         0x00000040
  4270. @@ -317,8 +318,7 @@ struct fman_mac {
  4271.     void *fm;
  4272.     struct fman_rev_info fm_rev_info;
  4273.     bool basex_if;
  4274. -   struct mdio_device *tbidev;
  4275. -   struct phylink_pcs pcs;
  4276. +   struct phy_device *tbiphy;
  4277.  };
  4278.  
  4279.  static void set_dflts(struct dtsec_cfg *cfg)
  4280. @@ -356,14 +356,56 @@ static int init(struct dtsec_regs __iome
  4281.         phy_interface_t iface, u16 iface_speed, u64 addr,
  4282.         u32 exception_mask, u8 tbi_addr)
  4283.  {
  4284. +   bool is_rgmii, is_sgmii, is_qsgmii;
  4285.     enet_addr_t eth_addr;
  4286. -   u32 tmp = 0;
  4287. +   u32 tmp;
  4288.     int i;
  4289.  
  4290.     /* Soft reset */
  4291.     iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
  4292.     iowrite32be(0, &regs->maccfg1);
  4293.  
  4294. +   /* dtsec_id2 */
  4295. +   tmp = ioread32be(&regs->tsec_id2);
  4296. +
  4297. +   /* check RGMII support */
  4298. +   if (iface == PHY_INTERFACE_MODE_RGMII ||
  4299. +       iface == PHY_INTERFACE_MODE_RGMII_ID ||
  4300. +       iface == PHY_INTERFACE_MODE_RGMII_RXID ||
  4301. +       iface == PHY_INTERFACE_MODE_RGMII_TXID ||
  4302. +       iface == PHY_INTERFACE_MODE_RMII)
  4303. +       if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
  4304. +           return -EINVAL;
  4305. +
  4306. +   if (iface == PHY_INTERFACE_MODE_SGMII ||
  4307. +       iface == PHY_INTERFACE_MODE_MII)
  4308. +       if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
  4309. +           return -EINVAL;
  4310. +
  4311. +   is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
  4312. +          iface == PHY_INTERFACE_MODE_RGMII_ID ||
  4313. +          iface == PHY_INTERFACE_MODE_RGMII_RXID ||
  4314. +          iface == PHY_INTERFACE_MODE_RGMII_TXID;
  4315. +   is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
  4316. +   is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
  4317. +
  4318. +   tmp = 0;
  4319. +   if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
  4320. +       tmp |= DTSEC_ECNTRL_GMIIM;
  4321. +   if (is_sgmii)
  4322. +       tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
  4323. +   if (is_qsgmii)
  4324. +       tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
  4325. +           DTSEC_ECNTRL_QSGMIIM);
  4326. +   if (is_rgmii)
  4327. +       tmp |= DTSEC_ECNTRL_RPM;
  4328. +   if (iface_speed == SPEED_100)
  4329. +       tmp |= DTSEC_ECNTRL_R100M;
  4330. +
  4331. +   iowrite32be(tmp, &regs->ecntrl);
  4332. +
  4333. +   tmp = 0;
  4334. +
  4335.     if (cfg->tx_pause_time)
  4336.         tmp |= cfg->tx_pause_time;
  4337.     if (cfg->tx_pause_time_extd)
  4338. @@ -404,10 +446,17 @@ static int init(struct dtsec_regs __iome
  4339.  
  4340.     tmp = 0;
  4341.  
  4342. +   if (iface_speed < SPEED_1000)
  4343. +       tmp |= MACCFG2_NIBBLE_MODE;
  4344. +   else if (iface_speed == SPEED_1000)
  4345. +       tmp |= MACCFG2_BYTE_MODE;
  4346. +
  4347.     tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
  4348.         MACCFG2_PREAMBLE_LENGTH_MASK;
  4349.     if (cfg->tx_pad_crc)
  4350.         tmp |= MACCFG2_PAD_CRC_EN;
  4351. +   /* Full Duplex */
  4352. +   tmp |= MACCFG2_FULL_DUPLEX;
  4353.     iowrite32be(tmp, &regs->maccfg2);
  4354.  
  4355.     tmp = (((cfg->non_back_to_back_ipg1 <<
  4356. @@ -476,6 +525,10 @@ static void set_bucket(struct dtsec_regs
  4357.  
  4358.  static int check_init_parameters(struct fman_mac *dtsec)
  4359.  {
  4360. +   if (dtsec->max_speed >= SPEED_10000) {
  4361. +       pr_err("1G MAC driver supports 1G or lower speeds\n");
  4362. +       return -EINVAL;
  4363. +   }
  4364.     if ((dtsec->dtsec_drv_param)->rx_prepend >
  4365.         MAX_PACKET_ALIGNMENT) {
  4366.         pr_err("packetAlignmentPadding can't be > than %d\n",
  4367. @@ -577,10 +630,22 @@ static int get_exception_flag(enum fman_
  4368.     return bit_mask;
  4369.  }
  4370.  
  4371. +static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
  4372. +{
  4373. +   /* Checks if dTSEC driver parameters were initialized */
  4374. +   if (!dtsec_drv_params)
  4375. +       return true;
  4376. +
  4377. +   return false;
  4378. +}
  4379. +
  4380.  static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
  4381.  {
  4382.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4383.  
  4384. +   if (is_init_done(dtsec->dtsec_drv_param))
  4385. +       return 0;
  4386. +
  4387.     return (u16)ioread32be(&regs->maxfrm);
  4388.  }
  4389.  
  4390. @@ -617,7 +682,6 @@ static void dtsec_isr(void *handle)
  4391.         dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
  4392.     if (event & DTSEC_IMASK_XFUNEN) {
  4393.         /* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
  4394. -       /* FIXME: This races with the rest of the driver! */
  4395.         if (dtsec->fm_rev_info.major == 2) {
  4396.             u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
  4397.             /* a. Write 0x00E0_0C00 to DTSEC_ID
  4398. @@ -750,43 +814,6 @@ static void free_init_resources(struct f
  4399.     dtsec->unicast_addr_hash = NULL;
  4400.  }
  4401.  
  4402. -static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
  4403. -{
  4404. -   return container_of(pcs, struct fman_mac, pcs);
  4405. -}
  4406. -
  4407. -static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
  4408. -               struct phylink_link_state *state)
  4409. -{
  4410. -   struct fman_mac *dtsec = pcs_to_dtsec(pcs);
  4411. -
  4412. -   phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
  4413. -}
  4414. -
  4415. -static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
  4416. -               phy_interface_t interface,
  4417. -               const unsigned long *advertising,
  4418. -               bool permit_pause_to_mac)
  4419. -{
  4420. -   struct fman_mac *dtsec = pcs_to_dtsec(pcs);
  4421. -
  4422. -   return phylink_mii_c22_pcs_config(dtsec->tbidev, mode, interface,
  4423. -                     advertising);
  4424. -}
  4425. -
  4426. -static void dtsec_pcs_an_restart(struct phylink_pcs *pcs)
  4427. -{
  4428. -   struct fman_mac *dtsec = pcs_to_dtsec(pcs);
  4429. -
  4430. -   phylink_mii_c22_pcs_an_restart(dtsec->tbidev);
  4431. -}
  4432. -
  4433. -static const struct phylink_pcs_ops dtsec_pcs_ops = {
  4434. -   .pcs_get_state = dtsec_pcs_get_state,
  4435. -   .pcs_config = dtsec_pcs_config,
  4436. -   .pcs_an_restart = dtsec_pcs_an_restart,
  4437. -};
  4438. -
  4439.  static void graceful_start(struct fman_mac *dtsec)
  4440.  {
  4441.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4442. @@ -827,11 +854,36 @@ static void graceful_stop(struct fman_ma
  4443.  
  4444.  static int dtsec_enable(struct fman_mac *dtsec)
  4445.  {
  4446. +   struct dtsec_regs __iomem *regs = dtsec->regs;
  4447. +   u32 tmp;
  4448. +
  4449. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4450. +       return -EINVAL;
  4451. +
  4452. +   /* Enable */
  4453. +   tmp = ioread32be(&regs->maccfg1);
  4454. +   tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
  4455. +   iowrite32be(tmp, &regs->maccfg1);
  4456. +
  4457. +   /* Graceful start - clear the graceful Rx/Tx stop bit */
  4458. +   graceful_start(dtsec);
  4459. +
  4460.     return 0;
  4461.  }
  4462.  
  4463.  static void dtsec_disable(struct fman_mac *dtsec)
  4464.  {
  4465. +   struct dtsec_regs __iomem *regs = dtsec->regs;
  4466. +   u32 tmp;
  4467. +
  4468. +   WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
  4469. +
  4470. +   /* Graceful stop - Assert the graceful Rx/Tx stop bit */
  4471. +   graceful_stop(dtsec);
  4472. +
  4473. +   tmp = ioread32be(&regs->maccfg1);
  4474. +   tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
  4475. +   iowrite32be(tmp, &regs->maccfg1);
  4476.  }
  4477.  
  4478.  static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
  4479. @@ -842,6 +894,11 @@ static int dtsec_set_tx_pause_frames(str
  4480.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4481.     u32 ptv = 0;
  4482.  
  4483. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4484. +       return -EINVAL;
  4485. +
  4486. +   graceful_stop(dtsec);
  4487. +
  4488.     if (pause_time) {
  4489.         /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
  4490.         if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
  4491. @@ -862,6 +919,8 @@ static int dtsec_set_tx_pause_frames(str
  4492.         iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
  4493.                 &regs->maccfg1);
  4494.  
  4495. +   graceful_start(dtsec);
  4496. +
  4497.     return 0;
  4498.  }
  4499.  
  4500. @@ -870,6 +929,11 @@ static int dtsec_accept_rx_pause_frames(
  4501.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4502.     u32 tmp;
  4503.  
  4504. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4505. +       return -EINVAL;
  4506. +
  4507. +   graceful_stop(dtsec);
  4508. +
  4509.     tmp = ioread32be(&regs->maccfg1);
  4510.     if (en)
  4511.         tmp |= MACCFG1_RX_FLOW;
  4512. @@ -877,124 +941,17 @@ static int dtsec_accept_rx_pause_frames(
  4513.         tmp &= ~MACCFG1_RX_FLOW;
  4514.     iowrite32be(tmp, &regs->maccfg1);
  4515.  
  4516. -   return 0;
  4517. -}
  4518. -
  4519. -static struct phylink_pcs *dtsec_select_pcs(struct phylink_config *config,
  4520. -                       phy_interface_t iface)
  4521. -{
  4522. -   struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
  4523. -
  4524. -   switch (iface) {
  4525. -   case PHY_INTERFACE_MODE_SGMII:
  4526. -   case PHY_INTERFACE_MODE_1000BASEX:
  4527. -   case PHY_INTERFACE_MODE_2500BASEX:
  4528. -       return &dtsec->pcs;
  4529. -   default:
  4530. -       return NULL;
  4531. -   }
  4532. -}
  4533. -
  4534. -static void dtsec_mac_config(struct phylink_config *config, unsigned int mode,
  4535. -                const struct phylink_link_state *state)
  4536. -{
  4537. -   struct mac_device *mac_dev = fman_config_to_mac(config);
  4538. -   struct dtsec_regs __iomem *regs = mac_dev->fman_mac->regs;
  4539. -   u32 tmp;
  4540. -
  4541. -   switch (state->interface) {
  4542. -   case PHY_INTERFACE_MODE_RMII:
  4543. -       tmp = DTSEC_ECNTRL_RMM;
  4544. -       break;
  4545. -   case PHY_INTERFACE_MODE_RGMII:
  4546. -   case PHY_INTERFACE_MODE_RGMII_ID:
  4547. -   case PHY_INTERFACE_MODE_RGMII_RXID:
  4548. -   case PHY_INTERFACE_MODE_RGMII_TXID:
  4549. -       tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM;
  4550. -       break;
  4551. -   case PHY_INTERFACE_MODE_SGMII:
  4552. -   case PHY_INTERFACE_MODE_1000BASEX:
  4553. -   case PHY_INTERFACE_MODE_2500BASEX:
  4554. -       tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM;
  4555. -       break;
  4556. -   default:
  4557. -       dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n",
  4558. -            phy_modes(state->interface));
  4559. -       return;
  4560. -   }
  4561. -
  4562. -   iowrite32be(tmp, &regs->ecntrl);
  4563. -}
  4564. -
  4565. -static void dtsec_link_up(struct phylink_config *config, struct phy_device *phy,
  4566. -             unsigned int mode, phy_interface_t interface,
  4567. -             int speed, int duplex, bool tx_pause, bool rx_pause)
  4568. -{
  4569. -   struct mac_device *mac_dev = fman_config_to_mac(config);
  4570. -   struct fman_mac *dtsec = mac_dev->fman_mac;
  4571. -   struct dtsec_regs __iomem *regs = dtsec->regs;
  4572. -   u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
  4573. -            FSL_FM_PAUSE_TIME_DISABLE;
  4574. -   u32 tmp;
  4575. -
  4576. -   dtsec_set_tx_pause_frames(dtsec, 0, pause_time, 0);
  4577. -   dtsec_accept_rx_pause_frames(dtsec, rx_pause);
  4578. -
  4579. -   tmp = ioread32be(&regs->ecntrl);
  4580. -   if (speed == SPEED_100)
  4581. -       tmp |= DTSEC_ECNTRL_R100M;
  4582. -   else
  4583. -       tmp &= ~DTSEC_ECNTRL_R100M;
  4584. -   iowrite32be(tmp, &regs->ecntrl);
  4585. -
  4586. -   tmp = ioread32be(&regs->maccfg2);
  4587. -   tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE | MACCFG2_FULL_DUPLEX);
  4588. -   if (speed >= SPEED_1000)
  4589. -       tmp |= MACCFG2_BYTE_MODE;
  4590. -   else
  4591. -       tmp |= MACCFG2_NIBBLE_MODE;
  4592. -
  4593. -   if (duplex == DUPLEX_FULL)
  4594. -       tmp |= MACCFG2_FULL_DUPLEX;
  4595. -
  4596. -   iowrite32be(tmp, &regs->maccfg2);
  4597. -
  4598. -   mac_dev->update_speed(mac_dev, speed);
  4599. -
  4600. -   /* Enable */
  4601. -   tmp = ioread32be(&regs->maccfg1);
  4602. -   tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
  4603. -   iowrite32be(tmp, &regs->maccfg1);
  4604. -
  4605. -   /* Graceful start - clear the graceful Rx/Tx stop bit */
  4606.     graceful_start(dtsec);
  4607. -}
  4608. -
  4609. -static void dtsec_link_down(struct phylink_config *config, unsigned int mode,
  4610. -               phy_interface_t interface)
  4611. -{
  4612. -   struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
  4613. -   struct dtsec_regs __iomem *regs = dtsec->regs;
  4614. -   u32 tmp;
  4615. -
  4616. -   /* Graceful stop - Assert the graceful Rx/Tx stop bit */
  4617. -   graceful_stop(dtsec);
  4618.  
  4619. -   tmp = ioread32be(&regs->maccfg1);
  4620. -   tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
  4621. -   iowrite32be(tmp, &regs->maccfg1);
  4622. +   return 0;
  4623.  }
  4624.  
  4625. -static const struct phylink_mac_ops dtsec_mac_ops = {
  4626. -   .mac_select_pcs = dtsec_select_pcs,
  4627. -   .mac_config = dtsec_mac_config,
  4628. -   .mac_link_up = dtsec_link_up,
  4629. -   .mac_link_down = dtsec_link_down,
  4630. -};
  4631. -
  4632.  static int dtsec_modify_mac_address(struct fman_mac *dtsec,
  4633.                     const enet_addr_t *enet_addr)
  4634.  {
  4635. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4636. +       return -EINVAL;
  4637. +
  4638.     graceful_stop(dtsec);
  4639.  
  4640.     /* Initialize MAC Station Address registers (1 & 2)
  4641. @@ -1018,6 +975,9 @@ static int dtsec_add_hash_mac_address(st
  4642.     u32 crc = 0xFFFFFFFF;
  4643.     bool mcast, ghtx;
  4644.  
  4645. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4646. +       return -EINVAL;
  4647. +
  4648.     addr = ENET_ADDR_TO_UINT64(*eth_addr);
  4649.  
  4650.     ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  4651. @@ -1077,6 +1037,9 @@ static int dtsec_set_allmulti(struct fma
  4652.     u32 tmp;
  4653.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4654.  
  4655. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4656. +       return -EINVAL;
  4657. +
  4658.     tmp = ioread32be(&regs->rctrl);
  4659.     if (enable)
  4660.         tmp |= RCTRL_MPROM;
  4661. @@ -1093,6 +1056,9 @@ static int dtsec_set_tstamp(struct fman_
  4662.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4663.     u32 rctrl, tctrl;
  4664.  
  4665. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4666. +       return -EINVAL;
  4667. +
  4668.     rctrl = ioread32be(&regs->rctrl);
  4669.     tctrl = ioread32be(&regs->tctrl);
  4670.  
  4671. @@ -1121,6 +1087,9 @@ static int dtsec_del_hash_mac_address(st
  4672.     u32 crc = 0xFFFFFFFF;
  4673.     bool mcast, ghtx;
  4674.  
  4675. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4676. +       return -EINVAL;
  4677. +
  4678.     addr = ENET_ADDR_TO_UINT64(*eth_addr);
  4679.  
  4680.     ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
  4681. @@ -1184,6 +1153,9 @@ static int dtsec_set_promiscuous(struct
  4682.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4683.     u32 tmp;
  4684.  
  4685. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4686. +       return -EINVAL;
  4687. +
  4688.     /* Set unicast promiscuous */
  4689.     tmp = ioread32be(&regs->rctrl);
  4690.     if (new_val)
  4691. @@ -1205,12 +1177,90 @@ static int dtsec_set_promiscuous(struct
  4692.     return 0;
  4693.  }
  4694.  
  4695. +static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
  4696. +{
  4697. +   struct dtsec_regs __iomem *regs = dtsec->regs;
  4698. +   u32 tmp;
  4699. +
  4700. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4701. +       return -EINVAL;
  4702. +
  4703. +   graceful_stop(dtsec);
  4704. +
  4705. +   tmp = ioread32be(&regs->maccfg2);
  4706. +
  4707. +   /* Full Duplex */
  4708. +   tmp |= MACCFG2_FULL_DUPLEX;
  4709. +
  4710. +   tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
  4711. +   if (speed < SPEED_1000)
  4712. +       tmp |= MACCFG2_NIBBLE_MODE;
  4713. +   else if (speed == SPEED_1000)
  4714. +       tmp |= MACCFG2_BYTE_MODE;
  4715. +   iowrite32be(tmp, &regs->maccfg2);
  4716. +
  4717. +   tmp = ioread32be(&regs->ecntrl);
  4718. +   if (speed == SPEED_100)
  4719. +       tmp |= DTSEC_ECNTRL_R100M;
  4720. +   else
  4721. +       tmp &= ~DTSEC_ECNTRL_R100M;
  4722. +   iowrite32be(tmp, &regs->ecntrl);
  4723. +
  4724. +   graceful_start(dtsec);
  4725. +
  4726. +   return 0;
  4727. +}
  4728. +
  4729. +static int dtsec_restart_autoneg(struct fman_mac *dtsec)
  4730. +{
  4731. +   u16 tmp_reg16;
  4732. +
  4733. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4734. +       return -EINVAL;
  4735. +
  4736. +   tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
  4737. +
  4738. +   tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
  4739. +   tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
  4740. +             BMCR_FULLDPLX | BMCR_SPEED1000);
  4741. +
  4742. +   phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  4743. +
  4744. +   return 0;
  4745. +}
  4746. +
  4747. +static void adjust_link_dtsec(struct mac_device *mac_dev)
  4748. +{
  4749. +   struct phy_device *phy_dev = mac_dev->phy_dev;
  4750. +   struct fman_mac *fman_mac;
  4751. +   bool rx_pause, tx_pause;
  4752. +   int err;
  4753. +
  4754. +   fman_mac = mac_dev->fman_mac;
  4755. +   if (!phy_dev->link) {
  4756. +       dtsec_restart_autoneg(fman_mac);
  4757. +
  4758. +       return;
  4759. +   }
  4760. +
  4761. +   dtsec_adjust_link(fman_mac, phy_dev->speed);
  4762. +   mac_dev->update_speed(mac_dev, phy_dev->speed);
  4763. +   fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
  4764. +   err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
  4765. +   if (err < 0)
  4766. +       dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
  4767. +           err);
  4768. +}
  4769. +
  4770.  static int dtsec_set_exception(struct fman_mac *dtsec,
  4771.                    enum fman_mac_exceptions exception, bool enable)
  4772.  {
  4773.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4774.     u32 bit_mask = 0;
  4775.  
  4776. +   if (!is_init_done(dtsec->dtsec_drv_param))
  4777. +       return -EINVAL;
  4778. +
  4779.     if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
  4780.         bit_mask = get_exception_flag(exception);
  4781.         if (bit_mask) {
  4782. @@ -1260,9 +1310,12 @@ static int dtsec_init(struct fman_mac *d
  4783.  {
  4784.     struct dtsec_regs __iomem *regs = dtsec->regs;
  4785.     struct dtsec_cfg *dtsec_drv_param;
  4786. -   u16 max_frm_ln, tbicon;
  4787. +   u16 max_frm_ln;
  4788.     int err;
  4789.  
  4790. +   if (is_init_done(dtsec->dtsec_drv_param))
  4791. +       return -EINVAL;
  4792. +
  4793.     if (DEFAULT_RESET_ON_INIT &&
  4794.         (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
  4795.         pr_err("Can't reset MAC!\n");
  4796. @@ -1277,19 +1330,38 @@ static int dtsec_init(struct fman_mac *d
  4797.  
  4798.     err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
  4799.            dtsec->max_speed, dtsec->addr, dtsec->exceptions,
  4800. -          dtsec->tbidev->addr);
  4801. +          dtsec->tbiphy->mdio.addr);
  4802.     if (err) {
  4803.         free_init_resources(dtsec);
  4804.         pr_err("DTSEC version doesn't support this i/f mode\n");
  4805.         return err;
  4806.     }
  4807.  
  4808. -   /* Configure the TBI PHY Control Register */
  4809. -   tbicon = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
  4810. -   mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
  4811. +   if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
  4812. +       u16 tmp_reg16;
  4813. +
  4814. +       /* Configure the TBI PHY Control Register */
  4815. +       tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
  4816. +       phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
  4817. +
  4818. +       tmp_reg16 = TBICON_CLK_SELECT;
  4819. +       phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
  4820. +
  4821. +       tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
  4822. +                BMCR_FULLDPLX | BMCR_SPEED1000);
  4823. +       phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  4824. +
  4825. +       if (dtsec->basex_if)
  4826. +           tmp_reg16 = TBIANA_1000X;
  4827. +       else
  4828. +           tmp_reg16 = TBIANA_SGMII;
  4829. +       phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
  4830.  
  4831. -   tbicon = TBICON_CLK_SELECT;
  4832. -   mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
  4833. +       tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
  4834. +                BMCR_FULLDPLX | BMCR_SPEED1000);
  4835. +
  4836. +       phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
  4837. +   }
  4838.  
  4839.     /* Max Frame Length */
  4840.     max_frm_ln = (u16)ioread32be(&regs->maxfrm);
  4841. @@ -1334,8 +1406,6 @@ static int dtsec_free(struct fman_mac *d
  4842.  
  4843.     kfree(dtsec->dtsec_drv_param);
  4844.     dtsec->dtsec_drv_param = NULL;
  4845. -   if (!IS_ERR_OR_NULL(dtsec->tbidev))
  4846. -       put_device(&dtsec->tbidev->dev);
  4847.     kfree(dtsec);
  4848.  
  4849.     return 0;
  4850. @@ -1364,6 +1434,7 @@ static struct fman_mac *dtsec_config(str
  4851.  
  4852.     dtsec->regs = mac_dev->vaddr;
  4853.     dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
  4854. +   dtsec->max_speed = params->max_speed;
  4855.     dtsec->phy_if = mac_dev->phy_if;
  4856.     dtsec->mac_id = params->mac_id;
  4857.     dtsec->exceptions = (DTSEC_IMASK_BREN   |
  4858. @@ -1386,6 +1457,7 @@ static struct fman_mac *dtsec_config(str
  4859.     dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
  4860.  
  4861.     dtsec->fm = params->fm;
  4862. +   dtsec->basex_if = params->basex_if;
  4863.  
  4864.     /* Save FMan revision */
  4865.     fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
  4866. @@ -1404,18 +1476,18 @@ int dtsec_initialization(struct mac_devi
  4867.     int         err;
  4868.     struct fman_mac     *dtsec;
  4869.     struct device_node  *phy_node;
  4870. -   unsigned long        capabilities;
  4871. -   unsigned long       *supported;
  4872.  
  4873. -   mac_dev->phylink_ops        = &dtsec_mac_ops;
  4874.     mac_dev->set_promisc        = dtsec_set_promiscuous;
  4875.     mac_dev->change_addr        = dtsec_modify_mac_address;
  4876.     mac_dev->add_hash_mac_addr  = dtsec_add_hash_mac_address;
  4877.     mac_dev->remove_hash_mac_addr   = dtsec_del_hash_mac_address;
  4878. +   mac_dev->set_tx_pause       = dtsec_set_tx_pause_frames;
  4879. +   mac_dev->set_rx_pause       = dtsec_accept_rx_pause_frames;
  4880.     mac_dev->set_exception      = dtsec_set_exception;
  4881.     mac_dev->set_allmulti       = dtsec_set_allmulti;
  4882.     mac_dev->set_tstamp     = dtsec_set_tstamp;
  4883.     mac_dev->set_multi      = fman_set_multi;
  4884. +   mac_dev->adjust_link            = adjust_link_dtsec;
  4885.     mac_dev->enable         = dtsec_enable;
  4886.     mac_dev->disable        = dtsec_disable;
  4887.  
  4888. @@ -1430,56 +1502,19 @@ int dtsec_initialization(struct mac_devi
  4889.     dtsec->dtsec_drv_param->tx_pad_crc = true;
  4890.  
  4891.     phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
  4892. -   if (!phy_node || of_device_is_available(phy_node)) {
  4893. -       of_node_put(phy_node);
  4894. +   if (!phy_node) {
  4895. +       pr_err("TBI PHY node is not available\n");
  4896.         err = -EINVAL;
  4897. -       dev_err_probe(mac_dev->dev, err,
  4898. -                 "TBI PCS node is not available\n");
  4899.         goto _return_fm_mac_free;
  4900.     }
  4901.  
  4902. -   dtsec->tbidev = of_mdio_find_device(phy_node);
  4903. -   of_node_put(phy_node);
  4904. -   if (!dtsec->tbidev) {
  4905. -       err = -EPROBE_DEFER;
  4906. -       dev_err_probe(mac_dev->dev, err,
  4907. -                 "could not find mdiodev for PCS\n");
  4908. +   dtsec->tbiphy = of_phy_find_device(phy_node);
  4909. +   if (!dtsec->tbiphy) {
  4910. +       pr_err("of_phy_find_device (TBI PHY) failed\n");
  4911. +       err = -EINVAL;
  4912.         goto _return_fm_mac_free;
  4913.     }
  4914. -   dtsec->pcs.ops = &dtsec_pcs_ops;
  4915. -   dtsec->pcs.poll = true;
  4916. -
  4917. -   supported = mac_dev->phylink_config.supported_interfaces;
  4918. -
  4919. -   /* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are
  4920. -    * supported? If not, we can determine support via the phy if SerDes
  4921. -    * support is added.
  4922. -    */
  4923. -   if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII ||
  4924. -       mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) {
  4925. -       __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
  4926. -       __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
  4927. -   } else if (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) {
  4928. -       __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
  4929. -   }
  4930. -
  4931. -   if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) {
  4932. -       phy_interface_set_rgmii(supported);
  4933. -
  4934. -       /* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports
  4935. -        * RMII and RGMII. However, the only SoCs which support RMII
  4936. -        * are the P1017 and P1023. Avoid advertising this mode on
  4937. -        * other SoCs. This is a bit of a moot point, since there's no
  4938. -        * in-tree support for ethernet on these platforms...
  4939. -        */
  4940. -       if (of_machine_is_compatible("fsl,P1023") ||
  4941. -           of_machine_is_compatible("fsl,P1023RDB"))
  4942. -           __set_bit(PHY_INTERFACE_MODE_RMII, supported);
  4943. -   }
  4944. -
  4945. -   capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
  4946. -   capabilities |= MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
  4947. -   mac_dev->phylink_config.mac_capabilities = capabilities;
  4948. +   put_device(&dtsec->tbiphy->mdio.dev);
  4949.  
  4950.     err = dtsec_init(dtsec);
  4951.     if (err < 0)
  4952. diff -rupN a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
  4953. --- a/drivers/net/ethernet/freescale/fman/fman_mac.h    2022-12-25 22:41:39.000000000 +0100
  4954. +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h    2022-12-31 15:56:55.299955264 +0100
  4955. @@ -170,10 +170,20 @@ struct fman_mac_params {
  4956.      * 0 - FM_MAX_NUM_OF_10G_MACS
  4957.      */
  4958.     u8 mac_id;
  4959. +   /* Note that the speed should indicate the maximum rate that
  4960. +    * this MAC should support rather than the actual speed;
  4961. +    */
  4962. +   u16 max_speed;
  4963.     /* A handle to the FM object this port related to */
  4964.     void *fm;
  4965.     fman_mac_exception_cb *event_cb;    /* MDIO Events Callback Routine */
  4966.     fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
   4967. +   /* SGMII/QSGMII interface with 1000BaseX auto-negotiation between MAC
  4968. +    * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
  4969. +    * to interface between MAC and phy/backplane, SGMII phy can still
  4970. +    * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
  4971. +   */
  4972. +   bool basex_if;
  4973.  };
  4974.  
  4975.  struct eth_hash_t {
  4976. diff -rupN a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
  4977. --- a/drivers/net/ethernet/freescale/fman/fman_memac.c  2022-12-25 22:41:39.000000000 +0100
  4978. +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c  2022-12-31 15:56:55.299955264 +0100
  4979. @@ -11,12 +11,42 @@
  4980.  
  4981.  #include <linux/slab.h>
  4982.  #include <linux/io.h>
  4983. -#include <linux/pcs-lynx.h>
  4984.  #include <linux/phy.h>
  4985.  #include <linux/phy_fixed.h>
  4986. -#include <linux/phy/phy.h>
  4987.  #include <linux/of_mdio.h>
  4988.  
  4989. +/* PCS registers */
  4990. +#define MDIO_SGMII_CR          0x00
  4991. +#define MDIO_SGMII_DEV_ABIL_SGMII  0x04
  4992. +#define MDIO_SGMII_LINK_TMR_L      0x12
  4993. +#define MDIO_SGMII_LINK_TMR_H      0x13
  4994. +#define MDIO_SGMII_IF_MODE     0x14
  4995. +
  4996. +/* SGMII Control defines */
  4997. +#define SGMII_CR_AN_EN         0x1000
  4998. +#define SGMII_CR_RESTART_AN        0x0200
  4999. +#define SGMII_CR_FD            0x0100
  5000. +#define SGMII_CR_SPEED_SEL1_1G     0x0040
  5001. +#define SGMII_CR_DEF_VAL       (SGMII_CR_AN_EN | SGMII_CR_FD | \
  5002. +                    SGMII_CR_SPEED_SEL1_1G)
  5003. +
  5004. +/* SGMII Device Ability for SGMII defines */
  5005. +#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
  5006. +#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
  5007. +
  5008. +/* Link timer define */
  5009. +#define LINK_TMR_L         0xa120
  5010. +#define LINK_TMR_H         0x0007
  5011. +#define LINK_TMR_L_BASEX       0xaf08
  5012. +#define LINK_TMR_H_BASEX       0x002f
  5013. +
  5014. +/* SGMII IF Mode defines */
  5015. +#define IF_MODE_USE_SGMII_AN       0x0002
  5016. +#define IF_MODE_SGMII_EN       0x0001
  5017. +#define IF_MODE_SGMII_SPEED_100M   0x0004
  5018. +#define IF_MODE_SGMII_SPEED_1G     0x0008
  5019. +#define IF_MODE_SGMII_DUPLEX_HALF  0x0010
  5020. +
  5021.  /* Num of additional exact match MAC adr regs */
  5022.  #define MEMAC_NUM_OF_PADDRS 7
  5023.  
  5024. @@ -278,6 +308,9 @@ struct fman_mac {
  5025.     struct memac_regs __iomem *regs;
  5026.     /* MAC address of device */
  5027.     u64 addr;
  5028. +   /* Ethernet physical interface */
  5029. +   phy_interface_t phy_if;
  5030. +   u16 max_speed;
  5031.     struct mac_device *dev_id; /* device cookie used by the exception cbs */
  5032.     fman_mac_exception_cb *exception_cb;
  5033.     fman_mac_exception_cb *event_cb;
  5034. @@ -290,12 +323,9 @@ struct fman_mac {
  5035.     struct memac_cfg *memac_drv_param;
  5036.     void *fm;
  5037.     struct fman_rev_info fm_rev_info;
  5038. -   struct phy *serdes;
  5039. -   struct phylink_pcs *sgmii_pcs;
  5040. -   struct phylink_pcs *qsgmii_pcs;
  5041. -   struct phylink_pcs *xfi_pcs;
  5042. +   bool basex_if;
  5043. +   struct phy_device *pcsphy;
  5044.     bool allmulti_enabled;
  5045. -   bool rgmii_no_half_duplex;
  5046.  };
  5047.  
  5048.  static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
  5049. @@ -353,6 +383,7 @@ static void set_exception(struct memac_r
  5050.  }
  5051.  
  5052.  static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
  5053. +       phy_interface_t phy_if, u16 speed, bool slow_10g_if,
  5054.         u32 exceptions)
  5055.  {
  5056.     u32 tmp;
  5057. @@ -380,6 +411,41 @@ static int init(struct memac_regs __iome
  5058.     iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
  5059.     iowrite32be((u32)0, &regs->pause_thresh[0]);
  5060.  
  5061. +   /* IF_MODE */
  5062. +   tmp = 0;
  5063. +   switch (phy_if) {
  5064. +   case PHY_INTERFACE_MODE_XGMII:
  5065. +       tmp |= IF_MODE_10G;
  5066. +       break;
  5067. +   case PHY_INTERFACE_MODE_MII:
  5068. +       tmp |= IF_MODE_MII;
  5069. +       break;
  5070. +   default:
  5071. +       tmp |= IF_MODE_GMII;
  5072. +       if (phy_if == PHY_INTERFACE_MODE_RGMII ||
  5073. +           phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
  5074. +           phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
  5075. +           phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
  5076. +           tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
  5077. +   }
  5078. +   iowrite32be(tmp, &regs->if_mode);
  5079. +
  5080. +   /* TX_FIFO_SECTIONS */
  5081. +   tmp = 0;
  5082. +   if (phy_if == PHY_INTERFACE_MODE_XGMII) {
  5083. +       if (slow_10g_if) {
  5084. +           tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
  5085. +               TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
  5086. +       } else {
  5087. +           tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
  5088. +               TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
  5089. +       }
  5090. +   } else {
  5091. +       tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
  5092. +           TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
  5093. +   }
  5094. +   iowrite32be(tmp, &regs->tx_fifo_sections);
  5095. +
  5096.     /* clear all pending events and set-up interrupts */
  5097.     iowrite32be(0xffffffff, &regs->ievent);
  5098.     set_exception(regs, exceptions, true);
  5099. @@ -419,6 +485,93 @@ static u32 get_mac_addr_hash_code(u64 et
  5100.     return xor_val;
  5101.  }
  5102.  
  5103. +static void setup_sgmii_internal_phy(struct fman_mac *memac,
  5104. +                    struct fixed_phy_status *fixed_link)
  5105. +{
  5106. +   u16 tmp_reg16;
  5107. +
  5108. +   if (WARN_ON(!memac->pcsphy))
  5109. +       return;
  5110. +
  5111. +   /* SGMII mode */
  5112. +   tmp_reg16 = IF_MODE_SGMII_EN;
  5113. +   if (!fixed_link)
  5114. +       /* AN enable */
  5115. +       tmp_reg16 |= IF_MODE_USE_SGMII_AN;
  5116. +   else {
  5117. +       switch (fixed_link->speed) {
  5118. +       case 10:
  5119. +           /* For 10M: IF_MODE[SPEED_10M] = 0 */
  5120. +       break;
  5121. +       case 100:
  5122. +           tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
  5123. +       break;
  5124. +       case 1000:
  5125. +       default:
  5126. +           tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
  5127. +       break;
  5128. +       }
  5129. +       if (!fixed_link->duplex)
  5130. +           tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
  5131. +   }
  5132. +   phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
  5133. +
  5134. +   /* Device ability according to SGMII specification */
  5135. +   tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
  5136. +   phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
  5137. +
  5138. +   /* Adjust link timer for SGMII  -
  5139. +    * According to Cisco SGMII specification the timer should be 1.6 ms.
  5140. +    * The link_timer register is configured in units of the clock.
  5141. +    * - When running as 1G SGMII, Serdes clock is 125 MHz, so
  5142. +    * unit = 1 / (125*10^6 Hz) = 8 ns.
  5143. +    * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
  5144. +    * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
  5145. +    * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
  5146. +    * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
  5147. +    * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
  5148. +    * we always set up here a value of 2.5 SGMII.
  5149. +    */
  5150. +   phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
  5151. +   phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
  5152. +
  5153. +   if (!fixed_link)
  5154. +       /* Restart AN */
  5155. +       tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
  5156. +   else
  5157. +       /* AN disabled */
  5158. +       tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
  5159. +   phy_write(memac->pcsphy, 0x0, tmp_reg16);
  5160. +}
  5161. +
  5162. +static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
  5163. +{
  5164. +   u16 tmp_reg16;
  5165. +
  5166. +   /* AN Device capability  */
  5167. +   tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
  5168. +   phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
  5169. +
  5170. +   /* Adjust link timer for SGMII  -
  5171. +    * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
  5172. +    * The link_timer register is configured in units of the clock.
  5173. +    * - When running as 1G SGMII, Serdes clock is 125 MHz, so
  5174. +    * unit = 1 / (125*10^6 Hz) = 8 ns.
  5175. +    * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
  5176. +    * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
  5177. +    * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
  5178. +    * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
  5179. +    * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
  5180. +    * we always set up here a value of 2.5 SGMII.
  5181. +    */
  5182. +   phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
  5183. +   phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
  5184. +
  5185. +   /* Restart AN */
  5186. +   tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
  5187. +   phy_write(memac->pcsphy, 0x0, tmp_reg16);
  5188. +}
  5189. +
  5190.  static int check_init_parameters(struct fman_mac *memac)
  5191.  {
  5192.     if (!memac->exception_cb) {
  5193. @@ -524,31 +677,41 @@ static void free_init_resources(struct f
  5194.     memac->unicast_addr_hash = NULL;
  5195.  }
  5196.  
  5197. +static bool is_init_done(struct memac_cfg *memac_drv_params)
  5198. +{
  5199. +   /* Checks if mEMAC driver parameters were initialized */
  5200. +   if (!memac_drv_params)
  5201. +       return true;
  5202. +
  5203. +   return false;
  5204. +}
  5205. +
  5206.  static int memac_enable(struct fman_mac *memac)
  5207.  {
  5208. -   int ret;
  5209. +   struct memac_regs __iomem *regs = memac->regs;
  5210. +   u32 tmp;
  5211.  
  5212. -   ret = phy_init(memac->serdes);
  5213. -   if (ret) {
  5214. -       dev_err(memac->dev_id->dev,
  5215. -           "could not initialize serdes: %pe\n", ERR_PTR(ret));
  5216. -       return ret;
  5217. -   }
  5218. +   if (!is_init_done(memac->memac_drv_param))
  5219. +       return -EINVAL;
  5220.  
  5221. -   ret = phy_power_on(memac->serdes);
  5222. -   if (ret) {
  5223. -       dev_err(memac->dev_id->dev,
  5224. -           "could not power on serdes: %pe\n", ERR_PTR(ret));
  5225. -       phy_exit(memac->serdes);
  5226. -   }
  5227. +   tmp = ioread32be(&regs->command_config);
  5228. +   tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
  5229. +   iowrite32be(tmp, &regs->command_config);
  5230.  
  5231. -   return ret;
  5232. +   return 0;
  5233.  }
  5234.  
  5235.  static void memac_disable(struct fman_mac *memac)
  5236. +
  5237.  {
  5238. -   phy_power_off(memac->serdes);
  5239. -   phy_exit(memac->serdes);
  5240. +   struct memac_regs __iomem *regs = memac->regs;
  5241. +   u32 tmp;
  5242. +
  5243. +   WARN_ON_ONCE(!is_init_done(memac->memac_drv_param));
  5244. +
  5245. +   tmp = ioread32be(&regs->command_config);
  5246. +   tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
  5247. +   iowrite32be(tmp, &regs->command_config);
  5248.  }
  5249.  
  5250.  static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
  5251. @@ -556,6 +719,9 @@ static int memac_set_promiscuous(struct
  5252.     struct memac_regs __iomem *regs = memac->regs;
  5253.     u32 tmp;
  5254.  
  5255. +   if (!is_init_done(memac->memac_drv_param))
  5256. +       return -EINVAL;
  5257. +
  5258.     tmp = ioread32be(&regs->command_config);
  5259.     if (new_val)
  5260.         tmp |= CMD_CFG_PROMIS_EN;
  5261. @@ -567,12 +733,73 @@ static int memac_set_promiscuous(struct
  5262.     return 0;
  5263.  }
  5264.  
  5265. +static int memac_adjust_link(struct fman_mac *memac, u16 speed)
  5266. +{
  5267. +   struct memac_regs __iomem *regs = memac->regs;
  5268. +   u32 tmp;
  5269. +
  5270. +   if (!is_init_done(memac->memac_drv_param))
  5271. +       return -EINVAL;
  5272. +
  5273. +   tmp = ioread32be(&regs->if_mode);
  5274. +
  5275. +   /* Set full duplex */
  5276. +   tmp &= ~IF_MODE_HD;
  5277. +
  5278. +   if (phy_interface_mode_is_rgmii(memac->phy_if)) {
  5279. +       /* Configure RGMII in manual mode */
  5280. +       tmp &= ~IF_MODE_RGMII_AUTO;
  5281. +       tmp &= ~IF_MODE_RGMII_SP_MASK;
  5282. +       /* Full duplex */
  5283. +       tmp |= IF_MODE_RGMII_FD;
  5284. +
  5285. +       switch (speed) {
  5286. +       case SPEED_1000:
  5287. +           tmp |= IF_MODE_RGMII_1000;
  5288. +           break;
  5289. +       case SPEED_100:
  5290. +           tmp |= IF_MODE_RGMII_100;
  5291. +           break;
  5292. +       case SPEED_10:
  5293. +           tmp |= IF_MODE_RGMII_10;
  5294. +           break;
  5295. +       default:
  5296. +           break;
  5297. +       }
  5298. +   }
  5299. +
  5300. +   iowrite32be(tmp, &regs->if_mode);
  5301. +
  5302. +   return 0;
  5303. +}
  5304. +
  5305. +static void adjust_link_memac(struct mac_device *mac_dev)
  5306. +{
  5307. +   struct phy_device *phy_dev = mac_dev->phy_dev;
  5308. +   struct fman_mac *fman_mac;
  5309. +   bool rx_pause, tx_pause;
  5310. +   int err;
  5311. +
  5312. +   fman_mac = mac_dev->fman_mac;
  5313. +   memac_adjust_link(fman_mac, phy_dev->speed);
  5314. +   mac_dev->update_speed(mac_dev, phy_dev->speed);
  5315. +
  5316. +   fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
  5317. +   err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
  5318. +   if (err < 0)
  5319. +       dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
  5320. +           err);
  5321. +}
  5322. +
  5323.  static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
  5324.                      u16 pause_time, u16 thresh_time)
  5325.  {
  5326.     struct memac_regs __iomem *regs = memac->regs;
  5327.     u32 tmp;
  5328.  
  5329. +   if (!is_init_done(memac->memac_drv_param))
  5330. +       return -EINVAL;
  5331. +
  5332.     tmp = ioread32be(&regs->tx_fifo_sections);
  5333.  
  5334.     GET_TX_EMPTY_DEFAULT_VALUE(tmp);
  5335. @@ -607,6 +834,9 @@ static int memac_accept_rx_pause_frames(
  5336.     struct memac_regs __iomem *regs = memac->regs;
  5337.     u32 tmp;
  5338.  
  5339. +   if (!is_init_done(memac->memac_drv_param))
  5340. +       return -EINVAL;
  5341. +
  5342.     tmp = ioread32be(&regs->command_config);
  5343.     if (en)
  5344.         tmp &= ~CMD_CFG_PAUSE_IGNORE;
  5345. @@ -618,175 +848,12 @@ static int memac_accept_rx_pause_frames(
  5346.     return 0;
  5347.  }
  5348.  
  5349. -static void memac_validate(struct phylink_config *config,
  5350. -              unsigned long *supported,
  5351. -              struct phylink_link_state *state)
  5352. -{
  5353. -   struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
  5354. -   unsigned long caps = config->mac_capabilities;
  5355. -
  5356. -   if (phy_interface_mode_is_rgmii(state->interface) &&
  5357. -       memac->rgmii_no_half_duplex)
  5358. -       caps &= ~(MAC_10HD | MAC_100HD);
  5359. -
  5360. -   phylink_validate_mask_caps(supported, state, caps);
  5361. -}
  5362. -
  5363. -/**
  5364. - * memac_if_mode() - Convert an interface mode into an IF_MODE config
  5365. - * @interface: A phy interface mode
  5366. - *
  5367. - * Return: A configuration word, suitable for programming into the lower bits
  5368. - *         of %IF_MODE.
  5369. - */
  5370. -static u32 memac_if_mode(phy_interface_t interface)
  5371. -{
  5372. -   switch (interface) {
  5373. -   case PHY_INTERFACE_MODE_MII:
  5374. -       return IF_MODE_MII;
  5375. -   case PHY_INTERFACE_MODE_RGMII:
  5376. -   case PHY_INTERFACE_MODE_RGMII_ID:
  5377. -   case PHY_INTERFACE_MODE_RGMII_RXID:
  5378. -   case PHY_INTERFACE_MODE_RGMII_TXID:
  5379. -       return IF_MODE_GMII | IF_MODE_RGMII;
  5380. -   case PHY_INTERFACE_MODE_SGMII:
  5381. -   case PHY_INTERFACE_MODE_1000BASEX:
  5382. -   case PHY_INTERFACE_MODE_QSGMII:
  5383. -       return IF_MODE_GMII;
  5384. -   case PHY_INTERFACE_MODE_10GBASER:
  5385. -       return IF_MODE_10G;
  5386. -   default:
  5387. -       WARN_ON_ONCE(1);
  5388. -       return 0;
  5389. -   }
  5390. -}
  5391. -
  5392. -static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
  5393. -                       phy_interface_t iface)
  5394. -{
  5395. -   struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
  5396. -
  5397. -   switch (iface) {
  5398. -   case PHY_INTERFACE_MODE_SGMII:
  5399. -   case PHY_INTERFACE_MODE_1000BASEX:
  5400. -       return memac->sgmii_pcs;
  5401. -   case PHY_INTERFACE_MODE_QSGMII:
  5402. -       return memac->qsgmii_pcs;
  5403. -   case PHY_INTERFACE_MODE_10GBASER:
  5404. -       return memac->xfi_pcs;
  5405. -   default:
  5406. -       return NULL;
  5407. -   }
  5408. -}
  5409. -
  5410. -static int memac_prepare(struct phylink_config *config, unsigned int mode,
  5411. -            phy_interface_t iface)
  5412. -{
  5413. -   struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
  5414. -
  5415. -   switch (iface) {
  5416. -   case PHY_INTERFACE_MODE_SGMII:
  5417. -   case PHY_INTERFACE_MODE_1000BASEX:
  5418. -   case PHY_INTERFACE_MODE_QSGMII:
  5419. -   case PHY_INTERFACE_MODE_10GBASER:
  5420. -       return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
  5421. -                   iface);
  5422. -   default:
  5423. -       return 0;
  5424. -   }
  5425. -}
  5426. -
  5427. -static void memac_mac_config(struct phylink_config *config, unsigned int mode,
  5428. -                const struct phylink_link_state *state)
  5429. -{
  5430. -   struct mac_device *mac_dev = fman_config_to_mac(config);
  5431. -   struct memac_regs __iomem *regs = mac_dev->fman_mac->regs;
  5432. -   u32 tmp = ioread32be(&regs->if_mode);
  5433. -
  5434. -   tmp &= ~(IF_MODE_MASK | IF_MODE_RGMII);
  5435. -   tmp |= memac_if_mode(state->interface);
  5436. -   if (phylink_autoneg_inband(mode))
  5437. -       tmp |= IF_MODE_RGMII_AUTO;
  5438. -   iowrite32be(tmp, &regs->if_mode);
  5439. -}
  5440. -
  5441. -static void memac_link_up(struct phylink_config *config, struct phy_device *phy,
  5442. -             unsigned int mode, phy_interface_t interface,
  5443. -             int speed, int duplex, bool tx_pause, bool rx_pause)
  5444. -{
  5445. -   struct mac_device *mac_dev = fman_config_to_mac(config);
  5446. -   struct fman_mac *memac = mac_dev->fman_mac;
  5447. -   struct memac_regs __iomem *regs = memac->regs;
  5448. -   u32 tmp = memac_if_mode(interface);
  5449. -   u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
  5450. -            FSL_FM_PAUSE_TIME_DISABLE;
  5451. -
  5452. -   memac_set_tx_pause_frames(memac, 0, pause_time, 0);
  5453. -   memac_accept_rx_pause_frames(memac, rx_pause);
  5454. -
  5455. -   if (duplex == DUPLEX_HALF)
  5456. -       tmp |= IF_MODE_HD;
  5457. -
  5458. -   switch (speed) {
  5459. -   case SPEED_1000:
  5460. -       tmp |= IF_MODE_RGMII_1000;
  5461. -       break;
  5462. -   case SPEED_100:
  5463. -       tmp |= IF_MODE_RGMII_100;
  5464. -       break;
  5465. -   case SPEED_10:
  5466. -       tmp |= IF_MODE_RGMII_10;
  5467. -       break;
  5468. -   }
  5469. -   iowrite32be(tmp, &regs->if_mode);
  5470. -
  5471. -   /* TODO: EEE? */
  5472. -
  5473. -   if (speed == SPEED_10000) {
  5474. -       if (memac->fm_rev_info.major == 6 &&
  5475. -           memac->fm_rev_info.minor == 4)
  5476. -           tmp = TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G;
  5477. -       else
  5478. -           tmp = TX_FIFO_SECTIONS_TX_AVAIL_10G;
  5479. -       tmp |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G;
  5480. -   } else {
  5481. -       tmp = TX_FIFO_SECTIONS_TX_AVAIL_1G |
  5482. -             TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G;
  5483. -   }
  5484. -   iowrite32be(tmp, &regs->tx_fifo_sections);
  5485. -
  5486. -   mac_dev->update_speed(mac_dev, speed);
  5487. -
  5488. -   tmp = ioread32be(&regs->command_config);
  5489. -   tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
  5490. -   iowrite32be(tmp, &regs->command_config);
  5491. -}
  5492. -
  5493. -static void memac_link_down(struct phylink_config *config, unsigned int mode,
  5494. -               phy_interface_t interface)
  5495. -{
  5496. -   struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
  5497. -   struct memac_regs __iomem *regs = memac->regs;
  5498. -   u32 tmp;
  5499. -
  5500. -   /* TODO: graceful */
  5501. -   tmp = ioread32be(&regs->command_config);
  5502. -   tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
  5503. -   iowrite32be(tmp, &regs->command_config);
  5504. -}
  5505. -
  5506. -static const struct phylink_mac_ops memac_mac_ops = {
  5507. -   .validate = memac_validate,
  5508. -   .mac_select_pcs = memac_select_pcs,
  5509. -   .mac_prepare = memac_prepare,
  5510. -   .mac_config = memac_mac_config,
  5511. -   .mac_link_up = memac_link_up,
  5512. -   .mac_link_down = memac_link_down,
  5513. -};
  5514. -
  5515.  static int memac_modify_mac_address(struct fman_mac *memac,
  5516.                     const enet_addr_t *enet_addr)
  5517.  {
  5518. +   if (!is_init_done(memac->memac_drv_param))
  5519. +       return -EINVAL;
  5520. +
  5521.     add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
  5522.  
  5523.     return 0;
  5524. @@ -800,6 +867,9 @@ static int memac_add_hash_mac_address(st
  5525.     u32 hash;
  5526.     u64 addr;
  5527.  
  5528. +   if (!is_init_done(memac->memac_drv_param))
  5529. +       return -EINVAL;
  5530. +
  5531.     addr = ENET_ADDR_TO_UINT64(*eth_addr);
  5532.  
  5533.     if (!(addr & GROUP_ADDRESS)) {
  5534. @@ -828,6 +898,9 @@ static int memac_set_allmulti(struct fma
  5535.     u32 entry;
  5536.     struct memac_regs __iomem *regs = memac->regs;
  5537.  
  5538. +   if (!is_init_done(memac->memac_drv_param))
  5539. +       return -EINVAL;
  5540. +
  5541.     if (enable) {
  5542.         for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
  5543.             iowrite32be(entry | HASH_CTRL_MCAST_EN,
  5544. @@ -857,6 +930,9 @@ static int memac_del_hash_mac_address(st
  5545.     u32 hash;
  5546.     u64 addr;
  5547.  
  5548. +   if (!is_init_done(memac->memac_drv_param))
  5549. +       return -EINVAL;
  5550. +
  5551.     addr = ENET_ADDR_TO_UINT64(*eth_addr);
  5552.  
  5553.     hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
  5554. @@ -884,6 +960,9 @@ static int memac_set_exception(struct fm
  5555.  {
  5556.     u32 bit_mask = 0;
  5557.  
  5558. +   if (!is_init_done(memac->memac_drv_param))
  5559. +       return -EINVAL;
  5560. +
  5561.     bit_mask = get_exception_flag(exception);
  5562.     if (bit_mask) {
  5563.         if (enable)
  5564. @@ -902,16 +981,25 @@ static int memac_set_exception(struct fm
  5565.  static int memac_init(struct fman_mac *memac)
  5566.  {
  5567.     struct memac_cfg *memac_drv_param;
  5568. +   u8 i;
  5569.     enet_addr_t eth_addr;
  5570. +   bool slow_10g_if = false;
  5571. +   struct fixed_phy_status *fixed_link = NULL;
  5572.     int err;
  5573.     u32 reg32 = 0;
  5574.  
  5575. +   if (is_init_done(memac->memac_drv_param))
  5576. +       return -EINVAL;
  5577. +
  5578.     err = check_init_parameters(memac);
  5579.     if (err)
  5580.         return err;
  5581.  
  5582.     memac_drv_param = memac->memac_drv_param;
  5583.  
  5584. +   if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
  5585. +       slow_10g_if = true;
  5586. +
  5587.     /* First, reset the MAC if desired. */
  5588.     if (memac_drv_param->reset_on_init) {
  5589.         err = reset(memac->regs);
  5590. @@ -927,7 +1015,10 @@ static int memac_init(struct fman_mac *m
  5591.         add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
  5592.     }
  5593.  
  5594. -   init(memac->regs, memac->memac_drv_param, memac->exceptions);
  5595. +   fixed_link = memac_drv_param->fixed_link;
  5596. +
  5597. +   init(memac->regs, memac->memac_drv_param, memac->phy_if,
  5598. +        memac->max_speed, slow_10g_if, memac->exceptions);
  5599.  
  5600.     /* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
  5601.      * Exists only in FMan 6.0 and 6.3.
  5602. @@ -943,6 +1034,33 @@ static int memac_init(struct fman_mac *m
  5603.         iowrite32be(reg32, &memac->regs->command_config);
  5604.     }
  5605.  
  5606. +   if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
  5607. +       /* Configure internal SGMII PHY */
  5608. +       if (memac->basex_if)
  5609. +           setup_sgmii_internal_phy_base_x(memac);
  5610. +       else
  5611. +           setup_sgmii_internal_phy(memac, fixed_link);
  5612. +   } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
  5613. +       /* Configure 4 internal SGMII PHYs */
  5614. +       for (i = 0; i < 4; i++) {
  5615. +           u8 qsmgii_phy_addr, phy_addr;
  5616. +           /* QSGMII PHY address occupies 3 upper bits of 5-bit
  5617. +            * phy_address; the lower 2 bits are used to extend
  5618. +            * register address space and access each one of 4
  5619. +            * ports inside QSGMII.
  5620. +            */
  5621. +           phy_addr = memac->pcsphy->mdio.addr;
  5622. +           qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
  5623. +           memac->pcsphy->mdio.addr = qsmgii_phy_addr;
  5624. +           if (memac->basex_if)
  5625. +               setup_sgmii_internal_phy_base_x(memac);
  5626. +           else
  5627. +               setup_sgmii_internal_phy(memac, fixed_link);
  5628. +
  5629. +           memac->pcsphy->mdio.addr = phy_addr;
  5630. +       }
  5631. +   }
  5632. +
  5633.     /* Max Frame Length */
  5634.     err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
  5635.                      memac_drv_param->max_frame_length);
  5636. @@ -971,28 +1089,19 @@ static int memac_init(struct fman_mac *m
  5637.     fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
  5638.                FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
  5639.  
  5640. -   return 0;
  5641. -}
  5642. -
  5643. -static void pcs_put(struct phylink_pcs *pcs)
  5644. -{
  5645. -   struct mdio_device *mdiodev;
  5646. -
  5647. -   if (IS_ERR_OR_NULL(pcs))
  5648. -       return;
  5649. +   kfree(memac_drv_param);
  5650. +   memac->memac_drv_param = NULL;
  5651.  
  5652. -   mdiodev = lynx_get_mdio_device(pcs);
  5653. -   lynx_pcs_destroy(pcs);
  5654. -   mdio_device_free(mdiodev);
  5655. +   return 0;
  5656.  }
  5657.  
  5658.  static int memac_free(struct fman_mac *memac)
  5659.  {
  5660.     free_init_resources(memac);
  5661.  
  5662. -   pcs_put(memac->sgmii_pcs);
  5663. -   pcs_put(memac->qsgmii_pcs);
  5664. -   pcs_put(memac->xfi_pcs);
  5665. +   if (memac->pcsphy)
  5666. +       put_device(&memac->pcsphy->mdio.dev);
  5667. +
  5668.     kfree(memac->memac_drv_param);
  5669.     kfree(memac);
  5670.  
  5671. @@ -1025,6 +1134,8 @@ static struct fman_mac *memac_config(str
  5672.     memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
  5673.  
  5674.     memac->regs = mac_dev->vaddr;
  5675. +   memac->max_speed = params->max_speed;
  5676. +   memac->phy_if = mac_dev->phy_if;
  5677.     memac->mac_id = params->mac_id;
  5678.     memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
  5679.                  MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
  5680. @@ -1032,6 +1143,7 @@ static struct fman_mac *memac_config(str
  5681.     memac->event_cb = params->event_cb;
  5682.     memac->dev_id = mac_dev;
  5683.     memac->fm = params->fm;
  5684. +   memac->basex_if = params->basex_if;
  5685.  
  5686.     /* Save FMan revision */
  5687.     fman_get_revision(memac->fm, &memac->fm_rev_info);
  5688. @@ -1039,221 +1151,101 @@ static struct fman_mac *memac_config(str
  5689.     return memac;
  5690.  }
  5691.  
  5692. -static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
  5693. -                       int index)
  5694. -{
  5695. -   struct device_node *node;
  5696. -   struct mdio_device *mdiodev = NULL;
  5697. -   struct phylink_pcs *pcs;
  5698. -
  5699. -   node = of_parse_phandle(mac_node, "pcsphy-handle", index);
  5700. -   if (node && of_device_is_available(node))
  5701. -       mdiodev = of_mdio_find_device(node);
  5702. -   of_node_put(node);
  5703. -
  5704. -   if (!mdiodev)
  5705. -       return ERR_PTR(-EPROBE_DEFER);
  5706. -
  5707. -   pcs = lynx_pcs_create(mdiodev);
  5708. -   return pcs;
  5709. -}
  5710. -
  5711. -static bool memac_supports(struct mac_device *mac_dev, phy_interface_t iface)
  5712. -{
  5713. -   /* If there's no serdes device, assume that it's been configured for
  5714. -    * whatever the default interface mode is.
  5715. -    */
  5716. -   if (!mac_dev->fman_mac->serdes)
  5717. -       return mac_dev->phy_if == iface;
  5718. -   /* Otherwise, ask the serdes */
  5719. -   return !phy_validate(mac_dev->fman_mac->serdes, PHY_MODE_ETHERNET,
  5720. -                iface, NULL);
  5721. -}
  5722. -
  5723.  int memac_initialization(struct mac_device *mac_dev,
  5724.              struct device_node *mac_node,
  5725.              struct fman_mac_params *params)
  5726.  {
  5727.     int          err;
  5728. -   struct device_node      *fixed;
  5729. -   struct phylink_pcs  *pcs;
  5730. +   struct device_node  *phy_node;
  5731. +   struct fixed_phy_status *fixed_link;
  5732.     struct fman_mac     *memac;
  5733. -   unsigned long        capabilities;
  5734. -   unsigned long       *supported;
  5735.  
  5736. -   mac_dev->phylink_ops        = &memac_mac_ops;
  5737.     mac_dev->set_promisc        = memac_set_promiscuous;
  5738.     mac_dev->change_addr        = memac_modify_mac_address;
  5739.     mac_dev->add_hash_mac_addr  = memac_add_hash_mac_address;
  5740.     mac_dev->remove_hash_mac_addr   = memac_del_hash_mac_address;
  5741. +   mac_dev->set_tx_pause       = memac_set_tx_pause_frames;
  5742. +   mac_dev->set_rx_pause       = memac_accept_rx_pause_frames;
  5743.     mac_dev->set_exception      = memac_set_exception;
  5744.     mac_dev->set_allmulti       = memac_set_allmulti;
  5745.     mac_dev->set_tstamp     = memac_set_tstamp;
  5746.     mac_dev->set_multi      = fman_set_multi;
  5747. +   mac_dev->adjust_link            = adjust_link_memac;
  5748.     mac_dev->enable         = memac_enable;
  5749.     mac_dev->disable        = memac_disable;
  5750.  
  5751. +   if (params->max_speed == SPEED_10000)
  5752. +       mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII;
  5753. +
  5754.     mac_dev->fman_mac = memac_config(mac_dev, params);
  5755. -   if (!mac_dev->fman_mac)
  5756. -       return -EINVAL;
  5757. +   if (!mac_dev->fman_mac) {
  5758. +       err = -EINVAL;
  5759. +       goto _return;
  5760. +   }
  5761.  
  5762.     memac = mac_dev->fman_mac;
  5763.     memac->memac_drv_param->max_frame_length = fman_get_max_frm();
  5764.     memac->memac_drv_param->reset_on_init = true;
  5765. -
  5766. -   err = of_property_match_string(mac_node, "pcs-handle-names", "xfi");
  5767. -   if (err >= 0) {
  5768. -       memac->xfi_pcs = memac_pcs_create(mac_node, err);
  5769. -       if (IS_ERR(memac->xfi_pcs)) {
  5770. -           err = PTR_ERR(memac->xfi_pcs);
  5771. -           dev_err_probe(mac_dev->dev, err, "missing xfi pcs\n");
  5772. +   if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
  5773. +       memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
  5774. +       phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
  5775. +       if (!phy_node) {
  5776. +           pr_err("PCS PHY node is not available\n");
  5777. +           err = -EINVAL;
  5778.             goto _return_fm_mac_free;
  5779.         }
  5780. -   } else if (err != -EINVAL && err != -ENODATA) {
  5781. -       goto _return_fm_mac_free;
  5782. -   }
  5783.  
  5784. -   err = of_property_match_string(mac_node, "pcs-handle-names", "qsgmii");
  5785. -   if (err >= 0) {
  5786. -       memac->qsgmii_pcs = memac_pcs_create(mac_node, err);
  5787. -       if (IS_ERR(memac->qsgmii_pcs)) {
  5788. -           err = PTR_ERR(memac->qsgmii_pcs);
  5789. -           dev_err_probe(mac_dev->dev, err,
  5790. -                     "missing qsgmii pcs\n");
  5791. +       memac->pcsphy = of_phy_find_device(phy_node);
  5792. +       if (!memac->pcsphy) {
  5793. +           pr_err("of_phy_find_device (PCS PHY) failed\n");
  5794. +           err = -EINVAL;
  5795.             goto _return_fm_mac_free;
  5796.         }
  5797. -   } else if (err != -EINVAL && err != -ENODATA) {
  5798. -       goto _return_fm_mac_free;
  5799.     }
  5800.  
  5801. -   /* For compatibility, if pcs-handle-names is missing, we assume this
  5802. -    * phy is the first one in pcsphy-handle
  5803. -    */
  5804. -   err = of_property_match_string(mac_node, "pcs-handle-names", "sgmii");
  5805. -   if (err == -EINVAL || err == -ENODATA)
  5806. -       pcs = memac_pcs_create(mac_node, 0);
  5807. -   else if (err < 0)
  5808. -       goto _return_fm_mac_free;
  5809. -   else
  5810. -       pcs = memac_pcs_create(mac_node, err);
  5811. -
  5812. -   if (IS_ERR(pcs)) {
  5813. -       err = PTR_ERR(pcs);
  5814. -       dev_err_probe(mac_dev->dev, err, "missing pcs\n");
  5815. -       goto _return_fm_mac_free;
  5816. -   }
  5817. +   if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
  5818. +       struct phy_device *phy;
  5819.  
  5820. -   /* If err is set here, it means that pcs-handle-names was missing above
  5821. -    * (and therefore that xfi_pcs cannot be set). If we are defaulting to
  5822. -    * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
  5823. -    */
  5824. -   if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
  5825. -       memac->xfi_pcs = pcs;
  5826. -   else
  5827. -       memac->sgmii_pcs = pcs;
  5828. +       err = of_phy_register_fixed_link(mac_node);
  5829. +       if (err)
  5830. +           goto _return_fm_mac_free;
  5831.  
  5832. -   memac->serdes = devm_of_phy_get(mac_dev->dev, mac_node, "serdes");
  5833. -   err = PTR_ERR(memac->serdes);
  5834. -   if (err == -ENODEV || err == -ENOSYS) {
  5835. -       dev_dbg(mac_dev->dev, "could not get (optional) serdes\n");
  5836. -       memac->serdes = NULL;
  5837. -   } else if (IS_ERR(memac->serdes)) {
  5838. -       dev_err_probe(mac_dev->dev, err, "could not get serdes\n");
  5839. -       goto _return_fm_mac_free;
  5840. -   }
  5841. -
  5842. -   /* The internal connection to the serdes is XGMII, but this isn't
  5843. -    * really correct for the phy mode (which is the external connection).
  5844. -    * However, this is how all older device trees say that they want
  5845. -    * 10GBASE-R (aka XFI), so just convert it for them.
  5846. -    */
  5847. -   if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
  5848. -       mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
  5849. +       fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL);
  5850. +       if (!fixed_link) {
  5851. +           err = -ENOMEM;
  5852. +           goto _return_fm_mac_free;
  5853. +       }
  5854.  
  5855. -   /* TODO: The following interface modes are supported by (some) hardware
  5856. -    * but not by this driver:
  5857. -    * - 1000BASE-KX
  5858. -    * - 10GBASE-KR
  5859. -    * - XAUI/HiGig
  5860. -    */
  5861. -   supported = mac_dev->phylink_config.supported_interfaces;
  5862. +       mac_dev->phy_node = of_node_get(mac_node);
  5863. +       phy = of_phy_find_device(mac_dev->phy_node);
  5864. +       if (!phy) {
  5865. +           err = -EINVAL;
  5866. +           of_node_put(mac_dev->phy_node);
  5867. +           goto _return_fixed_link_free;
  5868. +       }
  5869.  
  5870. -   /* Note that half duplex is only supported on 10/100M interfaces. */
  5871. +       fixed_link->link = phy->link;
  5872. +       fixed_link->speed = phy->speed;
  5873. +       fixed_link->duplex = phy->duplex;
  5874. +       fixed_link->pause = phy->pause;
  5875. +       fixed_link->asym_pause = phy->asym_pause;
  5876.  
  5877. -   if (memac->sgmii_pcs &&
  5878. -       (memac_supports(mac_dev, PHY_INTERFACE_MODE_SGMII) ||
  5879. -        memac_supports(mac_dev, PHY_INTERFACE_MODE_1000BASEX))) {
  5880. -       __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
  5881. -       __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
  5882. -   }
  5883. -
  5884. -   if (memac->sgmii_pcs &&
  5885. -       memac_supports(mac_dev, PHY_INTERFACE_MODE_2500BASEX))
  5886. -       __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
  5887. -
  5888. -   if (memac->qsgmii_pcs &&
  5889. -       memac_supports(mac_dev, PHY_INTERFACE_MODE_QSGMII))
  5890. -       __set_bit(PHY_INTERFACE_MODE_QSGMII, supported);
  5891. -   else if (mac_dev->phy_if == PHY_INTERFACE_MODE_QSGMII)
  5892. -       dev_warn(mac_dev->dev, "no QSGMII pcs specified\n");
  5893. -
  5894. -   if (memac->xfi_pcs &&
  5895. -       memac_supports(mac_dev, PHY_INTERFACE_MODE_10GBASER)) {
  5896. -       __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
  5897. -   } else {
  5898. -       /* From what I can tell, no 10g macs support RGMII. */
  5899. -       phy_interface_set_rgmii(supported);
  5900. -       __set_bit(PHY_INTERFACE_MODE_MII, supported);
  5901. +       put_device(&phy->mdio.dev);
  5902. +       memac->memac_drv_param->fixed_link = fixed_link;
  5903.     }
  5904.  
  5905. -   capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100;
  5906. -   capabilities |= MAC_1000FD | MAC_2500FD | MAC_10000FD;
  5907. -
  5908. -   /* These SoCs don't support half duplex at all; there's no different
  5909. -    * FMan version or compatible, so we just have to check the machine
  5910. -    * compatible instead
  5911. -    */
  5912. -   if (of_machine_is_compatible("fsl,ls1043a") ||
  5913. -       of_machine_is_compatible("fsl,ls1046a") ||
  5914. -       of_machine_is_compatible("fsl,B4QDS"))
  5915. -       capabilities &= ~(MAC_10HD | MAC_100HD);
  5916. -
  5917. -   mac_dev->phylink_config.mac_capabilities = capabilities;
  5918. -
  5919. -   /* The T2080 and T4240 don't support half duplex RGMII. There is no
  5920. -    * other way to identify these SoCs, so just use the machine
  5921. -    * compatible.
  5922. -    */
  5923. -   if (of_machine_is_compatible("fsl,T2080QDS") ||
  5924. -       of_machine_is_compatible("fsl,T2080RDB") ||
  5925. -       of_machine_is_compatible("fsl,T2081QDS") ||
  5926. -       of_machine_is_compatible("fsl,T4240QDS") ||
  5927. -       of_machine_is_compatible("fsl,T4240RDB"))
  5928. -       memac->rgmii_no_half_duplex = true;
  5929. -
  5930. -   /* Most boards should use MLO_AN_INBAND, but existing boards don't have
  5931. -    * a managed property. Default to MLO_AN_INBAND if nothing else is
  5932. -    * specified. We need to be careful and not enable this if we have a
  5933. -    * fixed link or if we are using MII or RGMII, since those
  5934. -    * configurations modes don't use in-band autonegotiation.
  5935. -    */
  5936. -   fixed = of_get_child_by_name(mac_node, "fixed-link");
  5937. -   if (!fixed && !of_property_read_bool(mac_node, "fixed-link") &&
  5938. -       !of_property_read_bool(mac_node, "managed") &&
  5939. -       mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
  5940. -       !phy_interface_mode_is_rgmii(mac_dev->phy_if))
  5941. -       mac_dev->phylink_config.ovr_an_inband = true;
  5942. -   of_node_put(fixed);
  5943. -
  5944.     err = memac_init(mac_dev->fman_mac);
  5945.     if (err < 0)
  5946. -       goto _return_fm_mac_free;
  5947. +       goto _return_fixed_link_free;
  5948.  
  5949.     dev_info(mac_dev->dev, "FMan MEMAC\n");
  5950.  
  5951. -   return 0;
  5952. +   goto _return;
  5953.  
  5954. +_return_fixed_link_free:
  5955. +   kfree(fixed_link);
  5956.  _return_fm_mac_free:
  5957.     memac_free(mac_dev->fman_mac);
  5958. +_return:
  5959.     return err;
  5960.  }
  5961. diff -rupN a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
  5962. --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c   2022-12-25 22:41:39.000000000 +0100
  5963. +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c   2022-12-31 15:56:55.299955264 +0100
  5964. @@ -13,7 +13,6 @@
  5965.  #include <linux/bitrev.h>
  5966.  #include <linux/io.h>
  5967.  #include <linux/crc32.h>
  5968. -#include <linux/netdevice.h>
  5969.  
  5970.  /* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
  5971.  #define TGEC_TX_IPG_LENGTH_MASK    0x000003ff
  5972. @@ -244,6 +243,10 @@ static int init(struct tgec_regs __iomem
  5973.  
  5974.  static int check_init_parameters(struct fman_mac *tgec)
  5975.  {
  5976. +   if (tgec->max_speed < SPEED_10000) {
  5977. +       pr_err("10G MAC driver only support 10G speed\n");
  5978. +       return -EINVAL;
  5979. +   }
  5980.     if (!tgec->exception_cb) {
  5981.         pr_err("uninitialized exception_cb\n");
  5982.         return -EINVAL;
  5983. @@ -381,13 +384,40 @@ static void free_init_resources(struct f
  5984.     tgec->unicast_addr_hash = NULL;
  5985.  }
  5986.  
  5987. +static bool is_init_done(struct tgec_cfg *cfg)
  5988. +{
  5989. +   /* Checks if tGEC driver parameters were initialized */
  5990. +   if (!cfg)
  5991. +       return true;
  5992. +
  5993. +   return false;
  5994. +}
  5995. +
  5996.  static int tgec_enable(struct fman_mac *tgec)
  5997.  {
  5998. +   struct tgec_regs __iomem *regs = tgec->regs;
  5999. +   u32 tmp;
  6000. +
  6001. +   if (!is_init_done(tgec->cfg))
  6002. +       return -EINVAL;
  6003. +
  6004. +   tmp = ioread32be(&regs->command_config);
  6005. +   tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
  6006. +   iowrite32be(tmp, &regs->command_config);
  6007. +
  6008.     return 0;
  6009.  }
  6010.  
  6011.  static void tgec_disable(struct fman_mac *tgec)
  6012.  {
  6013. +   struct tgec_regs __iomem *regs = tgec->regs;
  6014. +   u32 tmp;
  6015. +
  6016. +   WARN_ON_ONCE(!is_init_done(tgec->cfg));
  6017. +
  6018. +   tmp = ioread32be(&regs->command_config);
  6019. +   tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
  6020. +   iowrite32be(tmp, &regs->command_config);
  6021.  }
  6022.  
  6023.  static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
  6024. @@ -395,6 +425,9 @@ static int tgec_set_promiscuous(struct f
  6025.     struct tgec_regs __iomem *regs = tgec->regs;
  6026.     u32 tmp;
  6027.  
  6028. +   if (!is_init_done(tgec->cfg))
  6029. +       return -EINVAL;
  6030. +
  6031.     tmp = ioread32be(&regs->command_config);
  6032.     if (new_val)
  6033.         tmp |= CMD_CFG_PROMIS_EN;
  6034. @@ -411,6 +444,9 @@ static int tgec_set_tx_pause_frames(stru
  6035.  {
  6036.     struct tgec_regs __iomem *regs = tgec->regs;
  6037.  
  6038. +   if (!is_init_done(tgec->cfg))
  6039. +       return -EINVAL;
  6040. +
  6041.     iowrite32be((u32)pause_time, &regs->pause_quant);
  6042.  
  6043.     return 0;
  6044. @@ -421,6 +457,9 @@ static int tgec_accept_rx_pause_frames(s
  6045.     struct tgec_regs __iomem *regs = tgec->regs;
  6046.     u32 tmp;
  6047.  
  6048. +   if (!is_init_done(tgec->cfg))
  6049. +       return -EINVAL;
  6050. +
  6051.     tmp = ioread32be(&regs->command_config);
  6052.     if (!en)
  6053.         tmp |= CMD_CFG_PAUSE_IGNORE;
  6054. @@ -431,52 +470,12 @@ static int tgec_accept_rx_pause_frames(s
  6055.     return 0;
  6056.  }
  6057.  
  6058. -static void tgec_mac_config(struct phylink_config *config, unsigned int mode,
  6059. -               const struct phylink_link_state *state)
  6060. -{
  6061. -}
  6062. -
  6063. -static void tgec_link_up(struct phylink_config *config, struct phy_device *phy,
  6064. -            unsigned int mode, phy_interface_t interface,
  6065. -            int speed, int duplex, bool tx_pause, bool rx_pause)
  6066. -{
  6067. -   struct mac_device *mac_dev = fman_config_to_mac(config);
  6068. -   struct fman_mac *tgec = mac_dev->fman_mac;
  6069. -   struct tgec_regs __iomem *regs = tgec->regs;
  6070. -   u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
  6071. -            FSL_FM_PAUSE_TIME_DISABLE;
  6072. -   u32 tmp;
  6073. -
  6074. -   tgec_set_tx_pause_frames(tgec, 0, pause_time, 0);
  6075. -   tgec_accept_rx_pause_frames(tgec, rx_pause);
  6076. -   mac_dev->update_speed(mac_dev, speed);
  6077. -
  6078. -   tmp = ioread32be(&regs->command_config);
  6079. -   tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
  6080. -   iowrite32be(tmp, &regs->command_config);
  6081. -}
  6082. -
  6083. -static void tgec_link_down(struct phylink_config *config, unsigned int mode,
  6084. -              phy_interface_t interface)
  6085. -{
  6086. -   struct fman_mac *tgec = fman_config_to_mac(config)->fman_mac;
  6087. -   struct tgec_regs __iomem *regs = tgec->regs;
  6088. -   u32 tmp;
  6089. -
  6090. -   tmp = ioread32be(&regs->command_config);
  6091. -   tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
  6092. -   iowrite32be(tmp, &regs->command_config);
  6093. -}
  6094. -
  6095. -static const struct phylink_mac_ops tgec_mac_ops = {
  6096. -   .mac_config = tgec_mac_config,
  6097. -   .mac_link_up = tgec_link_up,
  6098. -   .mac_link_down = tgec_link_down,
  6099. -};
  6100. -
  6101.  static int tgec_modify_mac_address(struct fman_mac *tgec,
  6102.                    const enet_addr_t *p_enet_addr)
  6103.  {
  6104. +   if (!is_init_done(tgec->cfg))
  6105. +       return -EINVAL;
  6106. +
  6107.     tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
  6108.     set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
  6109.  
  6110. @@ -491,6 +490,9 @@ static int tgec_add_hash_mac_address(str
  6111.     u32 crc = 0xFFFFFFFF, hash;
  6112.     u64 addr;
  6113.  
  6114. +   if (!is_init_done(tgec->cfg))
  6115. +       return -EINVAL;
  6116. +
  6117.     addr = ENET_ADDR_TO_UINT64(*eth_addr);
  6118.  
  6119.     if (!(addr & GROUP_ADDRESS)) {
  6120. @@ -523,6 +525,9 @@ static int tgec_set_allmulti(struct fman
  6121.     u32 entry;
  6122.     struct tgec_regs __iomem *regs = tgec->regs;
  6123.  
  6124. +   if (!is_init_done(tgec->cfg))
  6125. +       return -EINVAL;
  6126. +
  6127.     if (enable) {
  6128.         for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
  6129.             iowrite32be(entry | TGEC_HASH_MCAST_EN,
  6130. @@ -543,6 +548,9 @@ static int tgec_set_tstamp(struct fman_m
  6131.     struct tgec_regs __iomem *regs = tgec->regs;
  6132.     u32 tmp;
  6133.  
  6134. +   if (!is_init_done(tgec->cfg))
  6135. +       return -EINVAL;
  6136. +
  6137.     tmp = ioread32be(&regs->command_config);
  6138.  
  6139.     if (enable)
  6140. @@ -564,6 +572,9 @@ static int tgec_del_hash_mac_address(str
  6141.     u32 crc = 0xFFFFFFFF, hash;
  6142.     u64 addr;
  6143.  
  6144. +   if (!is_init_done(tgec->cfg))
  6145. +       return -EINVAL;
  6146. +
  6147.     addr = ((*(u64 *)eth_addr) >> 16);
  6148.  
  6149.     /* CRC calculation */
  6150. @@ -590,12 +601,22 @@ static int tgec_del_hash_mac_address(str
  6151.     return 0;
  6152.  }
  6153.  
  6154. +static void tgec_adjust_link(struct mac_device *mac_dev)
  6155. +{
  6156. +   struct phy_device *phy_dev = mac_dev->phy_dev;
  6157. +
  6158. +   mac_dev->update_speed(mac_dev, phy_dev->speed);
  6159. +}
  6160. +
  6161.  static int tgec_set_exception(struct fman_mac *tgec,
  6162.                   enum fman_mac_exceptions exception, bool enable)
  6163.  {
  6164.     struct tgec_regs __iomem *regs = tgec->regs;
  6165.     u32 bit_mask = 0;
  6166.  
  6167. +   if (!is_init_done(tgec->cfg))
  6168. +       return -EINVAL;
  6169. +
  6170.     bit_mask = get_exception_flag(exception);
  6171.     if (bit_mask) {
  6172.         if (enable)
  6173. @@ -620,6 +641,9 @@ static int tgec_init(struct fman_mac *tg
  6174.     enet_addr_t eth_addr;
  6175.     int err;
  6176.  
  6177. +   if (is_init_done(tgec->cfg))
  6178. +       return -EINVAL;
  6179. +
  6180.     if (DEFAULT_RESET_ON_INIT &&
  6181.         (fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
  6182.         pr_err("Can't reset MAC!\n");
  6183. @@ -729,6 +753,7 @@ static struct fman_mac *tgec_config(stru
  6184.  
  6185.     tgec->regs = mac_dev->vaddr;
  6186.     tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
  6187. +   tgec->max_speed = params->max_speed;
  6188.     tgec->mac_id = params->mac_id;
  6189.     tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT  |
  6190.                 TGEC_IMASK_REM_FAULT    |
  6191. @@ -763,15 +788,17 @@ int tgec_initialization(struct mac_devic
  6192.     int err;
  6193.     struct fman_mac     *tgec;
  6194.  
  6195. -   mac_dev->phylink_ops        = &tgec_mac_ops;
  6196.     mac_dev->set_promisc        = tgec_set_promiscuous;
  6197.     mac_dev->change_addr        = tgec_modify_mac_address;
  6198.     mac_dev->add_hash_mac_addr  = tgec_add_hash_mac_address;
  6199.     mac_dev->remove_hash_mac_addr   = tgec_del_hash_mac_address;
  6200. +   mac_dev->set_tx_pause       = tgec_set_tx_pause_frames;
  6201. +   mac_dev->set_rx_pause       = tgec_accept_rx_pause_frames;
  6202.     mac_dev->set_exception      = tgec_set_exception;
  6203.     mac_dev->set_allmulti       = tgec_set_allmulti;
  6204.     mac_dev->set_tstamp     = tgec_set_tstamp;
  6205.     mac_dev->set_multi      = fman_set_multi;
  6206. +   mac_dev->adjust_link            = tgec_adjust_link;
  6207.     mac_dev->enable         = tgec_enable;
  6208.     mac_dev->disable        = tgec_disable;
  6209.  
  6210. @@ -781,19 +808,6 @@ int tgec_initialization(struct mac_devic
  6211.         goto _return;
  6212.     }
  6213.  
  6214. -   /* The internal connection to the serdes is XGMII, but this isn't
  6215. -    * really correct for the phy mode (which is the external connection).
  6216. -    * However, this is how all older device trees say that they want
  6217. -    * XAUI, so just convert it for them.
  6218. -    */
  6219. -   if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
  6220. -       mac_dev->phy_if = PHY_INTERFACE_MODE_XAUI;
  6221. -
  6222. -   __set_bit(PHY_INTERFACE_MODE_XAUI,
  6223. -         mac_dev->phylink_config.supported_interfaces);
  6224. -   mac_dev->phylink_config.mac_capabilities =
  6225. -       MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10000FD;
  6226. -
  6227.     tgec = mac_dev->fman_mac;
  6228.     tgec->cfg->max_frame_length = fman_get_max_frm();
  6229.     err = tgec_init(tgec);
  6230. diff -rupN a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
  6231. --- a/drivers/net/ethernet/freescale/fman/mac.c 2022-12-25 22:41:39.000000000 +0100
  6232. +++ b/drivers/net/ethernet/freescale/fman/mac.c 2022-12-31 15:56:55.299955264 +0100
  6233. @@ -15,7 +15,6 @@
  6234.  #include <linux/phy.h>
  6235.  #include <linux/netdevice.h>
  6236.  #include <linux/phy_fixed.h>
  6237. -#include <linux/phylink.h>
  6238.  #include <linux/etherdevice.h>
  6239.  #include <linux/libfdt_env.h>
  6240.  
  6241. @@ -94,8 +93,130 @@ int fman_set_multi(struct net_device *ne
  6242.     return 0;
  6243.  }
  6244.  
  6245. +/**
  6246. + * fman_set_mac_active_pause
  6247. + * @mac_dev:   A pointer to the MAC device
  6248. + * @rx:        Pause frame setting for RX
  6249. + * @tx:        Pause frame setting for TX
  6250. + *
  6251. + * Set the MAC RX/TX PAUSE frames settings
  6252. + *
  6253. + * Avoid redundant calls to FMD, if the MAC driver already contains the desired
  6254. + * active PAUSE settings. Otherwise, the new active settings should be reflected
  6255. + * in FMan.
  6256. + *
  6257. + * Return: 0 on success; Error code otherwise.
  6258. + */
  6259. +int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
  6260. +{
  6261. +   struct fman_mac *fman_mac = mac_dev->fman_mac;
  6262. +   int err = 0;
  6263. +
  6264. +   if (rx != mac_dev->rx_pause_active) {
  6265. +       err = mac_dev->set_rx_pause(fman_mac, rx);
  6266. +       if (likely(err == 0))
  6267. +           mac_dev->rx_pause_active = rx;
  6268. +   }
  6269. +
  6270. +   if (tx != mac_dev->tx_pause_active) {
  6271. +       u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
  6272. +                    FSL_FM_PAUSE_TIME_DISABLE);
  6273. +
  6274. +       err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
  6275. +
  6276. +       if (likely(err == 0))
  6277. +           mac_dev->tx_pause_active = tx;
  6278. +   }
  6279. +
  6280. +   return err;
  6281. +}
  6282. +EXPORT_SYMBOL(fman_set_mac_active_pause);
  6283. +
  6284. +/**
  6285. + * fman_get_pause_cfg
  6286. + * @mac_dev:   A pointer to the MAC device
  6287. + * @rx_pause:  Return value for RX setting
  6288. + * @tx_pause:  Return value for TX setting
  6289. + *
  6290. + * Determine the MAC RX/TX PAUSE frames settings based on PHY
  6291. + * autonegotiation or values set by ethtool.
  6292. + *
  6293. + * Return: none; the resolved settings are stored in @rx_pause and @tx_pause.
  6294. + */
  6295. +void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
  6296. +           bool *tx_pause)
  6297. +{
  6298. +   struct phy_device *phy_dev = mac_dev->phy_dev;
  6299. +   u16 lcl_adv, rmt_adv;
  6300. +   u8 flowctrl;
  6301. +
  6302. +   *rx_pause = *tx_pause = false;
  6303. +
  6304. +   if (!phy_dev->duplex)
  6305. +       return;
  6306. +
  6307. +   /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
  6308. +    * are those set by ethtool.
  6309. +    */
  6310. +   if (!mac_dev->autoneg_pause) {
  6311. +       *rx_pause = mac_dev->rx_pause_req;
  6312. +       *tx_pause = mac_dev->tx_pause_req;
  6313. +       return;
  6314. +   }
  6315. +
  6316. +   /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
  6317. +    * settings depend on the result of the link negotiation.
  6318. +    */
  6319. +
  6320. +   /* get local capabilities */
  6321. +   lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
  6322. +
  6323. +   /* get link partner capabilities */
  6324. +   rmt_adv = 0;
  6325. +   if (phy_dev->pause)
  6326. +       rmt_adv |= LPA_PAUSE_CAP;
  6327. +   if (phy_dev->asym_pause)
  6328. +       rmt_adv |= LPA_PAUSE_ASYM;
  6329. +
  6330. +   /* Calculate TX/RX settings based on local and peer advertised
  6331. +    * symmetric/asymmetric PAUSE capabilities.
  6332. +    */
  6333. +   flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
  6334. +   if (flowctrl & FLOW_CTRL_RX)
  6335. +       *rx_pause = true;
  6336. +   if (flowctrl & FLOW_CTRL_TX)
  6337. +       *tx_pause = true;
  6338. +}
  6339. +EXPORT_SYMBOL(fman_get_pause_cfg);
  6340. +
  6341. +#define DTSEC_SUPPORTED \
  6342. +   (SUPPORTED_10baseT_Half \
  6343. +   | SUPPORTED_10baseT_Full \
  6344. +   | SUPPORTED_100baseT_Half \
  6345. +   | SUPPORTED_100baseT_Full \
  6346. +   | SUPPORTED_Autoneg \
  6347. +   | SUPPORTED_Pause \
  6348. +   | SUPPORTED_Asym_Pause \
  6349. +   | SUPPORTED_FIBRE \
  6350. +   | SUPPORTED_MII)
  6351. +
  6352.  static DEFINE_MUTEX(eth_lock);
  6353.  
  6354. +static const u16 phy2speed[] = {
  6355. +   [PHY_INTERFACE_MODE_MII]        = SPEED_100,
  6356. +   [PHY_INTERFACE_MODE_GMII]       = SPEED_1000,
  6357. +   [PHY_INTERFACE_MODE_SGMII]      = SPEED_1000,
  6358. +   [PHY_INTERFACE_MODE_TBI]        = SPEED_1000,
  6359. +   [PHY_INTERFACE_MODE_RMII]       = SPEED_100,
  6360. +   [PHY_INTERFACE_MODE_RGMII]      = SPEED_1000,
  6361. +   [PHY_INTERFACE_MODE_RGMII_ID]       = SPEED_1000,
  6362. +   [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
  6363. +   [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
  6364. +   [PHY_INTERFACE_MODE_RTBI]       = SPEED_1000,
  6365. +   [PHY_INTERFACE_MODE_QSGMII]     = SPEED_1000,
  6366. +   [PHY_INTERFACE_MODE_XGMII]      = SPEED_10000
  6367. +};
  6368. +
  6369.  static struct platform_device *dpaa_eth_add_device(int fman_id,
  6370.                            struct mac_device *mac_dev)
  6371.  {
  6372. @@ -142,8 +263,8 @@ no_mem:
  6373.  }
  6374.  
  6375.  static const struct of_device_id mac_match[] = {
  6376. -   { .compatible   = "fsl,fman-dtsec", .data = dtsec_initialization },
  6377. -   { .compatible   = "fsl,fman-xgec", .data = tgec_initialization },
  6378. +   { .compatible   = "fsl,fman-dtsec", .data = dtsec_initialization },
  6379. +   { .compatible   = "fsl,fman-xgec", .data = tgec_initialization },
  6380.     { .compatible   = "fsl,fman-memac", .data = memac_initialization },
  6381.     {}
  6382.  };
  6383. @@ -174,7 +295,6 @@ static int mac_probe(struct platform_dev
  6384.     priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
  6385.     if (!priv)
  6386.         return -ENOMEM;
  6387. -   platform_set_drvdata(_of_dev, mac_dev);
  6388.  
  6389.     /* Save private information */
  6390.     mac_dev->priv = priv;
  6391. @@ -304,21 +424,57 @@ static int mac_probe(struct platform_dev
  6392.     }
  6393.     mac_dev->phy_if = phy_if;
  6394.  
  6395. +   priv->speed     = phy2speed[mac_dev->phy_if];
  6396. +   params.max_speed    = priv->speed;
  6397. +   mac_dev->if_support = DTSEC_SUPPORTED;
  6398. +   /* We don't support half-duplex in SGMII mode */
  6399. +   if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
  6400. +       mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
  6401. +                   SUPPORTED_100baseT_Half);
  6402. +
  6403. +   /* Gigabit support (no half-duplex) */
  6404. +   if (params.max_speed == 1000)
  6405. +       mac_dev->if_support |= SUPPORTED_1000baseT_Full;
  6406. +
  6407. +   /* The 10G interface only supports one mode */
  6408. +   if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
  6409. +       mac_dev->if_support = SUPPORTED_10000baseT_Full;
  6410. +
  6411. +   /* Get the rest of the PHY information */
  6412. +   mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
  6413. +
  6414. +   params.basex_if     = false;
  6415.     params.mac_id       = priv->cell_index;
  6416.     params.fm       = (void *)priv->fman;
  6417.     params.exception_cb = mac_exception;
  6418.     params.event_cb     = mac_exception;
  6419.  
  6420.     err = init(mac_dev, mac_node, &params);
  6421. -   if (err < 0)
  6422. +   if (err < 0) {
  6423. +       dev_err(dev, "mac_dev->init() = %d\n", err);
  6424. +       of_node_put(mac_dev->phy_node);
  6425.         return err;
  6426. +   }
  6427. +
  6428. +   /* pause frame autonegotiation enabled */
  6429. +   mac_dev->autoneg_pause = true;
  6430. +
  6431. +   /* By initializing the values to false, force FMD to enable PAUSE frames
  6432. +    * on RX and TX
  6433. +    */
  6434. +   mac_dev->rx_pause_req = true;
  6435. +   mac_dev->tx_pause_req = true;
  6436. +   mac_dev->rx_pause_active = false;
  6437. +   mac_dev->tx_pause_active = false;
  6438. +   err = fman_set_mac_active_pause(mac_dev, true, true);
  6439. +   if (err < 0)
  6440. +       dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
  6441.  
  6442.     if (!is_zero_ether_addr(mac_dev->addr))
  6443.         dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
  6444.  
  6445.     priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
  6446.     if (IS_ERR(priv->eth_dev)) {
  6447. -       err = PTR_ERR(priv->eth_dev);
  6448.         dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
  6449.             priv->cell_index);
  6450.         priv->eth_dev = NULL;
  6451. diff -rupN a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
  6452. --- a/drivers/net/ethernet/freescale/fman/mac.h 2022-12-25 22:41:39.000000000 +0100
  6453. +++ b/drivers/net/ethernet/freescale/fman/mac.h 2022-12-31 15:56:55.299955264 +0100
  6454. @@ -9,7 +9,6 @@
  6455.  #include <linux/device.h>
  6456.  #include <linux/if_ether.h>
  6457.  #include <linux/phy.h>
  6458. -#include <linux/phylink.h>
  6459.  #include <linux/list.h>
  6460.  
  6461.  #include "fman_port.h"
  6462. @@ -25,22 +24,32 @@ struct mac_device {
  6463.     struct resource     *res;
  6464.     u8           addr[ETH_ALEN];
  6465.     struct fman_port    *port[2];
  6466. -   struct phylink      *phylink;
  6467. -   struct phylink_config   phylink_config;
  6468. +   u32          if_support;
  6469. +   struct phy_device   *phy_dev;
  6470.     phy_interface_t     phy_if;
  6471. +   struct device_node  *phy_node;
  6472. +   struct net_device   *net_dev;
  6473.  
  6474. +   bool autoneg_pause;
  6475. +   bool rx_pause_req;
  6476. +   bool tx_pause_req;
  6477. +   bool rx_pause_active;
  6478. +   bool tx_pause_active;
  6479.     bool promisc;
  6480.     bool allmulti;
  6481.  
  6482. -   const struct phylink_mac_ops *phylink_ops;
  6483.     int (*enable)(struct fman_mac *mac_dev);
  6484.     void (*disable)(struct fman_mac *mac_dev);
  6485. +   void (*adjust_link)(struct mac_device *mac_dev);
  6486.     int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
  6487.     int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
  6488.     int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
  6489.     int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
  6490.     int (*set_multi)(struct net_device *net_dev,
  6491.              struct mac_device *mac_dev);
  6492. +   int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
  6493. +   int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
  6494. +               u16 pause_time, u16 thresh_time);
  6495.     int (*set_exception)(struct fman_mac *mac_dev,
  6496.                  enum fman_mac_exceptions exception, bool enable);
  6497.     int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
  6498. @@ -54,12 +63,6 @@ struct mac_device {
  6499.     struct mac_priv_s   *priv;
  6500.  };
  6501.  
  6502. -static inline struct mac_device
  6503. -*fman_config_to_mac(struct phylink_config *config)
  6504. -{
  6505. -   return container_of(config, struct mac_device, phylink_config);
  6506. -}
  6507. -
  6508.  struct dpaa_eth_data {
  6509.     struct mac_device *mac_dev;
  6510.     int mac_hw_id;
  6511.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement