diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 780d32dc7f..804f4273c0 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -1899,6 +1899,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
        nix_lf_free(dev);
    }
 
+   rte_eth_burst_api[eth_dev->data->port_id].data = eth_dev->data;
    dev->rx_offloads = rxmode->offloads;
    dev->tx_offloads = txmode->offloads;
    dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index f931fd10e1..fa8fae50ac 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -21,20 +21,6 @@
 extern "C" {
 #endif
 
-/**
- * @internal
- * Structure used to hold information about the callbacks to be called for a
- * queue on RX and TX.
- */
-struct rte_eth_rxtx_callback {
-   struct rte_eth_rxtx_callback *next;
-   union{
-       rte_rx_callback_fn rx;
-       rte_tx_callback_fn tx;
-   } fn;
-   void *param;
-};
-
 /**
  * @internal
  * The generic data structure associated with each ethernet device.
@@ -59,16 +45,6 @@ struct rte_eth_dev {
    struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
    /** User application callbacks for NIC interrupts */
    struct rte_eth_dev_cb_list link_intr_cbs;
-   /**
-    * User-supplied functions called from rx_burst to post-process
-    * received packets before passing them to the user
-    */
-   struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
-   /**
-    * User-supplied functions called from tx_burst to pre-process
-    * received packets before passing them to the driver for transmission.
-    */
-   struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
    enum rte_eth_dev_state state; /**< Flag indicating the port state */
    void *security_ctx; /**< Context for security ops */
 
@@ -1667,9 +1643,8 @@ __rte_internal
 static inline void *
 _rte_eth_rx_prolog(uint16_t port_id, uint16_t queue_id)
 {
-   struct rte_eth_dev *dev;
-
-   dev = &rte_eth_devices[port_id];
+   struct rte_eth_dev_data *data;
+   data = rte_eth_burst_api[port_id].data;
 
 #ifdef RTE_ETHDEV_DEBUG_RX
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
@@ -1679,7 +1654,7 @@ _rte_eth_rx_prolog(uint16_t port_id, uint16_t queue_id)
        return NULL;
    }
 #endif
-   return dev->data->rx_queues[queue_id];
+   return data->rx_queues[queue_id];
 }
 
 /**
@@ -1708,11 +1683,12 @@ static inline uint16_t
 _rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
    struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts)
 {
-   struct rte_eth_dev *dev;
+   RTE_SET_USED(nb_pkts);
+#if 0
+   struct rte_eth_burst_api *api;
 
-   dev = &rte_eth_devices[port_id];
+   api = &rte_eth_burst_api[port_id];
 
-#ifdef RTE_ETHDEV_RXTX_CALLBACKS
    struct rte_eth_rxtx_callback *cb;
 
    /* __ATOMIC_RELEASE memory order was used when the
@@ -1721,8 +1697,8 @@ _rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
     * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
     * not required.
     */
-   cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
-               __ATOMIC_RELAXED);
+   cb = __atomic_load_n(&api->post_rx_burst_cbs[queue_id],
+                __ATOMIC_RELAXED);
 
    if (unlikely(cb != NULL)) {
        do {
@@ -1756,8 +1732,6 @@ _RTE_ETH_RX_PROTO(fn) \
 { \
    uint16_t nb_rx; \
    void *rxq = _rte_eth_rx_prolog(port_id, queue_id); \
-   if (rxq == NULL) \
-       return 0; \
    nb_rx = fn(rxq, rx_pkts, nb_pkts); \
    return _rte_eth_rx_epilog(port_id, queue_id, rx_pkts, nb_rx, nb_pkts); \
 }
@@ -1794,11 +1768,9 @@ static inline void *
 _rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
        struct rte_mbuf **tx_pkts, uint16_t *nb_pkts)
 {
-   uint16_t n;
-   struct rte_eth_dev *dev;
+   struct rte_eth_burst_api *api;
 
-   n = *nb_pkts;
-   dev = &rte_eth_devices[port_id];
+   api = &rte_eth_burst_api[port_id];
 
 #ifdef RTE_ETHDEV_DEBUG_TX
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
@@ -1809,30 +1781,8 @@ _rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
    }
 #endif
 
-#ifdef RTE_ETHDEV_RXTX_CALLBACKS
-   struct rte_eth_rxtx_callback *cb;
-
-   /* __ATOMIC_RELEASE memory order was used when the
-    * call back was inserted into the list.
-    * Since there is a clear dependency between loading
-    * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
-    * not required.
-    */
-   cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
-               __ATOMIC_RELAXED);
-
-   if (unlikely(cb != NULL)) {
-       do {
-           n = cb->fn.tx(port_id, queue_id, tx_pkts, n, cb->param);
-           cb = cb->next;
-       } while (cb != NULL);
-   }
-
-   *nb_pkts = n;
-#endif
-
-   rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, n);
-   return dev->data->tx_queues[queue_id];
+   // rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, n);
+   return ((struct rte_eth_dev_data *)api->data)->tx_queues[queue_id];
 }
 
 /**
@@ -1851,8 +1801,6 @@ _rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
 _RTE_ETH_TX_PROTO(fn) \
 { \
    void *txq = _rte_eth_tx_prolog(port_id, queue_id, tx_pkts, &nb_pkts); \
-   if (txq == NULL) \
-       return 0; \
    return fn(txq, tx_pkts, nb_pkts); \
 }
 
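[For reference, a sketch of what a wrapper generated from the _RTE_ETH_RX_PROTO(fn) body above would amount to. The driver function example_pmd_recv_pkts, the wrapper name, and the assumed parameter list (port_id, queue_id, rx_pkts, nb_pkts) are illustrative assumptions, not part of the patch; only the prolog/epilog sequence comes from the macro body in the hunk.]

/*
 * Hypothetical expansion of _RTE_ETH_RX_PROTO(example_pmd_recv_pkts).
 * All names here are made up for illustration.
 */
static uint16_t
example_pmd_rx_burst(uint16_t port_id, uint16_t queue_id,
        struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
    uint16_t nb_rx;
    /* queue pointer is now resolved via rte_eth_burst_api[port_id].data */
    void *rxq = _rte_eth_rx_prolog(port_id, queue_id);

    nb_rx = example_pmd_recv_pkts(rxq, rx_pkts, nb_pkts);
    /* epilog; its callback handling is compiled out (#if 0) in this revision */
    return _rte_eth_rx_epilog(port_id, queue_id, rx_pkts, nb_rx, nb_pkts);
}
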
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index a41f3b2d57..8475b386c3 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -5125,14 +5125,14 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
    rte_spinlock_lock(&eth_dev_rx_cb_lock);
    /* Add the callbacks in fifo order. */
    struct rte_eth_rxtx_callback *tail =
-       rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
+       rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id];
 
    if (!tail) {
        /* Stores to cb->fn and cb->param should complete before
         * cb is visible to data plane.
         */
        __atomic_store_n(
-           &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
+           &rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id],
            cb, __ATOMIC_RELEASE);
 
    } else {
@@ -5175,13 +5175,13 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
 
    rte_spinlock_lock(&eth_dev_rx_cb_lock);
    /* Add the callbacks at first position */
-   cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
+   cb->next = rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id];
    /* Stores to cb->fn, cb->param and cb->next should complete before
     * cb is visible to data plane threads.
     */
    __atomic_store_n(
-       &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
-       cb, __ATOMIC_RELEASE);
+       &rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id], cb,
+       __ATOMIC_RELEASE);
    rte_spinlock_unlock(&eth_dev_rx_cb_lock);
 
    return cb;
@@ -5223,14 +5223,14 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
    rte_spinlock_lock(&eth_dev_tx_cb_lock);
    /* Add the callbacks in fifo order. */
    struct rte_eth_rxtx_callback *tail =
-       rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
+       rte_eth_burst_api[port_id].pre_tx_burst_cbs[queue_id];
 
    if (!tail) {
        /* Stores to cb->fn and cb->param should complete before
         * cb is visible to data plane.
         */
        __atomic_store_n(
-           &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
+           &rte_eth_burst_api[port_id].pre_tx_burst_cbs[queue_id],
            cb, __ATOMIC_RELEASE);
 
    } else {
@@ -5259,13 +5259,13 @@ rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
            queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
        return -EINVAL;
 
-   struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+   struct rte_eth_burst_api *api = &rte_eth_burst_api[port_id];
    struct rte_eth_rxtx_callback *cb;
    struct rte_eth_rxtx_callback **prev_cb;
    int ret = -EINVAL;
 
    rte_spinlock_lock(&eth_dev_rx_cb_lock);
-   prev_cb = &dev->post_rx_burst_cbs[queue_id];
+   prev_cb = &api->post_rx_burst_cbs[queue_id];
    for (; *prev_cb != NULL; prev_cb = &cb->next) {
        cb = *prev_cb;
        if (cb == user_cb) {
@@ -5290,16 +5290,16 @@ rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
    /* Check input parameters. */
    RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
    if (user_cb == NULL ||
-           queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
+       queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
        return -EINVAL;
 
-   struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+   struct rte_eth_burst_api *api = &rte_eth_burst_api[port_id];
    int ret = -EINVAL;
    struct rte_eth_rxtx_callback *cb;
    struct rte_eth_rxtx_callback **prev_cb;
 
    rte_spinlock_lock(&eth_dev_tx_cb_lock);
-   prev_cb = &dev->pre_tx_burst_cbs[queue_id];
+   prev_cb = &api->pre_tx_burst_cbs[queue_id];
    for (; *prev_cb != NULL; prev_cb = &cb->next) {
        cb = *prev_cb;
        if (cb == user_cb) {
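
[The control-path changes above keep the public callback API intact; only the storage for the callback lists moves from rte_eth_devices[] into rte_eth_burst_api[]. A minimal application-side usage sketch, assuming a configured and started port; the callback and counter names are made up for illustration.]

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Count packets seen on one Rx queue; passes the burst through unchanged. */
static uint16_t
count_rx_cb(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **pkts,
        uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
    uint64_t *seen = user_param;

    RTE_SET_USED(port_id);
    RTE_SET_USED(queue_id);
    RTE_SET_USED(max_pkts);
    RTE_SET_USED(pkts);
    *seen += nb_pkts;
    return nb_pkts;
}

static uint64_t rx_seen;

static int
attach_rx_counter(uint16_t port_id, uint16_t queue_id)
{
    const struct rte_eth_rxtx_callback *cb;

    cb = rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, &rx_seen);
    if (cb == NULL)
        return -rte_errno;    /* EINVAL, ENOTSUP or ENOMEM */
    /* later: rte_eth_remove_rx_callback(port_id, queue_id, cb); */
    return 0;
}
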
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 0f425cf042..ff465e1b53 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -5000,11 +5000,36 @@ static inline uint16_t
 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
         struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
 {
-   if (port_id >= RTE_MAX_ETHPORTS)
-       return 0;
+   uint16_t nb_rx;
+
+   nb_rx = rte_eth_burst_api[port_id].rx_pkt_burst(port_id, queue_id,
+                           rx_pkts, nb_pkts);
+#if 1
+   struct rte_eth_burst_api *api;
+
+   api = &rte_eth_burst_api[port_id];
+
+   struct rte_eth_rxtx_callback *cb;
+
+   /* __ATOMIC_RELEASE memory order was used when the
+    * call back was inserted into the list.
+    * Since there is a clear dependency between loading
+    * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+    * not required.
+    */
+   cb = __atomic_load_n(&api->post_rx_burst_cbs[queue_id],
+                __ATOMIC_RELAXED);
+
+   if (unlikely(cb != NULL)) {
+       do {
+           nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
+                     nb_pkts, cb->param);
+           cb = cb->next;
+       } while (cb != NULL);
+   }
+#endif
 
-   return rte_eth_burst_api[port_id].rx_pkt_burst(port_id, queue_id,
-           rx_pkts, nb_pkts);
+   return nb_rx;
 }
 
 /**
@@ -5207,9 +5232,33 @@ static inline uint16_t
 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
         struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-   if (port_id >= RTE_MAX_ETHPORTS)
-       return 0;
+   uint16_t n;
+   struct rte_eth_burst_api *api;
+
+   n = nb_pkts;
+   api = &rte_eth_burst_api[port_id];
+
+#if 1
+   struct rte_eth_rxtx_callback *cb;
 
+   /* __ATOMIC_RELEASE memory order was used when the
+    * call back was inserted into the list.
+    * Since there is a clear dependency between loading
+    * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+    * not required.
+    */
+   cb = __atomic_load_n(&api->pre_tx_burst_cbs[queue_id],
+                __ATOMIC_RELAXED);
+
+   if (unlikely(cb != NULL)) {
+       do {
+           n = cb->fn.tx(port_id, queue_id, tx_pkts, n, cb->param);
+           cb = cb->next;
+       } while (cb != NULL);
+   }
+
+   nb_pkts = n;
+#endif
    return rte_eth_burst_api[port_id].tx_pkt_burst(port_id, queue_id,
            tx_pkts, nb_pkts);
 }
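
[The Tx side mirrors the Rx side: rte_eth_tx_burst() now runs the pre-Tx callback chain inline and then dispatches through rte_eth_burst_api[port_id].tx_pkt_burst. A matching sketch of a pre-Tx callback, again with illustrative names; the length limit is arbitrary.]

/* Drop packets longer than *user_param bytes before the PMD sees them. */
static uint16_t
clamp_tx_cb(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **pkts,
        uint16_t nb_pkts, void *user_param)
{
    uint32_t max_len = *(uint32_t *)user_param;
    uint16_t i, kept = 0;

    RTE_SET_USED(port_id);
    RTE_SET_USED(queue_id);
    for (i = 0; i < nb_pkts; i++) {
        if (rte_pktmbuf_pkt_len(pkts[i]) <= max_len)
            pkts[kept++] = pkts[i];
        else
            rte_pktmbuf_free(pkts[i]);
    }
    return kept;    /* tx_burst only hands the first `kept` mbufs to the PMD */
}

/* registration: rte_eth_add_tx_callback(port_id, queue_id, clamp_tx_cb, &max_len); */
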
diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
index 06f42ce899..b2cbf599c6 100644
--- a/lib/ethdev/rte_ethdev_core.h
+++ b/lib/ethdev/rte_ethdev_core.h
@@ -72,7 +72,22 @@ typedef int (*rte_eth_tx_descriptor_status_t)(uint16_t port_id,
 typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
 /**< @internal Check the status of a Tx descriptor */
 
+/**
+ * @internal
+ * Structure used to hold information about the callbacks to be called for a
+ * queue on RX and TX.
+ */
+struct rte_eth_rxtx_callback {
+   struct rte_eth_rxtx_callback *next;
+   union {
+       rte_rx_callback_fn rx;
+       rte_tx_callback_fn tx;
+   } fn;
+   void *param;
+};
+
 struct rte_eth_burst_api {
+   void *data;
    rte_eth_rx_burst_t rx_pkt_burst;
    /**< PMD receive function. */
    rte_eth_tx_burst_t tx_pkt_burst;
@@ -85,8 +100,19 @@ struct rte_eth_burst_api {
    /**< Check the status of a Rx descriptor. */
    rte_eth_tx_descriptor_status_t tx_descriptor_status;
    /**< Check the status of a Tx descriptor. */
+   /**
+    * User-supplied functions called from rx_burst to post-process
+    * received packets before passing them to the user
+    */
+   struct rte_eth_rxtx_callback
+       *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
+   /**
+    * User-supplied functions called from tx_burst to pre-process
+    * received packets before passing them to the driver for transmission.
+    */
+   struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
    uintptr_t reserved[2];
-} __rte_cache_min_aligned;
+} __rte_cache_aligned;
 
 extern struct rte_eth_burst_api rte_eth_burst_api[RTE_MAX_ETHPORTS];
 
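
[Taken together, everything the inline burst helpers touch — the queue arrays via the new data pointer, both burst function pointers, and the callback lists — now sits in one cache-aligned rte_eth_burst_api entry per port, so the fast path no longer dereferences rte_eth_devices[]. Note that the rewritten rte_eth_rx_burst()/rte_eth_tx_burst() no longer bounds-check port_id, so callers must pass a valid, started port. The application-visible data path is otherwise unchanged; a minimal polling sketch with illustrative port/queue ids follows.]

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
poll_once(uint16_t port_id, uint16_t queue_id)
{
    struct rte_mbuf *bufs[32];
    uint16_t i, nb_rx;

    /* Dispatches through rte_eth_burst_api[port_id].rx_pkt_burst, then
     * runs any post-Rx callbacks, per the rte_ethdev.h hunk above. */
    nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs, RTE_DIM(bufs));
    for (i = 0; i < nb_rx; i++)
        rte_pktmbuf_free(bufs[i]);    /* placeholder for real processing */
}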