Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
- index 780d32dc7f..804f4273c0 100644
- --- a/drivers/net/octeontx2/otx2_ethdev.c
- +++ b/drivers/net/octeontx2/otx2_ethdev.c
- @@ -1899,6 +1899,7 @@ otx2_nix_configure(struct rte_eth_dev *eth_dev)
- nix_lf_free(dev);
- }
- + rte_eth_burst_api[eth_dev->data->port_id].data = eth_dev->data;
- dev->rx_offloads = rxmode->offloads;
- dev->tx_offloads = txmode->offloads;
- dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
- diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
- index f931fd10e1..fa8fae50ac 100644
- --- a/lib/ethdev/ethdev_driver.h
- +++ b/lib/ethdev/ethdev_driver.h
- @@ -21,20 +21,6 @@
- extern "C" {
- #endif
- -/**
- - * @internal
- - * Structure used to hold information about the callbacks to be called for a
- - * queue on RX and TX.
- - */
- -struct rte_eth_rxtx_callback {
- - struct rte_eth_rxtx_callback *next;
- - union{
- - rte_rx_callback_fn rx;
- - rte_tx_callback_fn tx;
- - } fn;
- - void *param;
- -};
- -
- /**
- * @internal
- * The generic data structure associated with each ethernet device.
- @@ -59,16 +45,6 @@ struct rte_eth_dev {
- struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
- /** User application callbacks for NIC interrupts */
- struct rte_eth_dev_cb_list link_intr_cbs;
- - /**
- - * User-supplied functions called from rx_burst to post-process
- - * received packets before passing them to the user
- - */
- - struct rte_eth_rxtx_callback *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
- - /**
- - * User-supplied functions called from tx_burst to pre-process
- - * received packets before passing them to the driver for transmission.
- - */
- - struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
- enum rte_eth_dev_state state; /**< Flag indicating the port state */
- void *security_ctx; /**< Context for security ops */
- @@ -1667,9 +1643,8 @@ __rte_internal
- static inline void *
- _rte_eth_rx_prolog(uint16_t port_id, uint16_t queue_id)
- {
- - struct rte_eth_dev *dev;
- -
- - dev = &rte_eth_devices[port_id];
- + struct rte_eth_dev_data *data;
- + data = rte_eth_burst_api[port_id].data;
- #ifdef RTE_ETHDEV_DEBUG_RX
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
- @@ -1679,7 +1654,7 @@ _rte_eth_rx_prolog(uint16_t port_id, uint16_t queue_id)
- return NULL;
- }
- #endif
- - return dev->data->rx_queues[queue_id];
- + return data->rx_queues[queue_id];
- }
- /**
- @@ -1708,11 +1683,12 @@ static inline uint16_t
- _rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
- struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts)
- {
- - struct rte_eth_dev *dev;
- + RTE_SET_USED(nb_pkts);
- +#if 0
- + struct rte_eth_burst_api *api;
- - dev = &rte_eth_devices[port_id];
- + api = &rte_eth_burst_api[port_id];
- -#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb;
- /* __ATOMIC_RELEASE memory order was used when the
- @@ -1721,8 +1697,8 @@ _rte_eth_rx_epilog(uint16_t port_id, uint16_t queue_id,
- * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
- * not required.
- */
- - cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
- - __ATOMIC_RELAXED);
- + cb = __atomic_load_n(&api->post_rx_burst_cbs[queue_id],
- + __ATOMIC_RELAXED);
- if (unlikely(cb != NULL)) {
- do {
- @@ -1756,8 +1732,6 @@ _RTE_ETH_RX_PROTO(fn) \
- { \
- uint16_t nb_rx; \
- void *rxq = _rte_eth_rx_prolog(port_id, queue_id); \
- - if (rxq == NULL) \
- - return 0; \
- nb_rx = fn(rxq, rx_pkts, nb_pkts); \
- return _rte_eth_rx_epilog(port_id, queue_id, rx_pkts, nb_rx, nb_pkts); \
- }
- @@ -1794,11 +1768,9 @@ static inline void *
- _rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
- struct rte_mbuf **tx_pkts, uint16_t *nb_pkts)
- {
- - uint16_t n;
- - struct rte_eth_dev *dev;
- + struct rte_eth_burst_api *api;
- - n = *nb_pkts;
- - dev = &rte_eth_devices[port_id];
- + api = &rte_eth_burst_api[port_id];
- #ifdef RTE_ETHDEV_DEBUG_TX
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
- @@ -1809,30 +1781,8 @@ _rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
- }
- #endif
- -#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- - struct rte_eth_rxtx_callback *cb;
- -
- - /* __ATOMIC_RELEASE memory order was used when the
- - * call back was inserted into the list.
- - * Since there is a clear dependency between loading
- - * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
- - * not required.
- - */
- - cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
- - __ATOMIC_RELAXED);
- -
- - if (unlikely(cb != NULL)) {
- - do {
- - n = cb->fn.tx(port_id, queue_id, tx_pkts, n, cb->param);
- - cb = cb->next;
- - } while (cb != NULL);
- - }
- -
- - *nb_pkts = n;
- -#endif
- -
- - rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, n);
- - return dev->data->tx_queues[queue_id];
- + // rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, n);
- + return ((struct rte_eth_dev_data *)api->data)->tx_queues[queue_id];
- }
- /**
- @@ -1851,8 +1801,6 @@ _rte_eth_tx_prolog(uint16_t port_id, uint16_t queue_id,
- _RTE_ETH_TX_PROTO(fn) \
- { \
- void *txq = _rte_eth_tx_prolog(port_id, queue_id, tx_pkts, &nb_pkts); \
- - if (txq == NULL) \
- - return 0; \
- return fn(txq, tx_pkts, nb_pkts); \
- }
- diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
- index a41f3b2d57..8475b386c3 100644
- --- a/lib/ethdev/rte_ethdev.c
- +++ b/lib/ethdev/rte_ethdev.c
- @@ -5125,14 +5125,14 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
- rte_spinlock_lock(&eth_dev_rx_cb_lock);
- /* Add the callbacks in fifo order. */
- struct rte_eth_rxtx_callback *tail =
- - rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
- + rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id];
- if (!tail) {
- /* Stores to cb->fn and cb->param should complete before
- * cb is visible to data plane.
- */
- __atomic_store_n(
- - &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
- + &rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
- } else {
- @@ -5175,13 +5175,13 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
- rte_spinlock_lock(&eth_dev_rx_cb_lock);
- /* Add the callbacks at first position */
- - cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
- + cb->next = rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id];
- /* Stores to cb->fn, cb->param and cb->next should complete before
- * cb is visible to data plane threads.
- */
- __atomic_store_n(
- - &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
- - cb, __ATOMIC_RELEASE);
- + &rte_eth_burst_api[port_id].post_rx_burst_cbs[queue_id], cb,
- + __ATOMIC_RELEASE);
- rte_spinlock_unlock(&eth_dev_rx_cb_lock);
- return cb;
- @@ -5223,14 +5223,14 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
- rte_spinlock_lock(&eth_dev_tx_cb_lock);
- /* Add the callbacks in fifo order. */
- struct rte_eth_rxtx_callback *tail =
- - rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
- + rte_eth_burst_api[port_id].pre_tx_burst_cbs[queue_id];
- if (!tail) {
- /* Stores to cb->fn and cb->param should complete before
- * cb is visible to data plane.
- */
- __atomic_store_n(
- - &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
- + &rte_eth_burst_api[port_id].pre_tx_burst_cbs[queue_id],
- cb, __ATOMIC_RELEASE);
- } else {
- @@ -5259,13 +5259,13 @@ rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
- queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
- return -EINVAL;
- - struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- + struct rte_eth_burst_api *api = &rte_eth_burst_api[port_id];
- struct rte_eth_rxtx_callback *cb;
- struct rte_eth_rxtx_callback **prev_cb;
- int ret = -EINVAL;
- rte_spinlock_lock(&eth_dev_rx_cb_lock);
- - prev_cb = &dev->post_rx_burst_cbs[queue_id];
- + prev_cb = &api->post_rx_burst_cbs[queue_id];
- for (; *prev_cb != NULL; prev_cb = &cb->next) {
- cb = *prev_cb;
- if (cb == user_cb) {
- @@ -5290,16 +5290,16 @@ rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
- /* Check input parameters. */
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (user_cb == NULL ||
- - queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
- + queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
- return -EINVAL;
- - struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- + struct rte_eth_burst_api *api = &rte_eth_burst_api[port_id];
- int ret = -EINVAL;
- struct rte_eth_rxtx_callback *cb;
- struct rte_eth_rxtx_callback **prev_cb;
- rte_spinlock_lock(&eth_dev_tx_cb_lock);
- - prev_cb = &dev->pre_tx_burst_cbs[queue_id];
- + prev_cb = &api->pre_tx_burst_cbs[queue_id];
- for (; *prev_cb != NULL; prev_cb = &cb->next) {
- cb = *prev_cb;
- if (cb == user_cb) {
- diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
- index 0f425cf042..ff465e1b53 100644
- --- a/lib/ethdev/rte_ethdev.h
- +++ b/lib/ethdev/rte_ethdev.h
- @@ -5000,11 +5000,36 @@ static inline uint16_t
- rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
- struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
- {
- - if (port_id >= RTE_MAX_ETHPORTS)
- - return 0;
- + uint16_t nb_rx;
- +
- + nb_rx = rte_eth_burst_api[port_id].rx_pkt_burst(port_id, queue_id,
- + rx_pkts, nb_pkts);
- +#if 1
- + struct rte_eth_burst_api *api;
- +
- + api = &rte_eth_burst_api[port_id];
- +
- + struct rte_eth_rxtx_callback *cb;
- +
- + /* __ATOMIC_RELEASE memory order was used when the
- + * call back was inserted into the list.
- + * Since there is a clear dependency between loading
- + * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
- + * not required.
- + */
- + cb = __atomic_load_n(&api->post_rx_burst_cbs[queue_id],
- + __ATOMIC_RELAXED);
- +
- + if (unlikely(cb != NULL)) {
- + do {
- + nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
- + nb_pkts, cb->param);
- + cb = cb->next;
- + } while (cb != NULL);
- + }
- +#endif
- - return rte_eth_burst_api[port_id].rx_pkt_burst(port_id, queue_id,
- - rx_pkts, nb_pkts);
- + return nb_rx;
- }
- /**
- @@ -5207,9 +5232,33 @@ static inline uint16_t
- rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
- {
- - if (port_id >= RTE_MAX_ETHPORTS)
- - return 0;
- + uint16_t n;
- + struct rte_eth_burst_api *api;
- +
- + n = nb_pkts;
- + api = &rte_eth_burst_api[port_id];
- +
- +#if 1
- + struct rte_eth_rxtx_callback *cb;
- + /* __ATOMIC_RELEASE memory order was used when the
- + * call back was inserted into the list.
- + * Since there is a clear dependency between loading
- + * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
- + * not required.
- + */
- + cb = __atomic_load_n(&api->pre_tx_burst_cbs[queue_id],
- + __ATOMIC_RELAXED);
- +
- + if (unlikely(cb != NULL)) {
- + do {
- + n = cb->fn.tx(port_id, queue_id, tx_pkts, n, cb->param);
- + cb = cb->next;
- + } while (cb != NULL);
- + }
- +
- + nb_pkts = n;
- +#endif
- return rte_eth_burst_api[port_id].tx_pkt_burst(port_id, queue_id,
- tx_pkts, nb_pkts);
- }
- diff --git a/lib/ethdev/rte_ethdev_core.h b/lib/ethdev/rte_ethdev_core.h
- index 06f42ce899..b2cbf599c6 100644
- --- a/lib/ethdev/rte_ethdev_core.h
- +++ b/lib/ethdev/rte_ethdev_core.h
- @@ -72,7 +72,22 @@ typedef int (*rte_eth_tx_descriptor_status_t)(uint16_t port_id,
- typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
- /**< @internal Check the status of a Tx descriptor */
- +/**
- + * @internal
- + * Structure used to hold information about the callbacks to be called for a
- + * queue on RX and TX.
- + */
- +struct rte_eth_rxtx_callback {
- + struct rte_eth_rxtx_callback *next;
- + union {
- + rte_rx_callback_fn rx;
- + rte_tx_callback_fn tx;
- + } fn;
- + void *param;
- +};
- +
- struct rte_eth_burst_api {
- + void *data;
- rte_eth_rx_burst_t rx_pkt_burst;
- /**< PMD receive function. */
- rte_eth_tx_burst_t tx_pkt_burst;
- @@ -85,8 +100,19 @@ struct rte_eth_burst_api {
- /**< Check the status of a Rx descriptor. */
- rte_eth_tx_descriptor_status_t tx_descriptor_status;
- /**< Check the status of a Tx descriptor. */
- + /**
- + * User-supplied functions called from rx_burst to post-process
- + * received packets before passing them to the user
- + */
- + struct rte_eth_rxtx_callback
- + *post_rx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
- + /**
- + * User-supplied functions called from tx_burst to pre-process
- + * received packets before passing them to the driver for transmission.
- + */
- + struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
- uintptr_t reserved[2];
- -} __rte_cache_min_aligned;
- +} __rte_cache_aligned;
- extern struct rte_eth_burst_api rte_eth_burst_api[RTE_MAX_ETHPORTS];
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement