diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 1359c5fb1f..827d271169 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -26,7 +26,7 @@

#define MAX_BATCH_LEN 256

-static __rte_always_inline uint16_t
+static uint16_t
async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id, bool legacy_ol_flags);
@@ -34,13 +34,13 @@ async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
/* DMA device copy operation tracking array. */
struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];

-static __rte_always_inline bool
+static bool
rxvq_is_mergeable(struct virtio_net *dev)
{
return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

-static __rte_always_inline bool
+static bool
virtio_net_is_inorder(struct virtio_net *dev)
{
return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
@@ -52,7 +52,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

-static inline void
+static void
vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count)
__rte_shared_locks_required(&vq->access_lock)
@@ -98,7 +98,7 @@ vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}

-static __rte_always_inline int64_t
+static int64_t
vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
struct vhost_iov_iter *pkt)
@@ -148,7 +148,7 @@ vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
return nr_segs;
}

-static __rte_always_inline uint16_t
+static uint16_t
vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
struct vhost_iov_iter *pkts, uint16_t nr_pkts)
@@ -180,7 +180,7 @@ vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
return pkt_idx;
}

-static __rte_always_inline uint16_t
+static uint16_t
vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t vchan_id,
uint16_t max_pkts)
{
@@ -231,7 +231,7 @@ vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t
return nr_copies;
}

-static inline void
+static void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
__rte_shared_locks_required(&vq->iotlb_lock)
{
@@ -249,7 +249,7 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
vq->batch_copy_nb_elems = 0;
}

-static inline void
+static void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
struct batch_copy_elem *elem = vq->batch_copy_elems;
@@ -262,7 +262,7 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq)
vq->batch_copy_nb_elems = 0;
}

-static __rte_always_inline void
+static void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint16_t to, uint16_t from, uint16_t size)
@@ -275,7 +275,7 @@ do_flush_shadow_used_ring_split(struct virtio_net *dev,
size * sizeof(struct vring_used_elem));
}

-static __rte_always_inline void
+static void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
@@ -305,7 +305,7 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
sizeof(vq->used->idx));
}

-static __rte_always_inline void
+static void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint32_t len)
{
@@ -315,7 +315,7 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq,
vq->shadow_used_split[i].len = len;
}

-static __rte_always_inline void
+static void
vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq)
{
@@ -379,7 +379,7 @@ vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
vhost_log_cache_sync(dev, vq);
}

-static __rte_always_inline void
+static void
vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq)
{
@@ -398,7 +398,7 @@ vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
vhost_log_cache_sync(dev, vq);
}

-static __rte_always_inline void
+static void
vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint64_t *lens,
@@ -434,7 +434,7 @@ vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

-static __rte_always_inline void
+static void
vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
uint64_t *lens,
uint16_t *ids)
@@ -453,7 +453,7 @@ vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
}
}

-static __rte_always_inline void
+static void
vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq, uint16_t *ids)
__rte_shared_locks_required(&vq->access_lock)
{
@@ -471,7 +471,7 @@ vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq, uint16_t *id
}
}

-static __rte_always_inline void
+static void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
uint16_t id)
{
@@ -489,7 +489,7 @@ vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

-static __rte_always_inline void
+static void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint16_t *ids)
@@ -529,7 +529,7 @@ vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

-static __rte_always_inline void
+static void
vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
uint16_t buf_id,
uint16_t count)
@@ -561,7 +561,7 @@ vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
vq_inc_last_used_packed(vq, count);
}

-static __rte_always_inline void
+static void
vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
uint16_t buf_id,
uint16_t count)
@@ -589,7 +589,7 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
vq_inc_last_used_packed(vq, count);
}

-static __rte_always_inline void
+static void
vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
uint32_t *len,
uint16_t *id,
@@ -611,7 +611,7 @@ vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
}
}

-static __rte_always_inline void
+static void
vhost_async_shadow_enqueue_packed(struct vhost_virtqueue *vq,
uint32_t *len,
uint16_t *id,
@@ -632,7 +632,7 @@ vhost_async_shadow_enqueue_packed(struct vhost_virtqueue *vq,
}
}

-static __rte_always_inline void
+static void
vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
uint32_t *len,
@@ -655,7 +655,7 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
(var) = (val); \
} while (0)

-static __rte_always_inline void
+static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
@@ -727,7 +727,7 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
}

-static __rte_always_inline int
+static int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec, uint16_t *vec_idx,
uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
@@ -764,7 +764,7 @@ map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}

-static __rte_always_inline int
+static int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t avail_idx, uint16_t *vec_idx,
struct buf_vector *buf_vec, uint16_t *desc_chain_head,
@@ -848,7 +848,7 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
/*
* Returns -1 on fail, 0 on success
*/
-static inline int
+static int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t size, struct buf_vector *buf_vec,
uint16_t *num_buffers, uint16_t avail_head,
@@ -899,7 +899,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}

-static __rte_always_inline int
+static int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct vring_packed_desc *desc, uint16_t *vec_idx,
@@ -958,7 +958,7 @@ fill_vec_buf_packed_indirect(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline int
+static int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t avail_idx, uint16_t *desc_count,
struct buf_vector *buf_vec, uint16_t *vec_idx,
@@ -1025,7 +1025,7 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}

-static __rte_noinline void
+static void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec,
struct virtio_net_hdr_mrg_rxbuf *hdr)
@@ -1056,7 +1056,7 @@ copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}

-static __rte_always_inline int
+static int
async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
{
struct vhost_iov_iter *iter;
@@ -1073,7 +1073,7 @@ async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
return 0;
}

-static __rte_always_inline int
+static int
async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
void *src, void *dst, size_t len)
{
@@ -1104,13 +1104,13 @@ async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
return 0;
}

-static __rte_always_inline void
+static void
async_iter_finalize(struct vhost_async *async)
{
async->iter_idx++;
}

-static __rte_always_inline void
+static void
async_iter_cancel(struct vhost_async *async)
{
struct vhost_iov_iter *iter;
@@ -1121,14 +1121,14 @@ async_iter_cancel(struct vhost_async *async)
iter->iov = NULL;
}

-static __rte_always_inline void
+static void
async_iter_reset(struct vhost_async *async)
{
async->iter_idx = 0;
async->iovec_idx = 0;
}

-static __rte_always_inline int
+static int
async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, uint32_t mbuf_offset,
uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
@@ -1170,7 +1170,7 @@ async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}

-static __rte_always_inline void
+static void
sync_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, uint32_t mbuf_offset,
uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
@@ -1208,7 +1208,7 @@ sync_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
}

-static __rte_always_inline int
+static int
mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, struct buf_vector *buf_vec,
uint16_t nr_vec, uint16_t num_buffers, bool is_async)
@@ -1336,7 +1336,7 @@ mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
return -1;
}

-static __rte_always_inline int
+static int
vhost_enqueue_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf *pkt,
@@ -1401,7 +1401,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
return 0;
}

-static __rte_noinline uint32_t
+static uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count)
__rte_shared_locks_required(&vq->access_lock)
@@ -1457,7 +1457,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
return pkt_idx;
}

-static __rte_always_inline int
+static int
virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
@@ -1509,7 +1509,7 @@ virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline int
+static int
virtio_dev_rx_async_batch_check(struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
uint64_t *desc_addrs,
@@ -1561,7 +1561,7 @@ virtio_dev_rx_async_batch_check(struct vhost_virtqueue *vq,
return 0;
}

-static __rte_always_inline void
+static void
virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
@@ -1611,7 +1611,7 @@ virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
}

-static __rte_always_inline int
+static int
virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts)
@@ -1633,7 +1633,7 @@ virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline int16_t
+static int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf *pkt)
@@ -1658,7 +1658,7 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
return 0;
}

-static __rte_noinline uint32_t
+static uint32_t
virtio_dev_rx_packed(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq,
struct rte_mbuf **__rte_restrict pkts,
@@ -1707,7 +1707,7 @@ virtio_dev_vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
rte_rwlock_write_unlock(&vq->access_lock);
}

-static __rte_always_inline uint32_t
+static uint32_t
virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count)
{
@@ -1776,7 +1776,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
return virtio_dev_rx(dev, dev->virtqueue[queue_id], pkts, count);
}

-static __rte_always_inline uint16_t
+static uint16_t
async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
__rte_shared_locks_required(&vq->access_lock)
{
@@ -1788,7 +1788,7 @@ async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
return vq->size - async->pkts_inflight_n + async->pkts_idx;
}

-static __rte_always_inline void
+static void
store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem *d_ring,
uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
{
@@ -1804,7 +1804,7 @@ store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
}
}

-static __rte_noinline uint32_t
+static uint32_t
virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
__rte_exclusive_locks_required(&vq->access_lock)
@@ -1909,7 +1909,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
}


-static __rte_always_inline int
+static int
vhost_enqueue_async_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf *pkt,
@@ -1973,7 +1973,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline int16_t
+static int16_t
virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
__rte_exclusive_locks_required(&vq->access_lock)
@@ -1994,7 +1994,7 @@ virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}

-static __rte_always_inline void
+static void
virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
@@ -2057,7 +2057,7 @@ virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net *dev,
vhost_async_shadow_enqueue_packed_batch(vq, lens, ids);
}

-static __rte_always_inline int
+static int
virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
@@ -2076,7 +2076,7 @@ virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline void
+static void
dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
uint32_t nr_err, uint32_t *pkt_idx)
__rte_exclusive_locks_required(&vq->access_lock)
@@ -2107,7 +2107,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
async->buffer_idx_packed = async->buffer_idx_packed + vq->size - buffers_err;
}

-static __rte_noinline uint32_t
+static uint32_t
virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
__rte_exclusive_locks_required(&vq->access_lock)
@@ -2182,7 +2182,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
return pkt_idx;
}

-static __rte_always_inline void
+static void
write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
__rte_shared_locks_required(&vq->access_lock)
{
@@ -2214,7 +2214,7 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
} while (nr_left > 0);
}

-static __rte_always_inline void
+static void
write_back_completed_descs_packed(struct vhost_virtqueue *vq,
uint16_t n_buffers)
__rte_shared_locks_required(&vq->access_lock)
@@ -2279,7 +2279,7 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
async->last_buffer_idx_packed = from;
}

-static __rte_always_inline uint16_t
+static uint16_t
vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id, uint16_t vchan_id)
__rte_shared_locks_required(&vq->access_lock)
@@ -2521,7 +2521,7 @@ rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
return n_pkts_cpl;
}

-static __rte_always_inline uint32_t
+static uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
{
@@ -2603,7 +2603,7 @@ rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
dma_id, vchan_id);
}

-static inline bool
+static bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
if (dev->features &
@@ -2702,7 +2702,7 @@ parse_headers(struct rte_mbuf *m, uint8_t *l4_proto)
return -EINVAL;
}

-static __rte_always_inline void
+static void
vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
struct rte_mbuf *m)
{
@@ -2778,7 +2778,7 @@ vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
m->ol_flags = 0;
}

-static __rte_always_inline void
+static void
vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
struct rte_mbuf *m, bool legacy_ol_flags)
{
@@ -2866,7 +2866,7 @@ vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
}
}

-static __rte_noinline void
+static void
copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
struct buf_vector *buf_vec)
{
@@ -2887,7 +2887,7 @@ copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
}
}

-static __rte_always_inline int
+static int
desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec, uint16_t nr_vec,
struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
@@ -3077,7 +3077,7 @@ virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t s
/*
* Prepare a host supported pktmbuf.
*/
-static __rte_always_inline int
+static int
virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
uint32_t data_len)
{
@@ -3095,7 +3095,7 @@ virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
return -1;
}

-__rte_always_inline
+
static uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
@@ -3190,7 +3190,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
return i;
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_split_legacy(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
@@ -3201,7 +3201,7 @@ virtio_dev_tx_split_legacy(struct virtio_net *dev,
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_split_compliant(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
@@ -3212,7 +3212,7 @@ virtio_dev_tx_split_compliant(struct virtio_net *dev,
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
}

-static __rte_always_inline int
+static int
vhost_reserve_avail_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
@@ -3284,7 +3284,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
return -1;
}

-static __rte_always_inline int
+static int
vhost_async_tx_batch_packed_check(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
@@ -3358,7 +3358,7 @@ vhost_async_tx_batch_packed_check(struct virtio_net *dev,
return -1;
}

-static __rte_always_inline int
+static int
virtio_dev_tx_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts,
@@ -3402,7 +3402,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline int
+static int
vhost_dequeue_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool,
@@ -3454,7 +3454,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline int
+static int
virtio_dev_tx_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool,
@@ -3484,7 +3484,7 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
return ret;
}

-__rte_always_inline
+
static uint16_t
virtio_dev_tx_packed(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq,
@@ -3534,7 +3534,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
return pkt_idx;
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_packed_legacy(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
@@ -3545,7 +3545,7 @@ virtio_dev_tx_packed_legacy(struct virtio_net *dev,
return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_packed_compliant(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
@@ -3666,7 +3666,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
return count;
}

-static __rte_always_inline uint16_t
+static uint16_t
async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id, bool legacy_ol_flags)
@@ -3714,7 +3714,7 @@ async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
return nr_cpl_pkts;
}

-static __rte_always_inline uint16_t
+static uint16_t
virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
int16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
@@ -3863,7 +3863,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
return nr_done_pkts;
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
@@ -3876,7 +3876,7 @@ virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
pkts, count, dma_id, vchan_id, true);
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
@@ -3889,7 +3889,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
pkts, count, dma_id, vchan_id, false);
}

-static __rte_always_inline void
+static void
vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
uint16_t buf_id, uint16_t count)
__rte_shared_locks_required(&vq->access_lock)
@@ -3907,7 +3907,7 @@ vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,

}

-static __rte_always_inline int
+static int
virtio_dev_tx_async_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool,
@@ -3962,7 +3962,7 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
return err;
}

-static __rte_always_inline int
+static int
virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t slot_idx,
@@ -4021,7 +4021,7 @@ virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
return 0;
}

-static __rte_always_inline uint16_t
+static uint16_t
virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
@@ -4133,7 +4133,7 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
return nr_done_pkts;
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
@@ -4145,7 +4145,7 @@ virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue
pkts, count, dma_id, vchan_id, true);
}

-__rte_noinline
+
static uint16_t
virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
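
Note (not part of the patch): the attributes being dropped above are DPDK helper macros. Assuming the usual GCC/Clang definitions in rte_common.h (__rte_always_inline expanding to inline __attribute__((always_inline)) and __rte_noinline to __attribute__((noinline))), the change amounts to the difference sketched below. The local macro and helper names in the sketch are made up for illustration; the standalone program only mirrors the idea, it does not reproduce the vhost code.

/* Standalone sketch; the macro below only mirrors what DPDK's
 * rte_common.h is assumed to define, so the example builds
 * without the DPDK headers. Helper names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define sketch_always_inline inline __attribute__((always_inline))

/* Before the patch: the compiler is forced to inline the helper into
 * every caller, so it typically has no standalone symbol.
 */
static sketch_always_inline uint16_t
add_forced_inline(uint16_t a, uint16_t b)
{
	return a + b;
}

/* After the patch: a plain static function; whether it is inlined is
 * left to the compiler's own heuristics, and a standalone symbol may
 * remain (one common motivation for such a change is making helpers
 * visible to profilers and debuggers).
 */
static uint16_t
add_plain(uint16_t a, uint16_t b)
{
	return a + b;
}

int
main(void)
{
	printf("%u %u\n", (unsigned)add_forced_inline(1, 2), (unsigned)add_plain(3, 4));
	return 0;
}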