From: beilei.xing@intel.com
To: jingjing.wu@intel.com, qi.z.zhang@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH 14/15] common/idpf: add vec queue setup
Date: Thu, 8 Dec 2022 07:53:08 +0000 [thread overview]
Message-ID: <20221208075309.37852-15-beilei.xing@intel.com> (raw)
In-Reply-To: <20221208075309.37852-1-beilei.xing@intel.com>
From: Beilei Xing <beilei.xing@intel.com>
Add vector queue setup for the single-queue model.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_rxtx.c | 57 ++++++++++++++++++++++++++
drivers/common/idpf/idpf_common_rxtx.h | 2 +
drivers/common/idpf/version.map | 1 +
drivers/net/idpf/idpf_rxtx.c | 57 --------------------------
drivers/net/idpf/idpf_rxtx.h | 1 -
5 files changed, 60 insertions(+), 58 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index 3030f89bf1..7618819e68 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -1397,3 +1397,60 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+
+static void __rte_cold
+release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
+{
+ const uint16_t mask = rxq->nb_rx_desc - 1;
+ uint16_t i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i] != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ } else {
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) {
+ if (rxq->sw_ring[i] != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static const struct idpf_rxq_ops def_singleq_rx_ops_vec = {
+ .release_mbufs = release_rxq_mbufs_vec,
+};
+
+static inline int
+idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+int __rte_cold
+idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
+{
+ rxq->ops = &def_singleq_rx_ops_vec;
+ return idpf_singleq_rx_vec_setup_default(rxq);
+}
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 2da2a6dc49..ea6c6b78e9 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -252,5 +252,7 @@ uint16_t idpf_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
__rte_internal
uint16_t idpf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+__rte_internal
+int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
#endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index bc2a069735..d022c72971 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,7 @@ INTERNAL {
idpf_singleq_recv_pkts;
idpf_singleq_xmit_pkts;
idpf_prep_pkts;
+ idpf_singleq_rx_vec_setup;
local: *;
};
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index fbf2a8f0cd..b6d4e5abd0 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -746,63 +746,6 @@ idpf_stop_queues(struct rte_eth_dev *dev)
}
}
-static void __rte_cold
-release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
-{
- const uint16_t mask = rxq->nb_rx_desc - 1;
- uint16_t i;
-
- if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
- return;
-
- /* free all mbufs that are valid in the ring */
- if (rxq->rxrearm_nb == 0) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_ring[i] != NULL)
- rte_pktmbuf_free_seg(rxq->sw_ring[i]);
- }
- } else {
- for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) {
- if (rxq->sw_ring[i] != NULL)
- rte_pktmbuf_free_seg(rxq->sw_ring[i]);
- }
- }
-
- rxq->rxrearm_nb = rxq->nb_rx_desc;
-
- /* set all entries to NULL */
- memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
-}
-
-static const struct idpf_rxq_ops def_singleq_rx_ops_vec = {
- .release_mbufs = release_rxq_mbufs_vec,
-};
-
-static inline int
-idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
-{
- uintptr_t p;
- struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
- mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
- rte_mbuf_refcnt_set(&mb_def, 1);
-
- /* prevent compiler reordering: rearm_data covers previous fields */
- rte_compiler_barrier();
- p = (uintptr_t)&mb_def.rearm_data;
- rxq->mbuf_initializer = *(uint64_t *)p;
- return 0;
-}
-
-int __rte_cold
-idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
-{
- rxq->ops = &def_singleq_rx_ops_vec;
- return idpf_singleq_rx_vec_setup_default(rxq);
-}
-
void
idpf_set_rx_function(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index eab363c3e7..a985dc2cf5 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -44,7 +44,6 @@ void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-int idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq);
int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
--
2.26.2
next prev parent reply other threads:[~2022-12-08 7:55 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-08 7:52 [PATCH 00/15] net/idpf: refactor idpf pmd beilei.xing
2022-12-08 7:52 ` [PATCH 01/15] common/idpf: add adapter structure beilei.xing
2022-12-08 7:52 ` [PATCH 02/15] common/idpf: add vport structure beilei.xing
2022-12-08 7:52 ` [PATCH 03/15] common/idpf: move vc functions to common module beilei.xing
2022-12-08 7:52 ` [PATCH 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2022-12-08 7:52 ` [PATCH 05/15] common/idpf: add vport init/deinit beilei.xing
2022-12-08 7:53 ` [PATCH 06/15] common/idpf: add config RSS beilei.xing
2022-12-08 7:53 ` [PATCH 07/15] common/idpf: add irq map/unmap beilei.xing
2022-12-08 7:53 ` [PATCH 08/15] common/idpf: move ptype table to adapter structure beilei.xing
2022-12-08 7:53 ` [PATCH 09/15] common/idpf: init create vport info beilei.xing
2022-12-08 7:53 ` [PATCH 10/15] common/idpf: add vector flags in vport beilei.xing
2022-12-08 7:53 ` [PATCH 11/15] common/idpf: add rxq and txq struct beilei.xing
2022-12-08 7:53 ` [PATCH 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2022-12-08 7:53 ` [PATCH 13/15] common/idpf: add scalar data path beilei.xing
2022-12-08 7:53 ` beilei.xing [this message]
2022-12-08 7:53 ` [PATCH 15/15] common/idpf: add avx512 for single queue model beilei.xing
2023-01-06 9:16 ` [PATCH v2 00/15] net/idpf: introduce idpf common modle beilei.xing
2023-01-06 9:16 ` [PATCH v2 01/15] common/idpf: add adapter structure beilei.xing
2023-01-06 9:16 ` [PATCH v2 02/15] common/idpf: add vport structure beilei.xing
2023-01-06 9:16 ` [PATCH v2 03/15] common/idpf: move vc functions to common module beilei.xing
2023-01-06 9:16 ` [PATCH v2 04/15] common/idpf: introduce adapter init and deinit beilei.xing
2023-01-06 9:16 ` [PATCH v2 05/15] common/idpf: add vport init/deinit beilei.xing
2023-01-08 12:10 ` Zhang, Qi Z
2023-01-09 1:34 ` Xing, Beilei
2023-01-06 9:16 ` [PATCH v2 06/15] common/idpf: add config RSS beilei.xing
2023-01-06 9:16 ` [PATCH v2 07/15] common/idpf: add irq map/unmap beilei.xing
2023-01-17 1:14 ` Zhang, Qi Z
2023-01-06 9:16 ` [PATCH v2 08/15] common/idpf: move ptype table to adapter structure beilei.xing
2023-01-06 9:16 ` [PATCH v2 09/15] common/idpf: init create vport info beilei.xing
2023-01-06 9:16 ` [PATCH v2 10/15] common/idpf: add vector flags in vport beilei.xing
2023-01-06 9:16 ` [PATCH v2 11/15] common/idpf: add rxq and txq struct beilei.xing
2023-01-06 9:16 ` [PATCH v2 12/15] common/idpf: add help functions for queue setup and release beilei.xing
2023-01-06 9:16 ` [PATCH v2 13/15] common/idpf: add scalar data path beilei.xing
2023-01-06 9:16 ` [PATCH v2 14/15] common/idpf: add vec queue setup beilei.xing
2023-01-06 9:16 ` [PATCH v2 15/15] common/idpf: add avx512 for single queue model beilei.xing
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221208075309.37852-15-beilei.xing@intel.com \
--to=beilei.xing@intel.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=qi.z.zhang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).