DPDK patches and discussions
* [PATCH] net/idpf: optimize Tx/Rx queue model code
@ 2023-03-01 16:16 Mingxia Liu
  2023-03-01 19:26 ` [PATCH v2] net/idpf: refine Rx/Tx queue model info Mingxia Liu
  0 siblings, 1 reply; 9+ messages in thread
From: Mingxia Liu @ 2023-03-01 16:16 UTC (permalink / raw)
  To: dev; +Cc: jingjing.wu, beilei.xing, Mingxia Liu

This patch updates the queue model info in struct idpf_adapter.
The new is_rx_singleq_model flag differentiates the Rx single queue
and split queue models explicitly, instead of deducing the model from
buffer queue pointer values.

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
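The core of the change, condensed for illustration (the real hunks are
below): the Rx queue model is now stated explicitly on the adapter
instead of being deduced from the buffer queue pointers.

	/* Before: split vs. single queue deduced from pointer values. */
	if (q->bufq1 != NULL && q->bufq2 != NULL) {
		/* split queue release path */
	}

	/* After: model read from the explicit flag in struct idpf_adapter. */
	if (!q->adapter->is_rx_singleq_model) {
		/* split queue release path */
	}
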
 drivers/common/idpf/idpf_common_device.c | 4 ++--
 drivers/common/idpf/idpf_common_device.h | 4 ++--
 drivers/common/idpf/idpf_common_rxtx.c   | 2 +-
 drivers/common/idpf/idpf_common_rxtx.h   | 4 ++++
 drivers/net/idpf/idpf_ethdev.c           | 4 ++--
 drivers/net/idpf/idpf_rxtx.c             | 6 +++---
 6 files changed, 14 insertions(+), 10 deletions(-)
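
For context, the single queue model is requested through devargs at probe
time, e.g. (PCI address and vport list are placeholders; this assumes the
rx_single/tx_single devargs documented for the idpf PMD):

	dpdk-testpmd -a 0000:af:00.0,vport=[0],rx_single=1,tx_single=1 -- -i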

diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index 5475a3e52c..e5c13581fd 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -623,7 +623,7 @@ idpf_vport_info_init(struct idpf_vport *vport,
 	struct idpf_adapter *adapter = vport->adapter;
 
 	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
-	if (adapter->txq_model == 0) {
+	if (!adapter->is_tx_singleq_model) {
 		vport_info->txq_model =
 			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
 		vport_info->num_tx_q =
@@ -636,7 +636,7 @@ idpf_vport_info_init(struct idpf_vport *vport,
 		vport_info->num_tx_q = rte_cpu_to_le_16(IDPF_DEFAULT_TXQ_NUM);
 		vport_info->num_tx_complq = 0;
 	}
-	if (adapter->rxq_model == 0) {
+	if (!adapter->is_rx_singleq_model) {
 		vport_info->rxq_model =
 			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
 		vport_info->num_rx_q = rte_cpu_to_le_16(IDPF_DEFAULT_RXQ_NUM);
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 364a60221a..b9ee20744d 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -43,8 +43,8 @@ struct idpf_adapter {
 
 	uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 
-	uint32_t txq_model; /* 0 - split queue model, non-0 - single queue model */
-	uint32_t rxq_model; /* 0 - split queue model, non-0 - single queue model */
+	bool is_tx_singleq_model; /* true - single queue model, false - split queue model */
+	bool is_rx_singleq_model; /* true - single queue model, false - split queue model */
 
 	/* For timestamp */
 	uint64_t time_hw;
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index d7e8df1895..08c06cae4a 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -309,7 +309,7 @@ idpf_qc_rx_queue_release(void *rxq)
 		return;
 
 	/* Split queue */
-	if (q->bufq1 != NULL && q->bufq2 != NULL) {
+	if (!q->adapter->is_rx_singleq_model) {
 		q->bufq1->ops->release_mbufs(q->bufq1);
 		rte_free(q->bufq1->sw_ring);
 		rte_memzone_free(q->bufq1->mz);
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 7e6df080e6..0eb1c96852 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -90,6 +90,10 @@
 #define PF_GLTSYN_SHTIME_L_5	(PF_TIMESYNC_BAR4_BASE + 0x138)
 #define PF_GLTSYN_SHTIME_H_5	(PF_TIMESYNC_BAR4_BASE + 0x13C)
 
+enum idpf_rx_split_bufq_id {
+	IDPF_SPLIT_BUFQ1_ID = 1,
+	IDPF_SPLIT_BUFQ2_ID = 2
+};
 struct idpf_rx_stats {
 	uint64_t mbuf_alloc_failed;
 };
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 5b4f4fd82b..dafb3179ab 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -993,12 +993,12 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
 		goto bail;
 
 	ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
-				 &adapter->base.txq_model);
+				 &adapter->base.is_tx_singleq_model);
 	if (ret != 0)
 		goto bail;
 
 	ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
-				 &adapter->base.rxq_model);
+				 &adapter->base.is_rx_singleq_model);
 	if (ret != 0)
 		goto bail;
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index d16acd87fb..b0f68baa28 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -189,9 +189,9 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
 	bufq->ops = &def_rxq_ops;
 	bufq->q_set = true;
 
-	if (bufq_id == 1) {
+	if (bufq_id == IDPF_SPLIT_BUFQ1_ID) {
 		rxq->bufq1 = bufq;
-	} else if (bufq_id == 2) {
+	} else if (bufq_id == IDPF_SPLIT_BUFQ2_ID) {
 		rxq->bufq2 = bufq;
 	} else {
 		PMD_INIT_LOG(ERR, "Invalid buffer queue index.");
@@ -536,7 +536,7 @@ idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		return -EIO;
 	}
 
-	if (rxq->bufq1 == NULL) {
+	if (rxq->adapter->is_rx_singleq_model) {
 		/* Single queue */
 		err = idpf_qc_single_rxq_mbufs_alloc(rxq);
 		if (err != 0) {
-- 
2.34.1



Thread overview: 9+ messages
2023-03-01 16:16 [PATCH] net/idpf: optimize Tx/Rx queue model code Mingxia Liu
2023-03-01 19:26 ` [PATCH v2] net/idpf: refine Rx/Tx queue model info Mingxia Liu
2023-03-02  9:46   ` Zhang, Qi Z
2023-03-02 10:06     ` Ferruh Yigit
2023-03-02 10:28       ` Zhang, Qi Z
2023-03-02 11:38         ` Liu, Mingxia
2023-03-02 12:08   ` Xing, Beilei
2023-03-02 19:51   ` [PATCH v3] " Mingxia Liu
2023-03-02 14:42     ` Ferruh Yigit
