DPDK patches and discussions
From: beilei.xing@intel.com
To: jingjing.wu@intel.com
Cc: dev@dpdk.org, mingxia.liu@intel.com, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH 01/10] net/cpfl: refine structures
Date: Fri, 21 Apr 2023 06:50:39 +0000	[thread overview]
Message-ID: <20230421065048.106899-2-beilei.xing@intel.com> (raw)
In-Reply-To: <20230421065048.106899-1-beilei.xing@intel.com>

From: Beilei Xing <beilei.xing@intel.com>

This patch refines some structures to support hairpin queues:
cpfl_rx_queue, cpfl_tx_queue and cpfl_vport.
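
Below is a minimal illustration of the wrapper pattern (a sketch summarizing
the diff that follows, not an additional code change; the function name
cpfl_example_irq_config is hypothetical): each cpfl structure embeds its idpf
counterpart as a "base" member, and driver code reaches the common idpf object
through that wrapper instead of casting dev_private directly.

	struct cpfl_vport {
		struct idpf_vport base;	/* idpf common code still operates on this */
	};

	static int
	cpfl_example_irq_config(struct rte_eth_dev *dev)
	{
		/* dev_private now holds the cpfl_vport wrapper */
		struct cpfl_vport *cpfl_vport = dev->data->dev_private;
		struct idpf_vport *vport = &cpfl_vport->base;

		return idpf_vport_irq_map_config(vport, dev->data->nb_rx_queues);
	}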

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c          |  85 +++++++-----
 drivers/net/cpfl/cpfl_ethdev.h          |   6 +-
 drivers/net/cpfl/cpfl_rxtx.c            | 175 +++++++++++++++++-------
 drivers/net/cpfl/cpfl_rxtx.h            |   8 ++
 drivers/net/cpfl/cpfl_rxtx_vec_common.h |  17 +--
 5 files changed, 196 insertions(+), 95 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 306b8ad769..4a507f05d5 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 		     __rte_unused int wait_to_complete)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct rte_eth_link new_link;
 	unsigned int i;
 
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
 static int
 cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 
 	dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 static int
 cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 
 	/* MTU setting is forbidden if port is started */
 	if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
 cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 {
 	uint64_t mbuf_alloc_failed = 0;
-	struct idpf_rx_queue *rxq;
+	struct cpfl_rx_queue *cpfl_rxq;
 	int i = 0;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+		cpfl_rxq = dev->data->rx_queues[i];
+		mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
 						     __ATOMIC_RELAXED);
 	}
 
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 static int
 cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-	struct idpf_vport *vport =
-		(struct idpf_vport *)dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct virtchnl2_vport_stats *pstats = NULL;
 	int ret;
 
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static void
 cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 {
-	struct idpf_rx_queue *rxq;
+	struct cpfl_rx_queue *cpfl_rxq;
 	int i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		__atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+		cpfl_rxq = dev->data->rx_queues[i];
+		__atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
 	}
 }
 
 static int
 cpfl_dev_stats_reset(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport =
-		(struct idpf_vport *)dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct virtchnl2_vport_stats *pstats = NULL;
 	int ret;
 
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
 static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
 			       struct rte_eth_xstat *xstats, unsigned int n)
 {
-	struct idpf_vport *vport =
-		(struct idpf_vport *)dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct virtchnl2_vport_stats *pstats = NULL;
 	unsigned int i;
 	int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
 		     struct rte_eth_rss_reta_entry64 *reta_conf,
 		     uint16_t reta_size)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	uint16_t idx, shift;
 	int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
 		    struct rte_eth_rss_reta_entry64 *reta_conf,
 		    uint16_t reta_size)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	uint16_t idx, shift;
 	int ret = 0;
@@ -536,7 +541,8 @@ static int
 cpfl_rss_hash_update(struct rte_eth_dev *dev,
 		     struct rte_eth_rss_conf *rss_conf)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	int ret = 0;
 
@@ -601,7 +607,8 @@ static int
 cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
 		       struct rte_eth_rss_conf *rss_conf)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	int ret = 0;
 
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
 static int
 cpfl_dev_configure(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct rte_eth_conf *conf = &dev->data->dev_conf;
 	struct idpf_adapter *base = vport->adapter;
 	int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
 static int
 cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	uint16_t nb_rx_queues = dev->data->nb_rx_queues;
 
 	return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
 static int
 cpfl_start_queues(struct rte_eth_dev *dev)
 {
-	struct idpf_rx_queue *rxq;
-	struct idpf_tx_queue *txq;
+	struct cpfl_rx_queue *cpfl_rxq;
+	struct cpfl_tx_queue *cpfl_txq;
 	int err = 0;
 	int i;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		if (txq == NULL || txq->tx_deferred_start)
+		cpfl_txq = dev->data->tx_queues[i];
+		if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
 			continue;
 		err = cpfl_tx_queue_start(dev, i);
 		if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
 	}
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		if (rxq == NULL || rxq->rx_deferred_start)
+		cpfl_rxq = dev->data->rx_queues[i];
+		if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
 			continue;
 		err = cpfl_rx_queue_start(dev, i);
 		if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
 static int
 cpfl_dev_start(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
 	uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -815,7 +825,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
 static int
 cpfl_dev_stop(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 
 	if (vport->stopped == 1)
 		return 0;
@@ -836,7 +847,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
 static int
 cpfl_dev_close(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
 
 	cpfl_dev_stop(dev);
@@ -846,7 +858,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
 	adapter->cur_vport_nb--;
 	dev->data->dev_private = NULL;
 	adapter->vports[vport->sw_idx] = NULL;
-	rte_free(vport);
+	rte_free(cpfl_vport);
 
 	return 0;
 }
@@ -1051,7 +1063,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 	int i;
 
 	for (i = 0; i < adapter->cur_vport_nb; i++) {
-		vport = adapter->vports[i];
+		vport = &adapter->vports[i]->base;
 		if (vport->vport_id != vport_id)
 			continue;
 		else
@@ -1328,7 +1340,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
 static int
 cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct cpfl_vport_param *param = init_params;
 	struct cpfl_adapter_ext *adapter = param->adapter;
 	/* for sending create vport virtchnl msg prepare */
@@ -1354,7 +1367,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
 		goto err;
 	}
 
-	adapter->vports[param->idx] = vport;
+	adapter->vports[param->idx] = cpfl_vport;
 	adapter->cur_vports |= RTE_BIT32(param->devarg_id);
 	adapter->cur_vport_nb++;
 
@@ -1470,7 +1483,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		snprintf(name, sizeof(name), "cpfl_%s_vport_0",
 			 pci_dev->device.name);
 		retval = rte_eth_dev_create(&pci_dev->device, name,
-					    sizeof(struct idpf_vport),
+					    sizeof(struct cpfl_vport),
 					    NULL, NULL, cpfl_dev_vport_init,
 					    &vport_param);
 		if (retval != 0)
@@ -1488,7 +1501,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 				 pci_dev->device.name,
 				 devargs.req_vports[i]);
 			retval = rte_eth_dev_create(&pci_dev->device, name,
-						    sizeof(struct idpf_vport),
+						    sizeof(struct cpfl_vport),
 						    NULL, NULL, cpfl_dev_vport_init,
 						    &vport_param);
 			if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
 	uint16_t req_vport_nb;
 };
 
+struct cpfl_vport {
+	struct idpf_vport base;
+};
+
 struct cpfl_adapter_ext {
 	TAILQ_ENTRY(cpfl_adapter_ext) next;
 	struct idpf_adapter base;
 
 	char name[CPFL_ADAPTER_NAME_LEN];
 
-	struct idpf_vport **vports;
+	struct cpfl_vport **vports;
 	uint16_t max_vport_nb;
 
 	uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index de59b31b3d..a441e2ffbe 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
 			 uint16_t nb_desc, unsigned int socket_id,
 			 struct rte_mempool *mp, uint8_t bufq_id)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	struct idpf_hw *hw = &base->hw;
 	const struct rte_memzone *mz;
@@ -219,15 +220,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
 	rte_free(bufq);
 }
 
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+	struct cpfl_rx_queue *cpfl_rxq = rxq;
+	struct idpf_rx_queue *q = NULL;
+
+	if (cpfl_rxq == NULL)
+		return;
+
+	q = &cpfl_rxq->base;
+
+	/* Split queue */
+	if (!q->adapter->is_rx_singleq) {
+		if (q->bufq2)
+			cpfl_rx_split_bufq_release(q->bufq2);
+
+		if (q->bufq1)
+			cpfl_rx_split_bufq_release(q->bufq1);
+
+		rte_free(cpfl_rxq);
+		return;
+	}
+
+	/* Single queue */
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+	struct cpfl_tx_queue *cpfl_txq = txq;
+	struct idpf_tx_queue *q = NULL;
+
+	if (cpfl_txq == NULL)
+		return;
+
+	q = &cpfl_txq->base;
+
+	if (q->complq) {
+		rte_memzone_free(q->complq->mz);
+		rte_free(q->complq);
+	}
+
+	q->ops->release_mbufs(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_free(cpfl_txq);
+}
+
 int
 cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		    uint16_t nb_desc, unsigned int socket_id,
 		    const struct rte_eth_rxconf *rx_conf,
 		    struct rte_mempool *mp)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	struct idpf_hw *hw = &base->hw;
+	struct cpfl_rx_queue *cpfl_rxq;
 	const struct rte_memzone *mz;
 	struct idpf_rx_queue *rxq;
 	uint16_t rx_free_thresh;
@@ -247,21 +302,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx] != NULL) {
-		idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+		cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
 		dev->data->rx_queues[queue_idx] = NULL;
 	}
 
 	/* Setup Rx queue */
-	rxq = rte_zmalloc_socket("cpfl rxq",
-				 sizeof(struct idpf_rx_queue),
+	cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+				 sizeof(struct cpfl_rx_queue),
 				 RTE_CACHE_LINE_SIZE,
 				 socket_id);
-	if (rxq == NULL) {
+	if (cpfl_rxq == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
 		ret = -ENOMEM;
 		goto err_rxq_alloc;
 	}
 
+	rxq = &cpfl_rxq->base;
+
 	is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
 
 	rxq->mp = mp;
@@ -328,7 +385,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	}
 
 	rxq->q_set = true;
-	dev->data->rx_queues[queue_idx] = rxq;
+	dev->data->rx_queues[queue_idx] = cpfl_rxq;
 
 	return 0;
 
@@ -348,7 +405,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
 		     uint16_t queue_idx, uint16_t nb_desc,
 		     unsigned int socket_id)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	const struct rte_memzone *mz;
 	struct idpf_tx_queue *cq;
 	int ret;
@@ -396,9 +454,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		    uint16_t nb_desc, unsigned int socket_id,
 		    const struct rte_eth_txconf *tx_conf)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 	struct idpf_adapter *base = vport->adapter;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	struct cpfl_tx_queue *cpfl_txq;
 	struct idpf_hw *hw = &base->hw;
 	const struct rte_memzone *mz;
 	struct idpf_tx_queue *txq;
@@ -418,21 +478,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
 	/* Free memory if needed. */
 	if (dev->data->tx_queues[queue_idx] != NULL) {
-		idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+		cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
 		dev->data->tx_queues[queue_idx] = NULL;
 	}
 
 	/* Allocate the TX queue data structure. */
-	txq = rte_zmalloc_socket("cpfl txq",
-				 sizeof(struct idpf_tx_queue),
+	cpfl_txq = rte_zmalloc_socket("cpfl txq",
+				 sizeof(struct cpfl_tx_queue),
 				 RTE_CACHE_LINE_SIZE,
 				 socket_id);
-	if (txq == NULL) {
+	if (cpfl_txq == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
 		ret = -ENOMEM;
 		goto err_txq_alloc;
 	}
 
+	txq = &cpfl_txq->base;
+
 	is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
 
 	txq->nb_tx_desc = nb_desc;
@@ -486,7 +548,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
 	txq->ops = &def_txq_ops;
 	txq->q_set = true;
-	dev->data->tx_queues[queue_idx] = txq;
+	dev->data->tx_queues[queue_idx] = cpfl_txq;
 
 	return 0;
 
@@ -502,6 +564,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 int
 cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
+	struct cpfl_rx_queue *cpfl_rxq;
 	struct idpf_rx_queue *rxq;
 	uint16_t max_pkt_len;
 	uint32_t frame_size;
@@ -510,7 +573,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	if (rx_queue_id >= dev->data->nb_rx_queues)
 		return -EINVAL;
 
-	rxq = dev->data->rx_queues[rx_queue_id];
+	cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+	rxq = &cpfl_rxq->base;
 
 	if (rxq == NULL || !rxq->q_set) {
 		PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -574,9 +638,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_rx_queue *rxq =
-		dev->data->rx_queues[rx_queue_id];
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
+	struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+	struct idpf_rx_queue *rxq = &cpfl_rxq->base;
 	int err = 0;
 
 	err = idpf_vc_rxq_config(vport, rxq);
@@ -609,15 +674,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	struct idpf_tx_queue *txq;
+	struct cpfl_tx_queue *cpfl_txq;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -EINVAL;
 
-	txq = dev->data->tx_queues[tx_queue_id];
+	cpfl_txq = dev->data->tx_queues[tx_queue_id];
 
 	/* Init the TX tail register. */
-	IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+	IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
 
 	return 0;
 }
@@ -625,12 +690,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 int
 cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_tx_queue *txq =
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
+	struct cpfl_tx_queue *cpfl_txq =
 		dev->data->tx_queues[tx_queue_id];
 	int err = 0;
 
-	err = idpf_vc_txq_config(vport, txq);
+	err = idpf_vc_txq_config(vport, &cpfl_txq->base);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
 		return err;
@@ -649,7 +715,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
 			    tx_queue_id);
 	} else {
-		txq->q_started = true;
+		cpfl_txq->base.q_started = true;
 		dev->data->tx_queue_state[tx_queue_id] =
 			RTE_ETH_QUEUE_STATE_STARTED;
 	}
@@ -660,13 +726,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 int
 cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
+	struct cpfl_rx_queue *cpfl_rxq;
 	struct idpf_rx_queue *rxq;
 	int err;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues)
 		return -EINVAL;
 
+	cpfl_rxq = dev->data->rx_queues[rx_queue_id];
 	err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -674,7 +743,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		return err;
 	}
 
-	rxq = dev->data->rx_queues[rx_queue_id];
+	rxq = &cpfl_rxq->base;
 	rxq->q_started = false;
 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
 		rxq->ops->release_mbufs(rxq);
@@ -692,13 +761,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
+	struct cpfl_tx_queue *cpfl_txq;
 	struct idpf_tx_queue *txq;
 	int err;
 
 	if (tx_queue_id >= dev->data->nb_tx_queues)
 		return -EINVAL;
 
+	cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
 	err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
 	if (err != 0) {
 		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -706,7 +779,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		return err;
 	}
 
-	txq = dev->data->tx_queues[tx_queue_id];
+	txq = &cpfl_txq->base;
 	txq->q_started = false;
 	txq->ops->release_mbufs(txq);
 	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -723,25 +796,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 void
 cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+	cpfl_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 void
 cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-	idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+	cpfl_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 void
 cpfl_stop_queues(struct rte_eth_dev *dev)
 {
-	struct idpf_rx_queue *rxq;
-	struct idpf_tx_queue *txq;
+	struct cpfl_rx_queue *cpfl_rxq;
+	struct cpfl_tx_queue *cpfl_txq;
 	int i;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		if (rxq == NULL)
+		cpfl_rxq = dev->data->rx_queues[i];
+		if (cpfl_rxq == NULL)
 			continue;
 
 		if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -749,8 +822,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 	}
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		if (txq == NULL)
+		cpfl_txq = dev->data->tx_queues[i];
+		if (cpfl_txq == NULL)
 			continue;
 
 		if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -761,9 +834,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
 void
 cpfl_set_rx_function(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 #ifdef RTE_ARCH_X86
-	struct idpf_rx_queue *rxq;
+	struct cpfl_rx_queue *cpfl_rxq;
 	int i;
 
 	if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -789,8 +863,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
 	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
 		if (vport->rx_vec_allowed) {
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				rxq = dev->data->rx_queues[i];
-				(void)idpf_qc_splitq_rx_vec_setup(rxq);
+				cpfl_rxq = dev->data->rx_queues[i];
+				(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
 			}
 #ifdef CC_AVX512_SUPPORT
 			if (vport->rx_use_avx512) {
@@ -809,8 +883,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
 	} else {
 		if (vport->rx_vec_allowed) {
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				rxq = dev->data->rx_queues[i];
-				(void)idpf_qc_singleq_rx_vec_setup(rxq);
+				cpfl_rxq = dev->data->rx_queues[i];
+				(void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
 			}
 #ifdef CC_AVX512_SUPPORT
 			if (vport->rx_use_avx512) {
@@ -859,10 +933,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
 void
 cpfl_set_tx_function(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
-	struct idpf_tx_queue *txq;
+	struct cpfl_tx_queue *cpfl_txq;
 	int i;
 #endif /* CC_AVX512_SUPPORT */
 
@@ -877,8 +952,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
 				vport->tx_use_avx512 = true;
 			if (vport->tx_use_avx512) {
 				for (i = 0; i < dev->data->nb_tx_queues; i++) {
-					txq = dev->data->tx_queues[i];
-					idpf_qc_tx_vec_avx512_setup(txq);
+					cpfl_txq = dev->data->tx_queues[i];
+					idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
 				}
 			}
 		}
@@ -915,10 +990,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
 			if (vport->tx_use_avx512) {
 				for (i = 0; i < dev->data->nb_tx_queues; i++) {
-					txq = dev->data->tx_queues[i];
-					if (txq == NULL)
+					cpfl_txq = dev->data->tx_queues[i];
+					if (cpfl_txq == NULL)
 						continue;
-					idpf_qc_tx_vec_avx512_setup(txq);
+					idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
 				}
 				PMD_DRV_LOG(NOTICE,
 					    "Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
 
 #define CPFL_SUPPORT_CHAIN_NUM 5
 
+struct cpfl_rx_queue {
+	struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+	struct idpf_tx_queue base;
+};
+
 int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
 static inline int
 cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
-	struct idpf_vport *vport = dev->data->dev_private;
-	struct idpf_rx_queue *rxq;
+	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+	struct idpf_vport *vport = &cpfl_vport->base;
+	struct cpfl_rx_queue *cpfl_rxq;
 	int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxq = dev->data->rx_queues[i];
-		default_ret = cpfl_rx_vec_queue_default(rxq);
+		cpfl_rxq = dev->data->rx_queues[i];
+		default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
 		if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
-			splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+			splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
 			ret = splitq_ret && default_ret;
 		} else {
 			ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
 cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
-	struct idpf_tx_queue *txq;
+	struct cpfl_tx_queue *cpfl_txq;
 	int ret = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txq = dev->data->tx_queues[i];
-		ret = cpfl_tx_vec_queue_default(txq);
+		cpfl_txq = dev->data->tx_queues[i];
+		ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
 		if (ret == CPFL_SCALAR_PATH)
 			return CPFL_SCALAR_PATH;
 	}
-- 
2.26.2



Thread overview: 164+ messages
2023-04-21  6:50 [PATCH 00/10] add hairpin queue support beilei.xing
2023-04-21  6:50 ` beilei.xing [this message]
2023-04-21  6:50 ` [PATCH 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
2023-04-21  6:50 ` [PATCH 03/10] common/idpf: support queue groups add/delete beilei.xing
2023-04-24  8:48   ` Liu, Mingxia
2023-04-24  8:49   ` Liu, Mingxia
2023-05-19  5:36     ` Xing, Beilei
2023-04-21  6:50 ` [PATCH 04/10] net/cpfl: add haipin queue group during vpotr init beilei.xing
2023-04-24  8:55   ` Liu, Mingxia
2023-05-19  5:36     ` Xing, Beilei
2023-04-21  6:50 ` [PATCH 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
2023-04-21  6:50 ` [PATCH 06/10] net/cpfl: support hairpin queue configuration beilei.xing
2023-04-24  9:48   ` Liu, Mingxia
2023-05-19  5:43     ` Xing, Beilei
2023-04-21  6:50 ` [PATCH 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
2023-04-21  6:50 ` [PATCH 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
2023-04-21  6:50 ` [PATCH 09/10] net/cpfl: support peer ports get beilei.xing
2023-04-21  6:50 ` [PATCH 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-19  5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
2023-05-19  5:10   ` [PATCH v2 01/10] net/cpfl: refine structures beilei.xing
2023-05-19  5:10   ` [PATCH v2 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-19  5:10   ` [PATCH v2 03/10] common/idpf: support queue groups add/delete beilei.xing
2023-05-19  5:10   ` [PATCH v2 04/10] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-19  5:10   ` [PATCH v2 04/10] net/cpfl: add haipin queue group during vpotr init beilei.xing
2023-05-19  5:10   ` [PATCH v2 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-19  5:10   ` [PATCH v2 06/10] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-19  5:10   ` [PATCH v2 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-19  5:10   ` [PATCH v2 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-19  5:10   ` [PATCH v2 09/10] net/cpfl: support peer ports get beilei.xing
2023-05-19  5:10   ` [PATCH v2 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-19  7:31   ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
2023-05-19  7:31     ` [PATCH v3 01/10] net/cpfl: refine structures beilei.xing
2023-05-19  7:31     ` [PATCH v3 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-24 14:30       ` Wu, Jingjing
2023-05-19  7:31     ` [PATCH v3 03/10] common/idpf: support queue groups add/delete beilei.xing
2023-05-19  7:31     ` [PATCH v3 04/10] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-24 14:38       ` Wu, Jingjing
2023-05-19  7:31     ` [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-24  9:01       ` Liu, Mingxia
2023-05-26  3:46         ` Xing, Beilei
2023-05-25  3:58       ` Wu, Jingjing
2023-05-26  3:52         ` Xing, Beilei
2023-05-19  7:31     ` [PATCH v3 06/10] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-19  7:31     ` [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-25  4:12       ` Wu, Jingjing
2023-05-19  7:31     ` [PATCH v3 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-25  4:17       ` Wu, Jingjing
2023-05-19  7:31     ` [PATCH v3 09/10] net/cpfl: support peer ports get beilei.xing
2023-05-25  5:26       ` Wu, Jingjing
2023-05-19  7:31     ` [PATCH v3 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-26  7:38     ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-26  7:38       ` [PATCH v4 01/13] net/cpfl: refine structures beilei.xing
2023-05-26  7:38       ` [PATCH v4 02/13] common/idpf: support queue groups add/delete beilei.xing
2023-05-26  7:38       ` [PATCH v4 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-26  7:38       ` [PATCH v4 04/13] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-26  7:38       ` [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-30  2:27         ` Liu, Mingxia
2023-05-30  2:49           ` Liu, Mingxia
2023-05-31 10:53             ` Xing, Beilei
2023-05-26  7:38       ` [PATCH v4 06/13] common/idpf: add queue config API beilei.xing
2023-05-26  7:38       ` [PATCH v4 07/13] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-26  7:38       ` [PATCH v4 08/13] common/idpf: add switch queue API beilei.xing
2023-05-26  7:38       ` [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-30  3:30         ` Liu, Mingxia
2023-05-31 10:53           ` Xing, Beilei
2023-05-26  7:38       ` [PATCH v4 10/13] common/idpf: add irq map config API beilei.xing
2023-05-26  7:38       ` [PATCH v4 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-26  7:38       ` [PATCH v4 12/13] net/cpfl: support peer ports get beilei.xing
2023-05-26  7:38       ` [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-30  3:59         ` Liu, Mingxia
2023-05-31 10:54           ` Xing, Beilei
2023-05-31 10:18       ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:18         ` [PATCH v5 01/13] net/cpfl: refine structures beilei.xing
2023-05-31 10:18         ` [PATCH v5 02/13] common/idpf: support queue groups add/delete beilei.xing
2023-05-31 10:18         ` [PATCH v5 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-31 10:18         ` [PATCH v5 04/13] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-31 10:18         ` [PATCH v5 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-31 10:18         ` [PATCH v5 06/13] common/idpf: add queue config API beilei.xing
2023-05-31 10:18         ` [PATCH v5 07/13] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-31 10:18         ` [PATCH v5 08/13] common/idpf: add switch queue API beilei.xing
2023-05-31 10:18         ` [PATCH v5 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-31 10:18         ` [PATCH v5 10/13] common/idpf: add irq map config API beilei.xing
2023-05-31 10:18         ` [PATCH v5 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-31 10:18         ` [PATCH v5 12/13] net/cpfl: support peer ports get beilei.xing
2023-05-31 10:18         ` [PATCH v5 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 10:25         ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:25           ` [PATCH v6 01/13] net/cpfl: refine structures beilei.xing
2023-05-31 10:25           ` [PATCH v6 02/13] common/idpf: support queue groups add/delete beilei.xing
2023-05-31 10:25           ` [PATCH v6 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-31 10:25           ` [PATCH v6 04/13] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-31 10:25           ` [PATCH v6 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-31 10:25           ` [PATCH v6 06/13] common/idpf: add queue config API beilei.xing
2023-05-31 10:25           ` [PATCH v6 07/13] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-31 10:25           ` [PATCH v6 08/13] common/idpf: add switch queue API beilei.xing
2023-05-31 10:25           ` [PATCH v6 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-31 10:25           ` [PATCH v6 10/13] common/idpf: add irq map config API beilei.xing
2023-05-31 10:25           ` [PATCH v6 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-31 10:25           ` [PATCH v6 12/13] net/cpfl: support peer ports get beilei.xing
2023-05-31 10:25           ` [PATCH v6 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 13:04           ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 13:04             ` [PATCH v7 01/14] net/cpfl: refine structures beilei.xing
2023-05-31 13:04             ` [PATCH v7 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-05-31 13:04             ` [PATCH v7 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-31 13:04             ` [PATCH v7 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-31 13:04             ` [PATCH v7 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-31 13:04             ` [PATCH v7 06/14] common/idpf: add queue config API beilei.xing
2023-05-31 13:04             ` [PATCH v7 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-31 13:04             ` [PATCH v7 08/14] common/idpf: add switch queue API beilei.xing
2023-05-31 13:04             ` [PATCH v7 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-31 13:04             ` [PATCH v7 10/14] common/idpf: add irq map config API beilei.xing
2023-05-31 13:04             ` [PATCH v7 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-31 13:04             ` [PATCH v7 12/14] net/cpfl: support peer ports get beilei.xing
2023-05-31 13:04             ` [PATCH v7 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 13:04             ` [PATCH v7 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-05  6:17             ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05  6:17               ` [PATCH v8 01/14] net/cpfl: refine structures beilei.xing
2023-06-05  6:17               ` [PATCH v8 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-06-05  6:17               ` [PATCH v8 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-06-05  8:35                 ` Wu, Jingjing
2023-06-05  8:53                   ` Xing, Beilei
2023-06-05  6:17               ` [PATCH v8 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-06-05  6:17               ` [PATCH v8 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-06-05  6:17               ` [PATCH v8 06/14] common/idpf: add queue config API beilei.xing
2023-06-05  6:17               ` [PATCH v8 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-06-05  6:17               ` [PATCH v8 08/14] common/idpf: add switch queue API beilei.xing
2023-06-05  6:17               ` [PATCH v8 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-06-05  6:17               ` [PATCH v8 10/14] common/idpf: add irq map config API beilei.xing
2023-06-05  6:17               ` [PATCH v8 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-06-05  6:17               ` [PATCH v8 12/14] net/cpfl: support peer ports get beilei.xing
2023-06-05 11:22                 ` Wu, Jingjing
2023-06-05  6:17               ` [PATCH v8 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-06-05  6:17               ` [PATCH v8 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-05  9:06               ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05  9:06                 ` [PATCH v9 01/14] net/cpfl: refine structures beilei.xing
2023-06-05  9:06                 ` [PATCH v9 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-06-05  9:06                 ` [PATCH v9 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-06-05  9:06                 ` [PATCH v9 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-06-05  9:06                 ` [PATCH v9 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-06-05  9:06                 ` [PATCH v9 06/14] common/idpf: add queue config API beilei.xing
2023-06-05  9:06                 ` [PATCH v9 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-06-05  9:06                 ` [PATCH v9 08/14] common/idpf: add switch queue API beilei.xing
2023-06-05  9:06                 ` [PATCH v9 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-06-05  9:06                 ` [PATCH v9 10/14] common/idpf: add irq map config API beilei.xing
2023-06-05  9:06                 ` [PATCH v9 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-06-05  9:06                 ` [PATCH v9 12/14] net/cpfl: support peer ports get beilei.xing
2023-06-05  9:06                 ` [PATCH v9 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-06-05  9:06                 ` [PATCH v9 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-06 10:03                 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-06  6:40                   ` Wu, Jingjing
2023-06-07  7:16                     ` Zhang, Qi Z
2023-06-06 10:03                   ` [PATCH v10 01/14] net/cpfl: refine structures beilei.xing
2023-06-06 10:03                   ` [PATCH v10 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-06-06 10:03                   ` [PATCH v10 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-06-06 10:03                   ` [PATCH v10 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-06-06 10:03                   ` [PATCH v10 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-06-06 10:03                   ` [PATCH v10 06/14] common/idpf: add queue config API beilei.xing
2023-06-06 10:03                   ` [PATCH v10 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-06-06 10:03                   ` [PATCH v10 08/14] common/idpf: add switch queue API beilei.xing
2023-06-06 10:03                   ` [PATCH v10 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-06-06 10:03                   ` [PATCH v10 10/14] common/idpf: add irq map config API beilei.xing
2023-06-06 10:03                   ` [PATCH v10 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-06-06 10:03                   ` [PATCH v10 12/14] net/cpfl: support peer ports get beilei.xing
2023-06-06 10:03                   ` [PATCH v10 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-06-06 10:03                   ` [PATCH v10 14/14] doc: update the doc of CPFL PMD beilei.xing
